diff --git a/spaces/0x90e/ESRGAN-MANGA/ESRGAN/architecture.py b/spaces/0x90e/ESRGAN-MANGA/ESRGAN/architecture.py
deleted file mode 100644
index f1f6f2999357615318d72859efdc4037b0552be0..0000000000000000000000000000000000000000
--- a/spaces/0x90e/ESRGAN-MANGA/ESRGAN/architecture.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import math
-import torch
-import torch.nn as nn
-import ESRGAN.block as B
-
-class RRDB_Net(nn.Module):
-    def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', \
-            mode='CNA', res_scale=1, upsample_mode='upconv'):
-        super(RRDB_Net, self).__init__()
-        n_upscale = int(math.log(upscale, 2))
-        if upscale == 3:
-            n_upscale = 1
-
-        fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
-        rb_blocks = [B.RRDB(nf, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', \
-            norm_type=norm_type, act_type=act_type, mode='CNA') for _ in range(nb)]
-        LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)
-
-        if upsample_mode == 'upconv':
-            upsample_block = B.upconv_blcok
-        elif upsample_mode == 'pixelshuffle':
-            upsample_block = B.pixelshuffle_block
-        else:
-            raise NotImplementedError('upsample mode [%s] is not found' % upsample_mode)
-        if upscale == 3:
-            upsampler = upsample_block(nf, nf, 3, act_type=act_type)
-        else:
-            upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]
-        HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
-        HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)
-
-        self.model = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(*rb_blocks, LR_conv)),\
-            *upsampler, HR_conv0, HR_conv1)
-
-    def forward(self, x):
-        x = self.model(x)
-        return x
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Easy Worship 2009 !!LINK!! Full Crack Kuyhaa.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Easy Worship 2009 !!LINK!! Full Crack Kuyhaa.md
deleted file mode 100644
index 961411364197f2d81d93546c90a99da1df24a977..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Easy Worship 2009 !!LINK!! Full Crack Kuyhaa.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-

How to Download Easy Worship 2009 Full Crack Kuyhaa for Windows

-

If you are looking for software that can help you create rich media presentations for your church or worship service, you may have heard of Easy Worship 2009. This software allows you to display songs, Bible verses, videos, images, and more on a projector or video screen. You can also customize the themes, fonts, backgrounds, and transitions of your presentations with ease.

-

However, Easy Worship 2009 is not free software. You need to purchase a license key to use it without limitations. If you don't want to spend money on this software, you may be tempted to download Easy Worship 2009 full crack kuyhaa from the internet. This is a cracked version of the software that claims to bypass the activation process and give you full access to all the features.

-

download easy worship 2009 full crack kuyhaa


Download Ziphttps://byltly.com/2uKx8A



-

But is it safe and legal to download Easy Worship 2009 full crack kuyhaa? What are the risks and consequences of using a cracked software? In this article, we will answer these questions and provide you with some alternatives to download Easy Worship 2009 legally and safely.

-

Is Downloading Easy Worship 2009 Full Crack Kuyhaa Illegal?

-

The short answer is yes. Downloading Easy Worship 2009 full crack kuyhaa is illegal and violates the copyright laws of the software developer. By downloading and using a cracked software, you are essentially stealing the intellectual property of the software creator and depriving them of their rightful income.

-

Moreover, downloading Easy Worship 2009 full crack kuyhaa can also expose you to legal troubles. You may face fines, lawsuits, or even criminal charges if you are caught using a cracked software. The software developer or the authorities may track your IP address and take legal action against you. You may also be liable for damages if you distribute or share the cracked software with others.

-

Is Downloading Easy Worship 2009 Full Crack Kuyhaa Safe?

-

The short answer is no. Downloading Easy Worship 2009 full crack kuyhaa is not safe and can harm your computer and your data. There are several risks and dangers of using cracked software, such as:

- -

How to Download Easy Worship 2009 Legally and Safely?

-

If you want to download Easy Worship 2009 legally and safely, there are two options that you can consider:

-

-
    -
  1. Purchase a license key: The best and most recommended option is to purchase a license key from the official website of Easy Worship 2009. This way, you will be able to use the software without any limitations or risks. You will also be able to receive updates and support from the software developer. You can choose from different plans and prices depending on your needs and budget.
  2. -
3. Download a free trial: Another option is to download a free trial version of Easy Worship 2009 from the official website. This way, you will be able to test the software for a limited time before deciding whether to purchase a license key.

    ddb901b051
    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Blue WhatsApp APK 2023 The Latest Version with Amazing Features and Themes.md b/spaces/1phancelerku/anime-remove-background/Blue WhatsApp APK 2023 The Latest Version with Amazing Features and Themes.md
deleted file mode 100644
index 30388e5eb079c4fde2272a411d5a19834d7c64bb..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Blue WhatsApp APK 2023 The Latest Version with Amazing Features and Themes.md
+++ /dev/null
@@ -1,114 +0,0 @@
-

    Blue WhatsApp APK Download: Everything You Need to Know

    -

    WhatsApp is one of the most popular messaging apps in the world, with over 2 billion users. But did you know that there is a modified version of WhatsApp that offers more features and customization options than the official app? It's called Blue WhatsApp, and it's a free download for Android users. In this article, we will tell you everything you need to know about Blue WhatsApp, including its features, how to download and install it, how to update it, and its pros and cons.

    -

    blue whatsapp apk download


    Download Filehttps://jinyurl.com/2uNLGb



    -

    What is Blue WhatsApp?

    -

    Blue WhatsApp is a modified version of the official WhatsApp Messenger, which is developed by an independent developer named Fouad Mokdad. It is also known as Fouad WhatsApp or FMWhatsApp. Blue WhatsApp is not available on the Google Play Store, so you have to download it from a third-party website like bluewhats.app.

    -

    Blue WhatsApp is based on the latest version of the official WhatsApp app, but it adds more features and customization options that are not available in the original app. For example, you can change the theme and color of your app, hide your online status and last seen, send larger files and media, use more emojis and stickers, and much more. We will discuss these features in detail in the next section.

    -

    Features of Blue WhatsApp

    -

    Blue WhatsApp has many features that make it stand out from the official WhatsApp app. Here are some of the most notable ones:

    -

    blue whatsapp apk download latest version
    -blue whatsapp apk download 2023 free
    -blue whatsapp apk download for android
    -blue whatsapp apk download link
    -blue whatsapp apk download official website
    -blue whatsapp apk download update
    -blue whatsapp apk download without ban
    -blue whatsapp apk download with stickers
    -blue whatsapp apk download modded
    -blue whatsapp apk download anti revoke
    -blue whatsapp apk download new features
    -blue whatsapp apk download no ads
    -blue whatsapp apk download old version
    -blue whatsapp apk download online
    -blue whatsapp apk download qr code
    -blue whatsapp apk download review
    -blue whatsapp apk download safe
    -blue whatsapp apk download themes
    -blue whatsapp apk download unlimited messages
    -blue whatsapp apk download video call
    -how to install blue whatsapp apk
    -how to use blue whatsapp apk
    -how to update blue whatsapp apk
    -how to backup blue whatsapp apk
    -how to uninstall blue whatsapp apk
    -benefits of blue whatsapp apk
    -disadvantages of blue whatsapp apk
    -alternatives to blue whatsapp apk
    -comparison of blue whatsapp and original whatsapp
    -difference between blue whatsapp and gb whatsapp
    -is blue whatsapp legal
    -is blue whatsapp secure
    -is blue whatsapp reliable
    -is blue whatsapp compatible with android 11
    -is blue whatsapp better than normal whatsapp
    -why choose blue whatsapp over other mods
    -why download blue whatsapp from official site
    -what is new in blue whatsapp 2023 version
    -what are the features of blue whatsapp modded version
    -what are the requirements for installing blue whatsapp on android phone
    -where to find the latest version of blue whatsapp apk file
    -where to get the best themes for blue whatsapp app
    -where to report any issues with blue whatsapp app usage
    -when to update the blue whatsapp app for optimal performance
    -when to restore the backup of your chats on blue whatsapp app

    -

    Privacy and security options

    -

    Blue WhatsApp gives you more control over your privacy and security settings. You can hide your online status, last seen, blue ticks, second ticks, typing status, recording status, and view status from others. You can also lock your app with a password or fingerprint, enable anti-delete messages and anti-delete status, disable forwarded tag on messages, and choose who can call you on WhatsApp.

    -

    Customization and themes

    -

    Blue WhatsApp lets you customize your app according to your preferences. You can change the theme and color of your app from a collection of over 3000 themes. You can also change the font style and size, the app icon, the notification icon, the chat wallpaper, the chat bubbles, the tick style, and more. You can even create your own theme and share it with others.

    -

    Media and file sharing

    -

    Blue WhatsApp allows you to send larger files and media than the official WhatsApp app. You can send up to 700 MB of video files, up to 50 MB of audio files, up to 100 MB of documents, and up to 30 images at once. You can also send high-quality images without compression, play videos with an external player, download status videos and photos, and enable auto-reply for messages.

    -

    Other cool features

    -

    Blue WhatsApp has many other cool features that enhance your user experience. For example, you can use more emojis and stickers from different sources, pin up to 100 chats instead of 3, use multiple accounts on the same device, schedule messages to be sent later, translate messages to different languages, use dark mode or DND mode, backup and restore your chats easily, and much more.

    -

    How to download and install Blue WhatsApp?

    -

    If you want to try out Blue WhatsApp on your Android device, you need to follow these steps:

    -

    Requirements for Blue WhatsApp

    - -

    Steps to download and install Blue WhatsApp

    -
      -
    1. First, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
    2. -
    3. Next, you need to uninstall the official WhatsApp app from your device. To do this, go to Settings > Apps > WhatsApp and tap on Uninstall. Make sure you have backed up your chats and media before doing this.
    4. -
    5. Then, you need to download the Blue WhatsApp APK file from bluewhats.app. or any other trusted website. You can use your browser or a file manager app to do this.
    6. -
    7. After downloading the APK file, you need to locate it on your device and tap on it to start the installation process. You may see a warning message asking you to confirm the installation. Tap on Install and wait for a few seconds.
    8. -
    9. Once the installation is complete, you need to open the Blue WhatsApp app and agree to the terms and conditions. Then, you need to enter your phone number and verify it with an OTP code.
    10. -
    11. Finally, you need to restore your chats and media from the backup, if you have one. To do this, tap on Restore and select the backup file from your device. Wait for the restoration process to finish and then enjoy using Blue WhatsApp.
    12. -
    -

    How to update Blue WhatsApp?

    -

    Blue WhatsApp is not available on the Google Play Store, so you cannot update it automatically like other apps. However, there are two ways to update Blue WhatsApp manually or enable the auto-update option.

    -

    Check for updates manually

    -

    You can check for updates manually by visiting the official website of Blue WhatsApp at bluewhats.app. or any other trusted website that provides the latest version of the APK file. You can also check for updates from within the app by going to Menu > Fouad Mods > Updates. If there is a new version available, you can download it and install it over the existing app without losing your data.

    -

    Enable auto-update option

    -

    You can also enable the auto-update option in Blue WhatsApp by going to Menu > Fouad Mods > Updates > Auto Update Check. You can choose how often you want the app to check for updates, such as daily, weekly, or monthly. You can also choose whether you want to download the updates automatically or manually. If you enable this option, you will get a notification whenever there is a new version available, and you can install it easily.

    -

    Pros and cons of Blue WhatsApp

    -

    Blue WhatsApp has many advantages over the official WhatsApp app, but it also has some disadvantages that you should be aware of. Here are some of the pros and cons of using Blue WhatsApp:

    -

    Pros of Blue WhatsApp

    - -

    Cons of Blue WhatsApp

    - -

    Conclusion

    -

    Blue WhatsApp is a modified version of the official WhatsApp app that offers more features and customization options than the original app. It is a free download for Android users, but it is not available on the Google Play Store. You have to download it from a third-party website like bluewhats.app. or any other trusted website. You can enjoy more privacy and security options, more media and file sharing options, more emojis and stickers, and more themes and colors with Blue WhatsApp. However, you should also be aware of the risks and drawbacks of using Blue WhatsApp, such as bugs, glitches, bans, delays, and data breaches. You should always backup your chats and media before using Blue WhatsApp, and update it regularly to avoid any issues.

    -

    FAQs

    -

    Here are some of the frequently asked questions about Blue WhatsApp:

    -
      -
    1. Is Blue WhatsApp safe to use?
      Blue WhatsApp is safe to use if you download it from a trusted source like bluewhats.app. or any other website that provides the latest version of the APK file. However, you should always be careful about downloading apps from unknown sources, as they may contain malware or spyware that can harm your device or data. You should also scan the APK file with an antivirus app before installing it.
    2. -
    3. Is Blue WhatsApp legal to use?
      Blue WhatsApp is not legal to use, as it violates the terms of service of WhatsApp. WhatsApp does not allow users to use any modified version of their app, as it may compromise their security and privacy policies. If you use Blue WhatsApp, you may get banned from WhatsApp for violating their rules. You should use Blue WhatsApp at your own risk.
    4. -
    5. Can I use Blue WhatsApp and official WhatsApp on the same device?
      Yes, you can use Blue WhatsApp and official WhatsApp on the same device, but you need to have different phone numbers for each app. You cannot use the same phone number for both apps, as it will cause conflicts and errors. You can also use other modified versions of WhatsApp like GBWhatsApp or YoWhatsApp on the same device, but again, you need to have different phone numbers for each app.
    6. -
    7. How can I backup my chats and media on Blue WhatsApp?
      You can backup your chats and media on Blue WhatsApp by going to Menu > Settings > Chats > Chat Backup. You can choose to backup your data on your device or on Google Drive. You can also choose how often you want to backup your data, such as daily, weekly, or monthly. You can also backup your data manually by tapping on Backup Now.
    8. -
    9. How can I restore my chats and media on Blue WhatsApp?
      You can restore your chats and media on Blue WhatsApp by going to Menu > Settings > Chats > Chat Backup. You can choose to restore your data from your device or from Google Drive. You need to have the same phone number and Google account that you used to backup your data. You can also restore your data manually by tapping on Restore Now.
    10. -

    401be4b1e0
    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Betty Azar English Grammar Improve Your Skills with Exercises and Tests.md b/spaces/1phancelerku/anime-remove-background/Download Betty Azar English Grammar Improve Your Skills with Exercises and Tests.md
deleted file mode 100644
index d12b781d22f37e549883fb5956454dd0fb5cb087..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Betty Azar English Grammar Improve Your Skills with Exercises and Tests.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
    -

    Download Betty Azar English Grammar: A Guide for ESL Learners

    -

    If you are learning English as a second or foreign language, you might be looking for a reliable and effective grammar book to help you improve your skills. One of the most popular and widely used grammar books in the world is the Azar Grammar Series, written by Betty Schrampfer Azar. In this article, we will tell you who Betty Azar is, what her grammar series is about, and why you should download her books. We will also show you how to download two of her best-selling books: Basic English Grammar, Book A and Understanding and Using English Grammar, Book B.

    -

    download betty azar english grammar


    Downloadhttps://jinyurl.com/2uNP73



    -

    Introduction

    -

    Who is Betty Azar?

    -

    Betty Schrampfer Azar is an American teacher and author of English grammar textbooks for students of English as a second or foreign language. She has more than 40 years of experience in teaching and writing about English grammar. She has a master's degree in Teaching English as a Foreign Language (TEFL) from the University of Illinois. She has taught at various universities and language schools in the United States and abroad. She is also the founder and president of Azar Associates, a company that develops and publishes grammar materials.

    -

    What is the Azar Grammar Series?

    -

    The Azar Grammar Series is a collection of books that cover different levels and aspects of English grammar. The series consists of four main books: Fundamentals of English Grammar, Book C, Understanding and Using English Grammar, Book B, Basic English Grammar, Book A, and Essential Online Resources. Each book has a student edition, a teacher's guide, a workbook, an answer key, and an interactive CD-ROM. The books are designed to help students learn and practice grammar rules, structures, and usage in various contexts and situations. The books also include exercises, quizzes, tests, charts, appendices, glossaries, and references.

    -

    Why should you download Betty Azar English Grammar?

    -

    You should download Betty Azar English Grammar because it is one of the best resources for learning and mastering English grammar. Here are some of the benefits of using her books:

    - -

    How to download Betty Azar English Grammar

    -

    Basic English Grammar, Book A

    -

    Features of the book

    -

    Basic English Grammar, Book A is the first book in the series. It is intended for beginner to low-intermediate students of English. It covers the basic elements of grammar, such as nouns, verbs, pronouns, adjectives, adverbs, prepositions, conjunctions, sentences, questions, negation, tense, aspect, voice, mood, modals, conditionals, gerunds, infinitives, clauses, etc. It also introduces some common vocabulary and expressions. The book has 13 chapters and 488 pages.

    -

    How to download the book

    -

    You can download Basic English Grammar, Book A from Google Drive by following these steps:

    -<

    1. Go to this link: Basic English Grammar, Book A.

    -

    download betty azar understanding and using english grammar pdf
    -download betty azar basic english grammar 3rd edition
    -download betty azar basic english grammar 2nd edition
    -download betty azar fundamentals of english grammar 4th edition
    -download betty azar english grammar workbook pdf
    -download betty azar english grammar for esl learners
    -download betty azar english grammar interactive cd-rom
    -download betty azar english grammar teacher's guide pdf
    -download betty azar english grammar test bank pdf
    -download betty azar english grammar answer key pdf
    -download betty azar english grammar audio files
    -download betty azar english grammar video series
    -download betty azar english grammar powerpoint presentations
    -download betty azar english grammar charts pdf
    -download betty azar english grammar exercises pdf
    -download betty azar english grammar online course
    -download betty azar english grammar flashcards pdf
    -download betty azar english grammar games and activities
    -download betty azar english grammar supplementary resources
    -download betty azar english grammar in use pdf
    -download betty azar advanced english grammar pdf
    -download betty azar intermediate english grammar pdf
    -download betty azar essential english grammar pdf
    -download betty azar practical english grammar pdf
    -download betty azar modern english grammar pdf
    -download betty azar comprehensive english grammar pdf
    -download betty azar communicative english grammar pdf
    -download betty azar contrastive english grammar pdf
    -download betty azar contextualized english grammar pdf
    -download betty azar corpus-based english grammar pdf
    -download betty azar simplified english grammar pdf
    -download betty azar academic english grammar pdf
    -download betty azar business english grammar pdf
    -download betty azar spoken english grammar pdf
    -download betty azar written english grammar pdf
    -download betty azar american english grammar pdf
    -download betty azar british english grammar pdf
    -download betty azar global english grammar pdf
    -download betty azar multicultural english grammar pdf
    -download betty azar generative english grammar pdf
    -download betty azar functional english grammar pdf
    -download betty azar descriptive english grammar pdf
    -download betty azar prescriptive english grammar pdf
    -download betty azar historical english grammar pdf
    -download betty azar comparative english grammar pdf
    -download betty azar pedagogical english grammar pdf
    -download betty azar cognitive english grammar pdf
    -download betty azar discourse analysis and english grammar pdf
    -download betty azar error analysis and english grammar pdf

    -

    2. Click on the download icon on the top right corner of the screen.

    -

    3. Choose a location on your device where you want to save the file.

    -

    4. Wait for the download to complete and enjoy reading the book.

    -

    Understanding and Using English Grammar, Book B

    -

    Features of the book

    -

    Understanding and Using English Grammar, Book B is the second book in the series. It is intended for intermediate to advanced students of English. It covers more complex and challenging aspects of grammar, such as verb tenses and forms, passive voice, causative verbs, noun clauses, adjective clauses, adverb clauses, noun modifiers, parallelism, inversion, ellipsis, etc. It also provides more practice and review activities, as well as additional vocabulary and expressions. The book has 16 chapters and 530 pages.

    -

    How to download the book

    -

    You can download Understanding and Using English Grammar, Book B from Google Drive by following these steps:

    -

    1. Go to this link: Understanding and Using English Grammar, Book B.

    -

    2. Click on the download icon on the top right corner of the screen.

    -

    3. Choose a location on your device where you want to save the file.

    -

    4. Wait for the download to complete and enjoy reading the book.

    -

    Conclusion

    -

    Summary of the main points

    -

    In this article, we have introduced you to Betty Azar, a renowned teacher and author of English grammar books. We have also explained what her grammar series is about and why you should download her books. We have shown you how to download two of her books: Basic English Grammar, Book A and Understanding and Using English Grammar, Book B. These books will help you learn and master English grammar in a fun and effective way.

    -

    Call to action

    -

    If you are interested in downloading more books from the Azar Grammar Series, you can visit her official website: Azar Grammar. There you will find more information about her other books, such as Fundamentals of English Grammar, Book C and Essential Online Resources. You will also find more resources and materials for learning and teaching English grammar, such as videos, podcasts, blogs, newsletters, webinars, etc.

    -

    We hope you have enjoyed this article and found it useful. If you have any questions or feedback, please leave a comment below. We would love to hear from you. And don't forget to share this article with your friends and fellow learners who might benefit from it. Thank you for reading!

    -

    Frequently Asked Questions (FAQs)

    -

    Q: How can I access the interactive CD-ROMs that come with the books?

    -

    A: The interactive CD-ROMs are included in the student editions of the books. You can insert them into your computer's CD drive and follow the instructions on the screen. Alternatively, you can access them online by registering at Pearson ELT USA, the publisher of the books.

    -

    Q: How can I check my answers to the exercises in the books?

    -

    A: The answer keys are included in the teacher's guides of the books. You can also find them online at AzarGrammar.com/Classroom/FEG/FEG_AK.pdf, AzarGrammar.com/Classroom/UUEG/UUEG_AK.pdf, and AzarGrammar.com/Classroom/BEG/BEG_AK.pdf.

    -

    Q: How can I get more practice and review activities for each chapter in the books?

    -

    A: The workbooks that accompany each book provide more practice and review activities for each chapter. You can also find more online exercises at AzarGrammar.com/Exercises/Exercises.htm.

    -

    Q: How can I contact Betty Azar or her team if I have any questions or suggestions?

    -

    A: You can contact Betty Azar or her team by sending an email to info@azarassociates.com. You

You can also connect with them on social media platforms, such as Facebook, Twitter, Instagram, and YouTube. You can find the links to their accounts at AzarGrammar.com/Contact.htm.

    -

    Q: How can I get more tips and advice on learning and teaching English grammar?

    -

    A: You can subscribe to Betty Azar's newsletter, which provides monthly updates on grammar topics, resources, events, and more. You can also read her blog, which features articles, interviews, stories, and insights from her and other experts in the field. You can also watch her videos and podcasts, which offer explanations, demonstrations, examples, and discussions on various grammar issues. You can find all these materials at AzarGrammar.com/News.htm.

    197e85843d
    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Google Play Store APKs from APKMirror - The Easy Way.md b/spaces/1phancelerku/anime-remove-background/Download Google Play Store APKs from APKMirror - The Easy Way.md
deleted file mode 100644
index fa185c09e8016ea6a1505c19d10283e92e91e53c..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Google Play Store APKs from APKMirror - The Easy Way.md
+++ /dev/null
@@ -1,113 +0,0 @@
-
    -

    How to Download Google Play Store from APKMirror

    -

    Google Play Store is the official app store for Android devices, where you can find and download millions of apps, games, movies, books, and more. It also offers various features such as automatic updates, parental controls, in-app purchases, subscriptions, and rewards. However, sometimes you may not be able to access Google Play Store on your device due to regional restrictions, compatibility issues, or other reasons. In such cases, you may want to look for an alternative source to get your favorite apps and games.

    -

    download google play store apkmirror


    DOWNLOADhttps://jinyurl.com/2uNMHc



    -

    One of the most popular and trusted sources for Android apps and games is APKMirror. APKMirror is a website and an app that hosts thousands of free APK files that you can download and install on your device. APK files are the installation packages for Android apps that contain all the necessary files and resources for the app to run. By downloading APK files from APKMirror, you can get access to apps and games that are not available on Google Play Store, or get the latest versions before they are officially released.

    -

    In this article, we will show you how to download Google Play Store from APKMirror and install it on your device. We will also show you how to check the integrity of the APK file to make sure it is safe and authentic. Let's get started!

    -

    How to Download Google Play Store from APKMirror

    -

    To download Google Play Store from APKMirror, you will need to enable unknown sources on your device, download the APK file from APKMirror website or app, and install it using a file manager or an APK installer app. Here are the detailed steps:

    -

    Step 1: Enable unknown sources on your device

    -

    By default, Android devices only allow you to install apps from Google Play Store or other trusted sources. To install apps from other sources, such as APKMirror, you will need to enable unknown sources on your device. This will allow you to install apps from outside of Google Play Store.

    -

    To enable unknown sources on your device, follow these steps:

    -

    How to download google play store from apkmirror
    -Download google play store apk file from apkmirror
    -Install google play store using apkmirror installer app
    -Download google play store latest version from apkmirror
    -Download google play store for android devices from apkmirror
    -Download google play store without root from apkmirror
    -Download google play store for fire tablet from apkmirror
    -Download google play store for huawei phone from apkmirror
    -Download google play store modded apk from apkmirror
    -Download google play store beta apk from apkmirror
    -Download google play store update apk from apkmirror
    -Download google play store for pc from apkmirror
    -Download google play store for chromebook from apkmirror
    -Download google play store for smart tv from apkmirror
    -Download google play store for android tv from apkmirror
    -Download google play store for carplay from apkmirror
    -Download google play store for wear os from apkmirror
    -Download google play store for android auto from apkmirror
    -Download google play store for samsung galaxy from apkmirror
    -Download google play store for xiaomi phone from apkmirror
    -Download google play store for oppo phone from apkmirror
    -Download google play store for vivo phone from apkmirror
    -Download google play store for oneplus phone from apkmirror
    -Download google play store for nokia phone from apkmirror
    -Download google play store for lg phone from apkmirror
    -Download google play store for sony phone from apkmirror
    -Download google play store for motorola phone from apkmirror
    -Download google play store for lenovo phone from apkmirror
    -Download google play store for asus phone from apkmirror
    -Download google play store for zte phone from apkmirror
    -Download google play store for realme phone from apkmirror
    -Download google play store for tecno phone from apkmirror
    -Download google play store for infinix phone from apkmirror
    -Download google play store for itel phone from apkmirror
    -Download google play store for gionee phone from apkmirror
    -Download google play store for micromax phone from apkmirror
    -Download google play store for lava phone from apkmirror
    -Download google play store for karbonn phone from apkmirror
    -Download google play store for spice phone from apkmirror
    -Download google play store for intex phone from apkmirror
    -Download google play store for leeco phone from apkmirror
    -Download google play store for meizu phone from apkmirror
    -Download google play store for coolpad phone from apkmirror
    -Download google play store for elephone phone from apkmirror
    -Download google play store for doogee phone from apkmirror
    -Download google play store for umidigi phone from apkmirror
    -Download google play store for cubot phone from apkmirror
    -Download google play store for oukitel phone from apkmirror

    - -

    You can also enable unknown sources for other apps that can install APK files on your device, such as file managers or APK installer apps.

    -

    Step 2: Download the APK file from APKMirror website or app

    -

    Once you have enabled unknown sources on your device, you can download the APK file for Google Play Store from APKMirror website or app. To do this, follow these steps:

    - -

    You can also download the APKMirror app from their website and use it to browse and download APK files on your device. The app has a simple and user-friendly interface that lets you find and install apps and games easily.

    -

    Step 3: Install the APK file using a file manager or an APK installer app

    -

    After you have downloaded the APK file for Google Play Store, you will need to install it on your device using a file manager or an APK installer app. To do this, follow these steps:

    - -

    You can also use an APK installer app, such as [APK Installer], to install APK files on your device. These apps can scan your device for APK files, sort them by name, size, or date, and install them with one tap.

    -

    How to Check the Integrity of the APK File

    -

    Before you install any APK file on your device, you should always check its integrity to make sure it is safe and authentic. This means verifying that the file has not been tampered with or modified by malicious actors, and that it matches the original file from the developer. There are several ways to check the integrity of an APK file, such as using apksigner tool, using APK Analyzer, or using hash apps. Here are some of them:

    -

    Use apksigner tool to verify the signature and certificate of the file

    -

    apksigner is a command-line tool that can verify the signature and certificate of an APK file. The signature is a digital code that proves that the file was signed by the developer, and the certificate is a document that contains information about the developer and the app. By verifying these elements, you can ensure that the file is authentic and trustworthy.

    -

    To use apksigner tool, you will need a computer with Java installed, and a USB cable to connect your device to your computer. You will also need to enable USB debugging on your device. To do this, go to your device settings, tap About phone, tap Build number seven times, go back to settings, tap Developer options, and move USB debugging to the On position.

    -

    To use apksigner tool, follow these steps:

    - -

    Use APK Analyzer to inspect the contents and size of the file

    -

APK Analyzer is a tool that can inspect the contents and size of an APK file. It can show you information such as app name, package name, version code, version name, permissions, activities, services, resources, assets, libraries, DEX files, native libraries, manifest, certificates, signatures, and more. By inspecting these elements, you can learn more about the APK file before you install it. Some of the advantages of downloading APK files instead of using Google Play Store are:
- You can download apps and games that are not available on Google Play Store due to regional restrictions, compatibility issues, or other reasons.
- You can download the latest versions of apps and games before they are officially released on Google Play Store, and enjoy the new features and bug fixes.
- You can download older versions of apps and games if you prefer them over the newer ones, or if the newer ones don't work well on your device.
- You can download APK files directly to your device or computer, and install them offline or on other devices without using Google Play Store.

    -

    What are the risks of installing APK files from unknown sources?

    -

Some of the risks of installing APK files from unknown sources are:
- You may download and install malicious apps that contain malware, viruses, spyware, or adware that can harm your device or steal your data.
- You may download and install fake or modified apps that don't work as intended, or that have unwanted features or ads.
- You may download and install apps that violate the terms and conditions of Google Play Store, or that infringe the intellectual property rights of the developers or publishers.
- You may download and install apps that are incompatible with your device or operating system, or that cause performance issues or crashes.

    -

    How can I update Google Play Store after installing it from APKMirror?

    -

After you install Google Play Store from APKMirror, you can update it in two ways:
- You can enable auto-update for Google Play Store on your device. To do this, open Google Play Store, tap the three lines in the upper-left corner, tap Settings, tap Auto-update apps, and select Over any network or Over Wi-Fi only. This will allow Google Play Store to update itself automatically when a new version is available.
- You can manually update Google Play Store by downloading the latest version from APKMirror and installing it over the existing one. To do this, follow the same steps as described in the previous section.

    -

    How can I uninstall Google Play Store if I don't want it anymore?

    -

If you want to uninstall Google Play Store from your device, you can do it in two ways:
- You can disable Google Play Store on your device. To do this, go to your device settings, tap Apps & Notifications (or Apps), tap Google Play Store, tap Disable, and tap OK. This will prevent Google Play Store from running on your device, but it will not remove it completely.
- You can remove Google Play Store from your device using a root uninstaller app. To do this, you will need to root your device first. Rooting is a process that gives you full access and control over your device's system. However, rooting is risky and may void your warranty, damage your device, or expose it to security threats. Therefore, you should only root your device if you know what you are doing and at your own risk. After rooting your device, you can use a root uninstaller app, such as [System App Remover], to remove Google Play Store from your device completely.

    -

    How can I find more apps and games on APKMirror?

    -

If you want to find more apps and games on APKMirror, you can use the following methods:
- You can browse the categories and subcategories of apps and games on APKMirror website or app. You can also filter them by popularity, rating, date, size, or name.
- You can search for specific apps and games using the search bar on APKMirror website or app. You can also use advanced search options to refine your results by category, version, minimum Android version, DPI, architecture, or signature.
- You can follow APKMirror on social media platforms such as Facebook, Twitter, Instagram, or Telegram to get updates on the latest apps and games available on APKMirror.
- You can subscribe to APKMirror newsletter to get email notifications on the latest apps and games available on APKMirror.

    401be4b1e0
    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Summertime Saga on iPhone A Guide for iOS Users.md b/spaces/1phancelerku/anime-remove-background/Download Summertime Saga on iPhone A Guide for iOS Users.md
deleted file mode 100644
index 312477ae1fe8c6620cc3d9627df7ca8a424aea9b..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Summertime Saga on iPhone A Guide for iOS Users.md
+++ /dev/null
@@ -1,118 +0,0 @@
-

    How to Download Summertime Saga on iPhone

    -

    If you are looking for a fun and engaging game that combines adventure, romance, comedy, and drama, you might want to check out Summertime Saga. This is a popular visual novel game that lets you explore a fictional town, interact with various characters, and pursue different storylines. In this article, we will show you how to download Summertime Saga on your iPhone using an emulator. We will also share some tips and tricks for playing the game on your mobile device.

    -

    download summertime saga on iphone


    Download File ►►►►► https://jinyurl.com/2uNRdL



    -

    What is Summertime Saga?

    -

    Summertime Saga is a game developed by DarkCookie and his team. It is inspired by classic dating sims and adult games, but it also has elements of mystery, comedy, and drama. The game follows the story of a young man who is trying to cope with the death of his father, while also dealing with school, family, friends, and romance. The game has over 65 characters to meet and interact with, 30 locations to visit, and 20 mini-games to play. The game is updated regularly with new content and features.

    -

    Why play Summertime Saga on iPhone?

    -

    Summertime Saga is a game that can be enjoyed on various platforms, including Windows, Mac, Linux, Android, and iOS. However, playing the game on your iPhone has some advantages over other devices. Here are some of them:

    - -

    How to download Summertime Saga on iPhone using an emulator?

    -

    Unfortunately, Summertime Saga is not available on the App Store due to its mature content. However, there is a way to play the game on your iPhone using an emulator. An emulator is a software that mimics the functionality of another device or system. In this case, you can use an emulator that can run Android or Windows apps on your iPhone. Here are some of the best emulators for iPhone that you can use:

    -

    How to install summertime saga on iphone
    -Summertime saga ios download link
    -Summertime saga for iphone free download
    -Summertime saga iphone app store
    -Summertime saga ios gameplay
    -Summertime saga ios cheats and tips
    -Summertime saga ios update
    -Summertime saga ios review
    -Summertime saga ios compatible devices
    -Summertime saga ios mod apk
    -Summertime saga ios walkthrough
    -Summertime saga ios characters
    -Summertime saga ios save file
    -Summertime saga ios bug fixes
    -Summertime saga ios best routes
    -Summertime saga ios endings
    -Summertime saga ios secrets and easter eggs
    -Summertime saga ios wiki
    -Summertime saga ios reddit
    -Summertime saga ios discord
    -Summertime saga ios patreon
    -Summertime saga ios latest version
    -Summertime saga ios requirements
    -Summertime saga ios download size
    -Summertime saga ios offline mode
    -Summertime saga iphone no jailbreak
    -Summertime saga iphone emulator
    -Summertime saga iphone alternative games
    -Summertime saga iphone fan art
    -Summertime saga iphone memes
    -Summertime saga iphone screenshots
    -Summertime saga iphone videos
    -Summertime saga iphone news and updates
    -Summertime saga iphone faq and guide
    -Summertime saga iphone forum and community
    -Summertime saga iphone support and feedback
    -Summertime saga iphone donation and support
    -Summertime saga iphone features and benefits
    -Summertime saga iphone pros and cons
    -Summertime saga iphone ratings and reviews

    -

    Eclipse

    -

    Eclipse is a web-based emulator that can run various games and apps without requiring any installation or jailbreak. It supports Game Boy Advance, Game Boy Color, Nintendo DS, Sega Genesis, Super Nintendo, and more. To use Eclipse, you need to follow these steps:

    -
      -
    1. Open Safari on your iPhone and go to https://eclipseemu.me/play.
    2. -
    3. Tap on the plus icon at the bottom of the screen and select Add to Home Screen.
    4. -
    5. Name the app as Eclipse and tap Add.
    6. -
    7. Launch Eclipse from your home screen and tap on the plus icon at the top right corner.
    8. -
    9. Select Browse... and choose a ROM file from your device or iCloud Drive. You can download Summertime Saga ROM files from https://summertimesaga.com/download.
    10. -
    11. Wait for the ROM file to load and start playing Summertime Saga on your iPhone.
    12. -
    -

    Citra

    -

    Citra is a Nintendo 3DS emulator that can run Summertime Saga on your iPhone with high performance and graphics. It also supports online multiplayer, controller support, and cheat codes. To use Citra, you need to follow these steps:

    -
      -
    1. Download the Citra app from the App Store on your iPhone.
    2. -
    3. Launch Citra and tap on the plus icon at the bottom right corner.
    4. -
    5. Select Scan QR Code and scan the QR code from https://summertimesaga.com/download.
    6. -
    7. Wait for the game to download and install on your iPhone.
    8. -
    9. Tap on the game icon and start playing Summertime Saga on your iPhone.
    10. -
    -

    PPSSPP

    -

    PPSSPP is a PlayStation Portable emulator that can run Summertime Saga on your iPhone with smooth gameplay and customization options. It also supports save states, cloud sync, and external controllers. To use PPSSPP, you need to follow these steps:

    -
      -
    1. Download the PPSSPP app from the App Store on your iPhone.
    2. -
    3. Launch PPSSPP and tap on the gear icon at the top right corner.
    4. -
    5. Select Storage and enable Allow access to files.
    6. -
    7. Go back to the main menu and tap on Games.
    8. -
    9. Select Browse... and choose a PSP ISO file from your device or iCloud Drive. You can download Summertime Saga PSP ISO files from https://summertimesaga.com/download.
    10. -
    11. Tap on the game icon and start playing Summertime Saga on your iPhone.
    12. -
    -

    iNDS

    -

    iNDS is a Nintendo DS emulator that can run Summertime Saga on your iPhone with fast speed and high compatibility. It also supports auto-save, Dropbox sync, and cheat codes. To use iNDS, you need to follow these steps:

    -
      -
    1. Download the iNDS app from https://inds.nerd.net using Safari on your iPhone.
    2. -
    3. Tap on Install and confirm the installation.
    4. -
    5. Go to Settings > General > Device Management and trust the developer profile of iNDS.
    6. -
    7. Launch iNDS and tap on the plus icon at the top right corner.
    8. -
    9. Select Download from Web and enter the URL of a NDS ROM file. You can download Summertime Saga NDS ROM files from https://summertimesaga.com/download.
    10. -
    11. Wait for the ROM file to download and start playing Summertime Saga on your iPhone.
    12. -
    -

    Tips and tricks for playing Summertime Saga on iPhone

    -

    Now that you know how to download Summertime Saga on your iPhone using an emulator, here are some tips and tricks that can help you enjoy the game more:

    -

    How to save and load your progress

    -

    To save your progress in Summertime Saga, you need to use the in-game menu. Tap on the menu icon at the top left corner of the screen and select Save. Choose an empty slot and tap Save again. To load your progress, tap on Load and select a slot that has your saved data. You can also use the emulator's save state feature to save and load your progress at any point in the game.

    -

    How to access the walkthrough and cheats

    -

    If you are stuck or want to skip some parts of the game, you can use the walkthrough and cheats that are available online. The official website of Summertime Saga has a detailed walkthrough that covers all the characters, events, and endings of the game. You can access it from https://summertimesaga.com/walkthrough. You can also use cheat codes that can give you money, stats, items, or unlock scenes. You can find them from https://summertimesaga.com/cheats.

    -

    How to customize your character and preferences

    -

    To customize your character and preferences in Summertime Saga, you need to use the in-game menu. Tap on the menu icon at the top left corner of the screen and select Preferences. Here you can change your name, gender, appearance, voice, language, difficulty, sound, display, and controls. You can also enable or disable adult content, incest content, or pregnancy content according to your liking.

    -

    Conclusion

    -

    Summertime Saga is a fun and engaging game that you can play on your iPhone using an emulator. You can choose from various emulators that can run Android or Windows apps on your iPhone. You can also enjoy the game's graphics, sound, and touch controls on your mobile device. Moreover, you can use the walkthrough and cheats to help you with the game's storylines and events. Summertime Saga is a game that will keep you entertained and engaged for hours. If you are interested in playing Summertime Saga on your iPhone, download an emulator today and start your adventure!

    FAQs

    -

    Here are some of the frequently asked questions about Summertime Saga and how to play it on iPhone:

    -

    Is Summertime Saga free to play?

    -

    Yes, Summertime Saga is free to play and download. However, you can support the developers by becoming a patron on Patreon. You can get access to exclusive content, previews, polls, and more by pledging a certain amount per month. You can visit their Patreon page from https://www.patreon.com/summertimesaga.

    -

    Is Summertime Saga safe to play?

    -

    Summertime Saga is safe to play as long as you download it from the official website or a trusted emulator. However, you should be aware that the game contains mature content that is not suitable for minors or sensitive people. The game also has some bugs and glitches that may affect your gameplay. You should always backup your save files before playing or updating the game.

    -

    How long is Summertime Saga?

    -

    Summertime Saga is a long game that has multiple storylines, endings, and achievements. The game is still in development and new content is added regularly. The current version of the game has over 60 hours of gameplay. However, the length of the game may vary depending on your choices, actions, and preferences.

    -

    How to update Summertime Saga on iPhone?

    -

    To update Summertime Saga on your iPhone, you need to download the latest version of the game from the official website or the emulator. You can check the latest version of the game from https://summertimesaga.com/download. You can also follow their social media accounts or join their Discord server to get notified of new updates.

    -

    How to delete Summertime Saga on iPhone?

    -

    To delete Summertime Saga on your iPhone, you need to delete the emulator app that you used to play the game. You can also delete the ROM or ISO files that you downloaded from your device or iCloud Drive. However, if you want to keep your save files, you can transfer them to another device or cloud service before deleting the game.

    197e85843d
    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Final Destination 1 on Fzmovies A Guide to Downloading and Streaming the Classic Horror Movie.md b/spaces/1phancelerku/anime-remove-background/Final Destination 1 on Fzmovies A Guide to Downloading and Streaming the Classic Horror Movie.md
deleted file mode 100644
index a7b14b30056191cdcb4eac6e26be6cd121c8dd0d..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Final Destination 1 on Fzmovies A Guide to Downloading and Streaming the Classic Horror Movie.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
    -

    How to Download Final Destination 1 on Fzmovies

    |

    If you are a fan of horror movies, you might have heard of Final Destination 1, a 2000 American supernatural horror film that started a successful franchise. In this article, we will tell you what Final Destination 1 is about, why it is worth watching, and how you can download it on Fzmovies, a popular website for streaming and downloading movies for free.

    -

    What is Final Destination 1?

    -

    Final Destination 1 is the first installment in the Final Destination film series, which revolves around the concept of death as an inevitable force that cannot be cheated or escaped. The film was directed by James Wong, with a screenplay written by Wong, Glen Morgan, and Jeffrey Reddick, based on a story by Reddick. It stars Devon Sawa, Ali Larter, Kerr Smith, and Tony Todd.

    -

    download final destination 1 on fzmovies


    DOWNLOADhttps://jinyurl.com/2uNTAc



    -

    A brief summary of the plot

    -

    The film follows Alex Browning, a high school student who has a premonition of a plane crash that kills everyone on board. He manages to get off the plane along with six other people, but soon discovers that death is still after them. One by one, the survivors die in bizarre and gruesome accidents that seem to follow a certain pattern. Alex and his remaining friends must figure out how to stop death from claiming them.

    -

    The cast and crew of the film

    -

    The film features a talented cast of actors who deliver convincing performances. Devon Sawa plays Alex Browning, the protagonist who has the power of foresight. Ali Larter plays Clear Rivers, Alex's love interest and one of the survivors. Kerr Smith plays Carter Horton, Alex's rival and another survivor. Tony Todd plays William Bludworth, a mysterious mortician who knows about death's design.

    -

    The film was directed by James Wong, who also co-wrote the screenplay with Glen Morgan. Wong and Morgan are best known for their work on The X-Files, a popular sci-fi TV series. Jeffrey Reddick, who came up with the original story idea for Final Destination 1, also contributed to the screenplay. Reddick was inspired by a real-life plane crash that he read about in a newspaper.

    -

    How to download final destination 1 on fzmovies app
    -Final destination 1 full movie download on fzmovies net
    -Fzmovies final destination 1 free download in HD quality
    -Watch final destination 1 online on fzmovies website
    -Final destination 1 fzmovies download link
    -Fzmovies skipogist final destination 2000 collection
    -Final destination 1 movie series download from fzmovies
    -FzStudios app for final destination 1 download
    -Final destination 1 torrent download fzmovies
    -Final destination 1 mp4 download fzmovies
    -Final destination 1 subtitles download fzmovies
    -Final destination 1 dual audio download fzmovies
    -Final destination 1 hindi dubbed download fzmovies
    -Final destination 1 streaming on fzmovies
    -Final destination 1 review on fzmovies
    -Final destination 1 cast and crew on fzmovies
    -Final destination 1 trivia and facts on fzmovies
    -Final destination 1 box office collection on fzmovies
    -Final destination 1 awards and nominations on fzmovies
    -Final destination 1 behind the scenes on fzmovies
    -Final destination 1 deleted scenes on fzmovies
    -Final destination 1 soundtrack download on fzmovies
    -Final destination 1 poster and images on fzmovies
    -Final destination 1 trailer and clips on fzmovies
    -Final destination 1 rating and comments on fzmovies
    -Final destination 1 genre and tags on fzmovies
    -Final destination 1 release date and runtime on fzmovies
    -Final destination 1 director and writer on fzmovies
    -Final destination 1 plot and summary on fzmovies
    -Final destination 1 sequel and prequel on fzmovies
    -Final destination franchise download on fzmovies
    -Download final destination movies in order on fzmovies
    -Best final destination movie to download on fzmovies
    -Similar movies to final destination on fzmovies
    -Horror movies like final destination on fzmovies
    -Thriller movies like final destination on fzmovies
    -Mystery movies like final destination on fzmovies
    -Death scenes in final destination movies on fzmovies
    -Survival tips from final destination movies on fzmovies
    -Fan theories about final destination movies on fzmovies

    -

    The reception and legacy of the film

    -

    Final Destination 1 was released on March 17, 2000, and became a financial success, grossing over $112 million worldwide against a budget of $23 million. The film received mixed reviews from critics, who praised its suspenseful premise and creative death scenes, but criticized its flat characters and lack of logic. The film also received some awards and nominations, such as the Saturn Award for Best Horror Film and Best Performance by a Younger Actor for Sawa.

    -

    The film's success spawned a media franchise that includes four sequels, a series of novels, and comic books. The sequels follow different groups of people who cheat death in various ways, such as escaping a highway pile-up or a roller coaster derailment. The franchise is known for its elaborate and gory death sequences that involve everyday objects and situations.

    -

    What is Fzmovies?

    -

    Fzmovies is a website that allows users to stream and download movies for free online. It has a large collection of movies from different genres and countries, such as Hollywood, Bollywood, Nollywood, etc. Users can search for movies by title, genre, year, or quality. They can also request movies that are not available on the website. Fzmovies is compatible with various devices, such as smartphones, tablets, laptops, etc.

    -

    A brief introduction to the website

    -

    Fzmovies was launched in 2012 and has since become one of the most popular websites for movie lovers. It offers a user-friendly interface and a fast downloading speed. It also updates its content regularly and adds new releases as soon as possible. Fzmovies has a loyal fan base that visits the website frequently and leaves positive feedback.

    -

    The features and benefits of using Fzmovies

    -

    Some of the features and benefits of using Fzmovies are:

    - A large collection of movies from different genres and countries, such as Hollywood, Bollywood, and Nollywood.
    - Free streaming and downloading, with the option to request titles that are not yet available on the website.
    - Search by title, genre, year, or quality.
    - A user-friendly interface and a fast downloading speed.
    - Regular updates, with new releases added as soon as possible.
    - Compatibility with various devices, such as smartphones, tablets, and laptops.

    The risks and challenges of using Fzmovies

    -

    Despite its advantages, Fzmovies also has some risks and challenges that users should be aware of. Some of them are:

    - It is not legal: it violates the copyright laws of the movie industry.
    - Downloaded files may contain viruses, malware, or spyware that can harm your device or steal your personal information.
    - The site shows ads that can disrupt the streaming and downloading experience.
    - It may be blocked or banned in your country or region.

    How to download Final Destination 1 on Fzmovies?

    -

    If you want to download Final Destination 1 on Fzmovies, you can follow these simple steps:

    -

    The steps to follow

    -
      -
    1. Go to the official website of Fzmovies at https://www.fzmovies.net/.
    2. In the search box, type "Final Destination 1" and click on the search button.
    3. You will see a list of results that match your query. Click on the one that says "Final Destination (2000)".
    4. You will be directed to a page that shows the details of the movie, such as the genre, rating, synopsis, etc. Scroll down to the bottom of the page and click on "Download File".
    5. You will see a list of download links that vary in size and quality. Choose the one that suits your needs and click on it.
    6. You will be asked to verify that you are not a robot by completing a captcha. Follow the instructions and click on "Continue Download".
    7. Your download will start automatically. Wait for it to finish and enjoy watching Final Destination 1 on your device.
    -

    The tips and tricks to enhance the experience

    -

    To make your downloading process easier and faster, you can use some tips and tricks such as:

    - Using a VPN service to hide your IP address and reach the website if it is blocked in your region.
    - Using an ad-blocker to cut down on the ads shown while browsing and downloading.
    - Using a download manager to speed up downloads and resume them if they are interrupted.

    The alternatives to Fzmovies

    -

    If you are looking for other websites that offer similar services as Fzmovies, you can try some of these alternatives:

    - O2tvseries
    - Toxicwap
    - Mp4moviez
    - 123movies

    Conclusion

    -

    In conclusion, Final Destination 1 is a thrilling and entertaining horror movie that you can download on Fzmovies for free. Fzmovies is a website that offers a large collection of movies from different genres and languages. However, you should also be aware of the risks and challenges of using Fzmovies, such as legal issues, malware, ads, etc. You can also use some tips and tricks to enhance your downloading experience, such as using a VPN, an ad-blocker, a download manager, etc. Alternatively, you can try some other websites that offer similar services as Fzmovies.

    -

    We hope you enjoyed this article and learned something new. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy watching!

    -

    FAQs

    -

    Here are some frequently asked questions about Final Destination 1 and Fzmovies:

    -
      -
    1. Q: Is Final Destination 1 based on a true story?
       A: No, Final Destination 1 is not based on a true story. However, the writer of the film, Jeffrey Reddick, was inspired by a real-life plane crash that he read about in a newspaper.
    2. Q: How many movies are there in the Final Destination franchise?
       A: There are five movies in the Final Destination franchise: Final Destination (2000), Final Destination 2 (2003), Final Destination 3 (2006), The Final Destination (2009), and Final Destination 5 (2011).
    3. Q: Is Fzmovies legal and safe?
       A: No, Fzmovies is not legal and safe. It violates the copyright laws of the movie industry and may contain viruses, malware, or spyware that can harm your devices or steal your personal information.
    4. Q: How can I access Fzmovies if it is blocked or banned in my country or region?
       A: You can use a VPN service to access Fzmovies if it is blocked or banned in your country or region. A VPN service can hide your IP address and location and allow you to access any website anonymously.
    5. Q: What are some other websites like Fzmovies?
       A: Some other websites like Fzmovies are O2tvseries, Toxicwap, Mp4moviez, and 123movies.

    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Free APK Download for Gacha Life - The Most Popular Anime Game by Lunime.md b/spaces/1phancelerku/anime-remove-background/Free APK Download for Gacha Life - The Most Popular Anime Game by Lunime.md deleted file mode 100644 index 9d3e24d941aac4ee99e807cd00703c9750960d2b..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Free APK Download for Gacha Life - The Most Popular Anime Game by Lunime.md +++ /dev/null @@ -1,156 +0,0 @@ - -

    Gacha Life Download APK Free: How to Play and Enjoy This Anime Game

    -

    If you are a fan of anime, you might have heard of Gacha Life, a popular game that lets you create your own anime characters and stories. But did you know that you can download Gacha Life APK for free and play it on your Android device? In this article, we will show you how to do that, as well as how to play and enjoy this anime game.

    -

    What is Gacha Life?

    -

    Gacha Life is a game developed by Lunime, a company that specializes in making anime-style games. It was released in October 2018 for Android and iOS devices, and has since gained millions of downloads and positive reviews from players around the world. But what makes Gacha Life so appealing? Here are some of the reasons:

    -

    gacha life download apk free


    DOWNLOAD ->>->>->> https://jinyurl.com/2uNMmM



    -

    A game for anime fans and casual gamers

    -

    Gacha Life is a game that anime fans are sure to love because it will let you design and create your very own anime characters. You can choose from hundreds of clothing items, hairstyles, weapons, accessories, and more to dress up your characters in your favorite fashion outfits. You can also customize your characters' appearance, such as their skin color, eye color, hair color, and facial expressions. You can even mix and match different parts from different characters to create unique combinations.

    -

    But Gacha Life is not just a game for anime fans. It is also a game for casual gamers who just want something to play to destress and not overthink. You can play Gacha Life offline without Wi-Fi or internet connection, so you can enjoy it anytime and anywhere. You can also play mini-games within the game to earn gems, which you can use to gacha for rare items or gifts for your characters. The mini-games are simple and fun, such as Duck & Dodge, Phantom's Remix, Memory Match, and more.

    -

    A game for creating and customizing characters

    -

    Gacha Life is a game that lets you unleash your creativity by making an avatar that looks like you or an avatar designed according to your preferences. You can save up to 20 characters of your own design in different slots, and switch between them easily. You can also access more unique characters in the preset menu, where you can find characters created by Lunime or other players. You can also recover any character that you accidentally edited or deleted by visiting the preset menu.

    -

    But creating characters is not the only thing you can do in Gacha Life. You can also customize your characters' profile, where you can change or randomize their name, relationship, personality, and occupation. You can also change the background of your characters by choosing from over a hundred backgrounds available in the game. You can also zoom in or out your characters to get a better view of them.

    -

    A game for making stories and scenes

    -

    Gacha Life is a game that allows you to express your storytelling skills by making scenes and skits with your characters. You can use the studio mode to create your own scenes with up to 8 characters at a time. You can enter custom text for your characters and choose from many different poses and backgrounds. You can also use props such as weapons, hats, wings, tails, etc. to add more details to your scenes.

    -

    But studio mode is not the only way to make stories in Gacha Life. You can also use the skit maker mode to create your own stories with up to 2 characters per skit. You can easily combine multiple scenes to create sketches with dialogue and narration.

    How to Download Gacha Life APK for Free?

    -

    Gacha Life is a free game that you can download and play on your Android device. However, you need to make sure that you download the game from a safe and reliable source, as there are many fake or malicious websites that may try to trick you into downloading harmful files. Here are some of the ways to download Gacha Life APK for free:

    -

    The official sources for downloading the game

    -

    The best and safest way to download Gacha Life APK for free is to use the official sources provided by Lunime, the developer of the game. You can visit their website at Lunime.com and click on the Gacha Life banner to access the download page. You can also download the game from the Google Play Store by searching for Gacha Life or opening its official listing, Gacha Life - Apps on Google Play. These sources will ensure that you get the latest and updated version of the game, as well as protect your device from any malware or viruses.

    -

    gacha life apk free download for android
    -gacha life mod apk download free
    -gacha life old version apk free download
    -gacha life pc download free apk
    -gacha life 2 apk free download
    -gacha life apk download free latest version
    -gacha life apk free download no ads
    -gacha life apk free download offline
    -gacha life apk free download full version
    -gacha life apk free download unlimited gems
    -gacha life apk free download 2023
    -gacha life apk free download ios
    -gacha life apk free download windows 10
    -gacha life apk free download laptop
    -gacha life apk free download chromebook
    -gacha life apk free download uptodown
    -gacha life apk free download apkpure
    -gacha life apk free download android 1
    -gacha life apk free download mod menu
    -gacha life apk free download no verification
    -gacha life apk free download no wifi
    -gacha life apk free download online
    -gacha life apk free download update
    -gacha life apk free download hack
    -gacha life apk free download 1.1.4
    -gacha life anime dress up game apk free download
    -how to get gacha life for free on android apk
    -where can i download gacha life for free on android apk
    -how to install gacha life on android for free apk
    -how to play gacha life on android without downloading it for free apk
    -best site to download gacha life for android for free apk
    -how to update gacha life on android for free apk
    -how to get all items in gacha life for android for free apk
    -how to make your own character in gacha life for android for free apk
    -how to create your own story in gacha life for android for free apk
    -how to chat with other players in gacha life for android for free apk
    -how to play mini games in gacha life for android for free apk
    -how to collect gems in gacha life for android for free apk
    -how to get rare gifts in gacha life for android for free apk
    -how to customize your personal look in gacha life for android for free apk
    -how to change your hairstyle, eyes, mouth, and more in gacha life for android for free apk
    -how to enter the studio mode in gacha life for android for free apk
    -how to enter the skit maker in gacha life for android for free apk
    -how to enter the life mode in gacha life for android for free apk
    -how to explore different areas with your own characters in gacha life for android for free apk
    -how to discover new NPCs and learn more about them in gacha life for android for free apk
    -how to restart the game if you experience lag in gacha life for android for free apk
    -how to fix in-app-purchases issues in gacha life for android 6.0+ devices or rooted devices for free apk
    -how to like and follow Gacha Life on Facebook and join their group for more updates and news about the game for android users who downloaded it from Google Play Store or other sources as a APK file

    -

    The steps to install the game on your device

    -

    Once you have downloaded the Gacha Life APK file from a trusted source, you need to follow these steps to install the game on your device:

    -
      -
    1. Go to your device's settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from the Google Play Store.
    2. Locate the Gacha Life APK file in your device's storage and tap on it to start the installation process.
    3. Follow the instructions on the screen and wait for the installation to finish.
    4. Launch the game and enjoy!
    -

    Note: If you encounter any problems or errors during the installation, you may need to uninstall any previous versions of Gacha Life or clear your cache and data before installing the new version.

    -

    The precautions to avoid malware and viruses

    -

    While downloading Gacha Life APK for free is possible and easy, you also need to be cautious about the risks of downloading files from unknown sources. Here are some of the precautions that you should take to avoid malware and viruses:

    - Download the APK only from a safe and reliable source, such as the official Lunime website or the Google Play Store.
    - Be wary of fake or malicious websites that may try to trick you into downloading harmful files.
    - Stick to the official sources, which give you the latest version of the game and protect your device from malware and viruses; as an extra check, you can also compare the file's checksum, as sketched below.
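    The article itself contains no code, so the following is purely an illustrative sketch of the checksum check mentioned above, not something Lunime or the article provides: the file name and the expected hash are made-up placeholders that you would replace with your real download path and a checksum obtained from a source you trust. It uses only Python's standard library.

    ```python
    import hashlib
    from pathlib import Path

    def sha256_of_file(path: Path, chunk_size: int = 1 << 20) -> str:
        """Return the SHA-256 hex digest of a file, read in chunks to keep memory use low."""
        digest = hashlib.sha256()
        with path.open("rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    # Hypothetical values: replace with your actual download path and a checksum
    # published by a source you trust (e.g. the developer's official page).
    apk_path = Path("GachaLife.apk")
    expected = "0000000000000000000000000000000000000000000000000000000000000000"

    actual = sha256_of_file(apk_path)
    print("SHA-256:", actual)
    print("Matches expected checksum:", actual == expected)
    ```

    If the computed digest does not match the published one, the file was corrupted or tampered with and should not be installed.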

    How to Play and Enjoy Gacha Life?

    -

    Now that you have downloaded and installed Gacha Life APK for free, you might be wondering how to play and enjoy this anime game. Well, there are many things that you can do in Gacha Life, and you can explore them at your own pace and preference. Here are some of the main features of the game and some tips and tricks to help you get the most out of it:

    -

    The main features of the game

    -

    Gacha Life is a game that has many modes and options for you to choose from, depending on what you want to do. Here are some of the main features of the game:

    | Feature | Description |
    | --- | --- |
    | Home | This is where you can access the other modes of the game, such as Dress Up, Studio, Gacha, Life, etc. You can also see your character's level, energy, and gems here. |
    | Dress Up | This is where you can create and customize your characters. You can change their clothes, accessories, hair, eyes, etc. You can also save your characters in different slots or load them from the preset menu. |
    | Studio | This is where you can make scenes and stories with your characters. You can enter custom text, choose poses and backgrounds, and use props. You can also save your scenes or load them from the gallery. |
    | Gacha | This is where you can gacha for rare items or gifts for your characters. You can use gems or tickets to gacha for different types of items, such as clothing, accessories, pets, etc. You can also trade your items with other players or NPCs. |
    | Life | This is where you can interact with other characters in different locations. You can talk to them, give them gifts, play mini-games with them, or ask them questions. You can also unlock new locations by increasing your friendship level with them. |
    | Games | This is where you can play mini-games to earn gems or tickets. There are 8 mini-games available in the game, such as Duck & Dodge, Phantom's Remix, Memory Match, etc. You can also see your high scores and achievements here. |
    | Chat | This is where you can chat with other players online. You can join different chat rooms or create your own. You can also use stickers or emojis to express yourself. |
    | Options | This is where you can change the settings of the game, such as the volume, language, quality, etc. You can also see the credits or contact the support team here. |

    The tips and tricks to level up and earn gems


    Gacha Life is a game that requires you to level up your character and earn gems to unlock more features and items. Here are some of the tips and tricks that you can use to level up and earn gems faster:

    - Play the mini-games in the Games mode, such as Duck & Dodge, Phantom's Remix, or Memory Match, to earn gems and tickets.
    - Talk to the characters in the Life mode, give them gifts, and raise your friendship level with them to unlock new locations.
    - Spend your gems or tickets in the Gacha mode to get rare items and gifts for your characters.

    The fun and creative ways to use the game

    -

    Gacha Life is a game that offers you a lot of freedom and possibilities to use it in fun and creative ways. Here are some of the examples of how you can use the game:

    - Create an avatar that looks like you, or design original characters dressed in your favorite outfits and hairstyles.
    - Build scenes in the Studio mode with custom text, poses, backgrounds, and props.
    - Combine scenes in the skit maker to tell short stories with dialogue and narration.
    - Chat with other players online in the Chat mode.

    Conclusion

    -

    Gacha Life is a game that lets you create your own anime characters and stories. It is a game that is free to download and play on your Android device, as long as you follow the steps and precautions mentioned above. It is also a game that has many features and modes for you to explore and enjoy, as well as many ways for you to use it in fun and creative ways. If you are an anime fan or a casual gamer who likes to express yourself through games, then Gacha Life is a game that you should try out.

    -

    FAQs

    -

    Here are some of the frequently asked questions about Gacha Life:

    -
      -
    1. Is Gacha Life safe for kids?
       Gacha Life is rated for ages 9 and up by the Google Play Store. However, some of the content in the game may not be suitable for younger kids, such as violence, profanity, suggestive themes, etc. Therefore, parental guidance and supervision are recommended when playing Gacha Life.
    2. Is Gacha Life online or offline?
       Gacha Life can be played both online and offline. You can play most of the features and modes of the game offline without Wi-Fi or internet connection, such as dress up, studio, gacha, life, etc. However, some of the features and modes require an internet connection to access, such as chat, games, etc.
    3. How do I update Gacha Life?
       To update Gacha Life, you need to visit the official sources provided by Lunime, such as the website or the Google Play Store, and download the latest version of the game. You can also check for updates within the game by going to the options menu and tapping on the check for updates button. You may need to uninstall any previous versions of Gacha Life or clear your cache and data before installing the new version.
    4. How do I delete Gacha Life?
       To delete Gacha Life, you need to go to your device's settings and find the apps or applications menu. Then, you need to locate Gacha Life and tap on it to open its details. Then, you need to tap on the uninstall button and confirm your action. This will remove Gacha Life from your device, along with its data and files.
    5. How do I contact Gacha Life support?
       To contact Gacha Life support, you need to go to the options menu in the game and tap on the contact us button. This will open a form where you can enter your name, email, subject, and message. You can also attach a screenshot if needed. Then, you need to tap on the send button and wait for a reply from the support team.

      -

    -
    -
    \ No newline at end of file diff --git a/spaces/2023Liu2023/bingo/next.config.js b/spaces/2023Liu2023/bingo/next.config.js deleted file mode 100644 index 0e6ccd7fbc91d0459eaaff3e968ce0556789c605..0000000000000000000000000000000000000000 --- a/spaces/2023Liu2023/bingo/next.config.js +++ /dev/null @@ -1,38 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - // output: 'export', - // assetPrefix: '.', - webpack: (config, { isServer }) => { - if (!isServer) { - config.resolve = { - ...config.resolve, - fallback: { - 'bufferutil': false, - 'utf-8-validate': false, - http: false, - https: false, - stream: false, - // fixes proxy-agent dependencies - net: false, - dns: false, - tls: false, - assert: false, - // fixes next-i18next dependencies - path: false, - fs: false, - // fixes mapbox dependencies - events: false, - // fixes sentry dependencies - process: false - } - }; - } - config.module.exprContextCritical = false; - - return config; - }, -} - -module.exports = (...args) => { - return nextConfig -} diff --git a/spaces/7hao/bingo/src/components/voice.tsx b/spaces/7hao/bingo/src/components/voice.tsx deleted file mode 100644 index 074d0e145229947282a472bd84f6578cf0b3c71c..0000000000000000000000000000000000000000 --- a/spaces/7hao/bingo/src/components/voice.tsx +++ /dev/null @@ -1,52 +0,0 @@ -import React, { useEffect } from 'react' -import { useSetAtom } from 'jotai' -import { useBing } from '@/lib/hooks/use-bing' -import Image from 'next/image' -import VoiceIcon from '@/assets/images/voice.svg' -import VoiceButton from './ui/voice' -import { SR } from '@/lib/bots/bing/sr' -import { voiceListenAtom } from '@/state' - -const sr = new SR(['发送', '清空', '退出']) - -const Voice = ({ setInput, input, sendMessage, isSpeaking }: Pick, 'setInput' | 'sendMessage' | 'input' | 'isSpeaking'>) => { - const setListen = useSetAtom(voiceListenAtom) - useEffect(() => { - if (sr.listening) return - sr.transcript = !isSpeaking - }, [isSpeaking]) - - useEffect(() => { - sr.onchange = (msg: string, command?: string) => { - switch (command) { - case '退出': - sr.stop() - break; - case '发送': - sendMessage(input) - case '清空': - setInput('') - break; - default: - setInput(input + msg) - } - } - }, [input]) - - const switchSR = (enable: boolean = false) => { - setListen(enable) - if (enable) { - sr.start() - } else { - sr.stop() - } - } - - return sr.listening ? ( - switchSR(false)} /> - ) : ( - start voice switchSR(true)} /> - ) -}; - -export default Voice; diff --git a/spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/layers_537238KB.py b/spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/layers_537238KB.py deleted file mode 100644 index 9b127bc6427f5c60c8cf85603a3d8a093c3501c4..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/layers_537238KB.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv6 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv7 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/ADRXtractor/ADR_Xtractor/app.py b/spaces/ADRXtractor/ADR_Xtractor/app.py deleted file mode 100644 index 435b1ce88631d579f6684880bae7b9d2b5088baf..0000000000000000000000000000000000000000 --- a/spaces/ADRXtractor/ADR_Xtractor/app.py +++ /dev/null @@ -1,46 +0,0 @@ -import gradio as gr -from spacy import displacy - 
-from transformers import AutoTokenizer, AutoModelForTokenClassification,pipeline -tokenizer = AutoTokenizer.from_pretrained("abhibisht89/spanbert-large-cased-finetuned-ade_corpus_v2") -model = AutoModelForTokenClassification.from_pretrained("abhibisht89/spanbert-large-cased-finetuned-ade_corpus_v2").to('cpu') -adr_ner_model = pipeline(task="ner", model=model, tokenizer=tokenizer,grouped_entities=True) - -def get_adr_from_text(sentence): - tokens = adr_ner_model(sentence) - entities = [] - - for token in tokens: - label = token["entity_group"] - if label != "O": - token["label"] = label - entities.append(token) - - params = [{"text": sentence, - "ents": entities, - "title": None}] - - html = displacy.render(params, style="ent", manual=True, options={ - "colors": { - "DRUG": "#f08080", - "ADR": "#9bddff", - }, - }) - return html - -exp=["Abortion, miscarriage or uterine hemorrhage associated with misoprostol (Cytotec), a labor-inducing drug.", - "Addiction to many sedatives and analgesics, such as diazepam, morphine, etc.", - "Birth defects associated with thalidomide", - "Bleeding of the intestine associated with aspirin therapy", - "Cardiovascular disease associated with COX-2 inhibitors (i.e. Vioxx)", - "Deafness and kidney failure associated with gentamicin (an antibiotic)", - "Having fever after taking paracetamol"] - -desc="An adverse drug reaction (ADR) can be defined as an appreciably harmful or unpleasant reaction resulting from an intervention related to the use of a medicinal product.\ - The goal of this project is to extracts the adverse drug reaction from unstructured text with the Drug." - -inp=gr.inputs.Textbox(lines=5, placeholder=None, default="", label="text to extract adverse drug reaction and drug mention") -out=gr.outputs.HTML(label=None) - -iface = gr.Interface(fn=get_adr_from_text, inputs=inp, outputs=out,examples=exp,article=desc,title="Adverse Drug Reaction Xtractor",theme="huggingface",layout='horizontal') -iface.launch() \ No newline at end of file diff --git a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/build_vocab.py b/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/build_vocab.py deleted file mode 100644 index e9fab23bc2c48203e541d356dc172e1fdee8f113..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/build_vocab.py +++ /dev/null @@ -1,153 +0,0 @@ -import json -from tqdm import tqdm -import logging -import pickle -from collections import Counter -import re -import fire - - -class Vocabulary(object): - """Simple vocabulary wrapper.""" - def __init__(self): - self.word2idx = {} - self.idx2word = {} - self.idx = 0 - - def add_word(self, word): - if not word in self.word2idx: - self.word2idx[word] = self.idx - self.idx2word[self.idx] = word - self.idx += 1 - - def __call__(self, word): - if not word in self.word2idx: - return self.word2idx[""] - return self.word2idx[word] - - def __getitem__(self, word_id): - return self.idx2word[word_id] - - def __len__(self): - return len(self.word2idx) - - -def build_vocab(input_json: str, - threshold: int, - keep_punctuation: bool, - host_address: str, - character_level: bool = False, - zh: bool = True ): - """Build vocabulary from csv file with a given threshold to drop all counts < threshold - - Args: - input_json(string): Preprossessed json file. Structure like this: - { - 'audios': [ - { - 'audio_id': 'xxx', - 'captions': [ - { - 'caption': 'xxx', - 'cap_id': 'xxx' - } - ] - }, - ... 
- ] - } - threshold (int): Threshold to drop all words with counts < threshold - keep_punctuation (bool): Includes or excludes punctuation. - - Returns: - vocab (Vocab): Object with the processed vocabulary -""" - data = json.load(open(input_json, "r"))["audios"] - counter = Counter() - pretokenized = "tokens" in data[0]["captions"][0] - - if zh: - from nltk.parse.corenlp import CoreNLPParser - from zhon.hanzi import punctuation - if not pretokenized: - parser = CoreNLPParser(host_address) - for audio_idx in tqdm(range(len(data)), leave=False, ascii=True): - for cap_idx in range(len(data[audio_idx]["captions"])): - if pretokenized: - tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split() - else: - caption = data[audio_idx]["captions"][cap_idx]["caption"] - # Remove all punctuations - if not keep_punctuation: - caption = re.sub("[{}]".format(punctuation), "", caption) - if character_level: - tokens = list(caption) - else: - tokens = list(parser.tokenize(caption)) - data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens) - counter.update(tokens) - else: - if pretokenized: - for audio_idx in tqdm(range(len(data)), leave=False, ascii=True): - for cap_idx in range(len(data[audio_idx]["captions"])): - tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split() - counter.update(tokens) - else: - from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer - captions = {} - for audio_idx in range(len(data)): - audio_id = data[audio_idx]["audio_id"] - captions[audio_id] = [] - for cap_idx in range(len(data[audio_idx]["captions"])): - caption = data[audio_idx]["captions"][cap_idx]["caption"] - captions[audio_id].append({ - "audio_id": audio_id, - "id": cap_idx, - "caption": caption - }) - tokenizer = PTBTokenizer() - captions = tokenizer.tokenize(captions) - for audio_idx in tqdm(range(len(data)), leave=False, ascii=True): - audio_id = data[audio_idx]["audio_id"] - for cap_idx in range(len(data[audio_idx]["captions"])): - tokens = captions[audio_id][cap_idx] - data[audio_idx]["captions"][cap_idx]["tokens"] = tokens - counter.update(tokens.split(" ")) - - if not pretokenized: - json.dump({ "audios": data }, open(input_json, "w"), indent=4, ensure_ascii=not zh) - words = [word for word, cnt in counter.items() if cnt >= threshold] - - # Create a vocab wrapper and add some special tokens. - vocab = Vocabulary() - vocab.add_word("") - vocab.add_word("") - vocab.add_word("") - vocab.add_word("") - - # Add the words to the vocabulary. 
- for word in words: - vocab.add_word(word) - return vocab - - -def process(input_json: str, - output_file: str, - threshold: int = 1, - keep_punctuation: bool = False, - character_level: bool = False, - host_address: str = "http://localhost:9000", - zh: bool = False): - logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" - logging.basicConfig(level=logging.INFO, format=logfmt) - logging.info("Build Vocab") - vocabulary = build_vocab( - input_json=input_json, threshold=threshold, keep_punctuation=keep_punctuation, - host_address=host_address, character_level=character_level, zh=zh) - pickle.dump(vocabulary, open(output_file, "wb")) - logging.info("Total vocabulary size: {}".format(len(vocabulary))) - logging.info("Saved vocab to '{}'".format(output_file)) - - -if __name__ == '__main__': - fire.Fire(process) diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/wav_evaluation/models/CLAPWrapper.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/wav_evaluation/models/CLAPWrapper.py deleted file mode 100644 index ce7931d0321b1ecf21142efe9c9f609e277189ed..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/wav_evaluation/models/CLAPWrapper.py +++ /dev/null @@ -1,256 +0,0 @@ - -import random -import torchaudio -from torch._six import string_classes -import collections -import re -import torch.nn.functional as F -import numpy as np -from transformers import AutoTokenizer -from wav_evaluation.models.utils import read_config_as_args -from wav_evaluation.models.clap import CLAP -import math -import torchaudio.transforms as T -import os -import torch -from importlib_resources import files - - -class CLAPWrapper(): - """ - A class for interfacing CLAP model. - """ - - def __init__(self, model_fp,config_path, use_cuda=False): - self.np_str_obj_array_pattern = re.compile(r'[SaUO]') - self.file_path = os.path.realpath(__file__) - self.default_collate_err_msg_format = ( - "default_collate: batch must contain tensors, numpy arrays, numbers, " - "dicts or lists; found {}") - with open(config_path,'r') as f: - self.config_as_str = f.read() - self.model_fp = model_fp - self.use_cuda = use_cuda - self.clap, self.tokenizer, self.args = self.load_clap() - - def load_clap(self): - r"""Load CLAP model with args from config file""" - - args = read_config_as_args(self.config_as_str, is_config_str=True) - - if 'bert' in args.text_model: - self.token_keys = ['input_ids', 'token_type_ids', 'attention_mask'] - else: - self.token_keys = ['input_ids', 'attention_mask'] - - clap = CLAP( - audioenc_name=args.audioenc_name, - sample_rate=args.sampling_rate, - window_size=args.window_size, - hop_size=args.hop_size, - mel_bins=args.mel_bins, - fmin=args.fmin, - fmax=args.fmax, - classes_num=args.num_classes, - out_emb=args.out_emb, - text_model=args.text_model, - transformer_embed_dim=args.transformer_embed_dim, - d_proj=args.d_proj - ) - - - # Load pretrained weights for model - model_state_dict = torch.load(self.model_fp, map_location=torch.device('cpu'))['model'] - clap.load_state_dict(model_state_dict) - clap.eval() # set clap in eval mode - tokenizer = AutoTokenizer.from_pretrained(args.text_model) - - if self.use_cuda and torch.cuda.is_available(): - clap = clap.cuda() - - return clap, tokenizer, args - - def default_collate(self, batch): - r"""Puts each data field into a tensor with outer dimension batch size""" - elem = batch[0] - elem_type = type(elem) - if isinstance(elem, torch.Tensor): - out = None - if 
torch.utils.data.get_worker_info() is not None: - # If we're in a background process, concatenate directly into a - # shared memory tensor to avoid an extra copy - numel = sum([x.numel() for x in batch]) - storage = elem.storage()._new_shared(numel) - out = elem.new(storage) - return torch.stack(batch, 0, out=out) - elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ - and elem_type.__name__ != 'string_': - if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap': - # array of string classes and object - if self.np_str_obj_array_pattern.search(elem.dtype.str) is not None: - raise TypeError( - self.default_collate_err_msg_format.format(elem.dtype)) - - return self.default_collate([torch.as_tensor(b) for b in batch]) - elif elem.shape == (): # scalars - return torch.as_tensor(batch) - elif isinstance(elem, float): - return torch.tensor(batch, dtype=torch.float64) - elif isinstance(elem, int): - return torch.tensor(batch) - elif isinstance(elem, string_classes): - return batch - elif isinstance(elem, collections.abc.Mapping): - return {key: self.default_collate([d[key] for d in batch]) for key in elem} - elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple - return elem_type(*(self.default_collate(samples) for samples in zip(*batch))) - elif isinstance(elem, collections.abc.Sequence): - # check to make sure that the elements in batch have consistent size - it = iter(batch) - elem_size = len(next(it)) - if not all(len(elem) == elem_size for elem in it): - raise RuntimeError( - 'each element in list of batch should be of equal size') - transposed = zip(*batch) - return [self.default_collate(samples) for samples in transposed] - - raise TypeError(self.default_collate_err_msg_format.format(elem_type)) - - def resample_and_duration(self,wav_sr,audio_duration,resample=False): - audio_time_series,sample_rate = wav_sr - resample_rate = self.args.sampling_rate - if resample: - resampler = T.Resample(sample_rate, resample_rate) - audio_time_series = resampler(audio_time_series) - audio_time_series = audio_time_series.reshape(-1) - - # audio_time_series is shorter than predefined audio duration, - # so audio_time_series is extended - if audio_duration*sample_rate >= audio_time_series.shape[0]: - repeat_factor = int(np.ceil((audio_duration*sample_rate) / - audio_time_series.shape[0])) - # Repeat audio_time_series by repeat_factor to match audio_duration - audio_time_series = audio_time_series.repeat(repeat_factor) - # remove excess part of audio_time_series - audio_time_series = audio_time_series[0:audio_duration*sample_rate] - else: - # audio_time_series is longer than predefined audio duration, - # so audio_time_series is trimmed - start_index = random.randrange( - audio_time_series.shape[0] - audio_duration*sample_rate) - audio_time_series = audio_time_series[start_index:start_index + - audio_duration*sample_rate] - return torch.FloatTensor(audio_time_series) - - def load_audio_into_tensor(self, audio_path, audio_duration, resample=False): - r"""Loads audio file and returns raw audio.""" - # Randomly sample a segment of audio_duration from the clip or pad to match duration - audio_time_series, sample_rate = torchaudio.load(audio_path) - return self.resample_and_duration((audio_time_series, sample_rate),audio_duration,resample) - - def preprocess_audio(self, audio_files, resample): - r"""Load list of audio files and return raw audio""" - audio_tensors = [] - for audio_file in audio_files: - if isinstance(audio_file,str): - audio_tensor = 
self.load_audio_into_tensor(audio_file, self.args.duration, resample) - elif isinstance(audio_file,tuple): - audio_tensor = self.resample_and_duration(audio_file, self.args.duration, resample) - else: - raise TypeError(f"type of audiofile is {type(audio_file)},which is not supported") - audio_tensor = audio_tensor.reshape( - 1, -1).cuda() if self.use_cuda and torch.cuda.is_available() else audio_tensor.reshape(1, -1) - audio_tensors.append(audio_tensor) - return self.default_collate(audio_tensors) - - def preprocess_text(self, text_queries): - r"""Load list of class labels and return tokenized text""" - tokenized_texts = [] - for ttext in text_queries: - tok = self.tokenizer.encode_plus( - text=ttext, add_special_tokens=True, max_length=self.args.text_len, padding="max_length", return_tensors="pt") # max_length=self.args.text_len, padding=True, - for key in self.token_keys: - tok[key] = tok[key].reshape(-1).cuda() if self.use_cuda and torch.cuda.is_available() else tok[key].reshape(-1) - tokenized_texts.append(tok) - return self.default_collate(tokenized_texts) - - def get_text_embeddings(self, class_labels): - r"""Load list of class labels and return text embeddings""" - preprocessed_text = self.preprocess_text(class_labels) - text_embeddings = self._get_text_embeddings(preprocessed_text) - text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True) - return text_embeddings - - def get_audio_embeddings(self, audio_files, resample): - r"""Load list of audio files and return a audio embeddings""" - preprocessed_audio = self.preprocess_audio(audio_files, resample) - audio_embeddings = self._get_audio_embeddings(preprocessed_audio) - audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True) - return audio_embeddings - - def _get_text_embeddings(self, preprocessed_text): - r"""Load preprocessed text and return text embeddings""" - with torch.no_grad(): - text_embeddings = self.clap.caption_encoder(preprocessed_text) - text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True) - return text_embeddings - - def _get_audio_embeddings(self, preprocessed_audio): - r"""Load preprocessed audio and return a audio embeddings""" - with torch.no_grad(): - preprocessed_audio = preprocessed_audio.reshape( - preprocessed_audio.shape[0], preprocessed_audio.shape[2]) - #Append [0] the audio emebdding, [1] has output class probabilities - audio_embeddings = self.clap.audio_encoder(preprocessed_audio)[0] - audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True) - return audio_embeddings - - def compute_similarity(self, audio_embeddings, text_embeddings,use_logit_scale = True): - r"""Compute similarity between text and audio embeddings""" - if use_logit_scale: - logit_scale = self.clap.logit_scale.exp() - similarity = logit_scale*text_embeddings @ audio_embeddings.T - else: - similarity = text_embeddings @ audio_embeddings.T - return similarity.T - - def cal_clap_score(self,txt,audio_path): - text_embeddings = self.get_text_embeddings([txt])# 经过了norm的embedding - audio_embeddings = self.get_audio_embeddings([audio_path], resample=True)# 这一步比较耗时,读取音频并重采样到44100 - score = self.compute_similarity(audio_embeddings, text_embeddings,use_logit_scale=False).squeeze().cpu().numpy() - return score - - def _generic_batch_inference(self, func, *args): - r"""Process audio and/or text per batch""" - input_tmp = args[0] - batch_size = args[-1] - # args[0] has audio_files, args[1] has class_labels - inputs = [args[0], 
args[1]] if len(args) == 3 else [args[0]] - args0_len = len(args[0]) - # compute text_embeddings once for all the audio_files batches - if len(inputs) == 2: - text_embeddings = self.get_text_embeddings(args[1]) - inputs = [args[0], args[1], text_embeddings] - dataset_idx = 0 - for _ in range(math.ceil(args0_len/batch_size)): - next_batch_idx = dataset_idx + batch_size - # batch size is bigger than available audio/text items - if next_batch_idx >= args0_len: - inputs[0] = input_tmp[dataset_idx:] - return func(*tuple(inputs)) - else: - inputs[0] = input_tmp[dataset_idx:next_batch_idx] - yield func(*tuple(inputs)) - dataset_idx = next_batch_idx - - def get_audio_embeddings_per_batch(self, audio_files, batch_size): - r"""Load preprocessed audio and return a audio embeddings per batch""" - return self._generic_batch_inference(self.get_audio_embeddings, audio_files, batch_size) - - def get_text_embeddings_per_batch(self, class_labels, batch_size): - r"""Load preprocessed text and return text embeddings per batch""" - return self._generic_batch_inference(self.get_text_embeddings, class_labels, batch_size) - - def classify_audio_files_per_batch(self, audio_files, class_labels, batch_size): - r"""Compute classification probabilities for each audio recording in a batch and each class label""" - return self._generic_batch_inference(self.classify_audio_files, audio_files, class_labels, batch_size) diff --git a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Vercel.py b/spaces/ASJMO/freegpt/g4f/Provider/Providers/Vercel.py deleted file mode 100644 index e5df9cf017e4c1a265f5c9d5e48eb5c10a56e60a..0000000000000000000000000000000000000000 --- a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Vercel.py +++ /dev/null @@ -1,162 +0,0 @@ -import os -import json -import base64 -import execjs -import queue -import threading - -from curl_cffi import requests -from ...typing import sha256, Dict, get_type_hints - -url = 'https://play.vercel.ai' -supports_stream = True -needs_auth = False - -models = { - 'claude-instant-v1': 'anthropic:claude-instant-v1', - 'claude-v1': 'anthropic:claude-v1', - 'alpaca-7b': 'replicate:replicate/alpaca-7b', - 'stablelm-tuned-alpha-7b': 'replicate:stability-ai/stablelm-tuned-alpha-7b', - 'bloom': 'huggingface:bigscience/bloom', - 'bloomz': 'huggingface:bigscience/bloomz', - 'flan-t5-xxl': 'huggingface:google/flan-t5-xxl', - 'flan-ul2': 'huggingface:google/flan-ul2', - 'gpt-neox-20b': 'huggingface:EleutherAI/gpt-neox-20b', - 'oasst-sft-4-pythia-12b-epoch-3.5': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', - 'santacoder': 'huggingface:bigcode/santacoder', - 'command-medium-nightly': 'cohere:command-medium-nightly', - 'command-xlarge-nightly': 'cohere:command-xlarge-nightly', - 'code-cushman-001': 'openai:code-cushman-001', - 'code-davinci-002': 'openai:code-davinci-002', - 'gpt-3.5-turbo': 'openai:gpt-3.5-turbo', - 'text-ada-001': 'openai:text-ada-001', - 'text-babbage-001': 'openai:text-babbage-001', - 'text-curie-001': 'openai:text-curie-001', - 'text-davinci-002': 'openai:text-davinci-002', - 'text-davinci-003': 'openai:text-davinci-003' -} -model = models.keys() - -vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': 
{'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-instant-v1'}, 'anthropic:claude-v1': {'id': 'anthropic:claude-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-v1'}, 'replicate:replicate/alpaca-7b': {'id': 'replicate:replicate/alpaca-7b', 'provider': 'replicate', 'providerHumanName': 'Replicate', 'makerHumanName': 'Stanford', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '2014ee1247354f2e81c0b3650d71ca715bc1e610189855f134c30ecb841fae21', 'name': 'alpaca-7b'}, 'replicate:stability-ai/stablelm-tuned-alpha-7b': {'id': 'replicate:stability-ai/stablelm-tuned-alpha-7b', 'provider': 'replicate', 'makerHumanName': 'StabilityAI', 'providerHumanName': 'Replicate', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '4a9a32b4fd86c2d047f1d271fa93972683ec6ef1cf82f402bd021f267330b50b', 'name': 'stablelm-tuned-alpha-7b'}, 'huggingface:bigscience/bloom': {'id': 'huggingface:bigscience/bloom', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': "Do NOT talk to Bloom as an entity, it's not a chatbot but a webpage/blog/article completion model. For the best results: mimic a few words of a webpage similar to the content you want to generate. Start a sentence as if YOU were writing a blog, webpage, math post, coding article and Bloom will generate a coherent follow-up.", 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloom'}, 'huggingface:bigscience/bloomz': {'id': 'huggingface:bigscience/bloomz', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': 'We recommend using the model to perform tasks expressed in natural language. 
For example, given the prompt "Translate to English: Je t\'aime.", the model will most likely answer "I love you.".', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloomz'}, 'huggingface:google/flan-t5-xxl': {'id': 'huggingface:google/flan-t5-xxl', 'provider': 'huggingface', 'makerHumanName': 'Google', 'providerHumanName': 'HuggingFace', 'name': 'flan-t5-xxl', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}}, 'huggingface:google/flan-ul2': {'id': 'huggingface:google/flan-ul2', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'Google', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'flan-ul2'}, 'huggingface:EleutherAI/gpt-neox-20b': {'id': 'huggingface:EleutherAI/gpt-neox-20b', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'EleutherAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-neox-20b'}, 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'OpenAssistant', 'parameters': {'maximumLength': {'value': 200, 'range': [50, 1024]}, 'typicalP': {'value': 0.2, 'range': [0.1, 0.99]}, 'repetitionPenalty': {'value': 1, 'range': [0.1, 2]}}, 'name': 'oasst-sft-4-pythia-12b-epoch-3.5'}, 'huggingface:bigcode/santacoder': { - 'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. 
# the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 
'OpenAI', 'name': 'text-ada-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}} - - -# based on https://github.com/ading2210/vercel-llm-api // modified -class Client: - def __init__(self): - self.session = requests.Session() - self.headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36', - 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8', - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'en-US,en;q=0.5', - 'Te': 'trailers', - 'Upgrade-Insecure-Requests': '1' - } - self.session.headers.update(self.headers) - - def get_token(self): - b64 = self.session.get('https://sdk.vercel.ai/openai.jpeg').text - data = json.loads(base64.b64decode(b64)) - - code = 'const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}' % ( - data['c'], data['a']) - - token_string = json.dumps(separators=(',', ':'), - obj={'r': execjs.compile(code).call('token'), 't': data['t']}) - - return base64.b64encode(token_string.encode()).decode() - - def get_default_params(self, model_id): - return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()} - - def generate(self, model_id: str, prompt: str, params: dict = {}): - if not ':' in model_id: - model_id = models[model_id] - - defaults = 
self.get_default_params(model_id) - - payload = defaults | params | { - 'prompt': prompt, - 'model': model_id, - } - - headers = self.headers | { - 'Accept-Encoding': 'gzip, deflate, br', - 'Custom-Encoding': self.get_token(), - 'Host': 'sdk.vercel.ai', - 'Origin': 'https://sdk.vercel.ai', - 'Referrer': 'https://sdk.vercel.ai', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-origin', - } - - chunks_queue = queue.Queue() - error = None - response = None - - def callback(data): - chunks_queue.put(data.decode()) - - def request_thread(): - nonlocal response, error - for _ in range(3): - try: - response = self.session.post('https://sdk.vercel.ai/api/generate', - json=payload, headers=headers, content_callback=callback) - response.raise_for_status() - - except Exception as e: - if _ == 2: - error = e - - else: - continue - - thread = threading.Thread(target=request_thread, daemon=True) - thread.start() - - text = '' - index = 0 - while True: - try: - chunk = chunks_queue.get(block=True, timeout=0.1) - - except queue.Empty: - if error: - raise error - - elif response: - break - - else: - continue - - text += chunk - lines = text.split('\n') - - if len(lines) - 1 > index: - new = lines[index:-1] - for word in new: - yield json.loads(word) - index = len(lines) - 1 - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - yield 'Vercel is currently not working.' - return - - conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n' - - for message in messages: - conversation += '%s: %s\n' % (message['role'], message['content']) - - conversation += 'assistant: ' - - completion = Client().generate(model, conversation) - - for token in completion: - yield token - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-fp16_in1k.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-fp16_in1k.py deleted file mode 100644 index 19ee6ee4f82ec02f34628bdf8dd74a379798cc67..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-fp16_in1k.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = ['./resnet50_8xb32_in1k.py'] - -# schedule settings -optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=512.) 
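For context on the fp16 config removed just above: AmpOptimWrapper with loss_scale=512. enables mixed-precision training with a fixed loss scale. As a rough, simplified sketch of what a static loss scale does (plain PyTorch here, not mmpretrain's actual wrapper), the idea is to scale the loss up before backward so small fp16 gradients don't underflow, then unscale the gradients before the optimizer step:

# Simplified sketch of static loss scaling; illustrative only, not mmpretrain's AmpOptimWrapper.
import torch

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_scale = 512.0

x, y = torch.randn(4, 8), torch.randn(4, 2)
loss = torch.nn.functional.mse_loss(model(x), y)

optimizer.zero_grad()
(loss * loss_scale).backward()       # scale up so tiny fp16 gradients don't flush to zero
for p in model.parameters():
    if p.grad is not None:
        p.grad.div_(loss_scale)      # unscale before the optimizer step
optimizer.step()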
diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Wewordle.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Wewordle.py deleted file mode 100644 index c30887fb03b3ee53ed620d3e8259ae2a9245f934..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Wewordle.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import annotations - -import random, string, time -from aiohttp import ClientSession - -from ..base_provider import AsyncProvider - - -class Wewordle(AsyncProvider): - url = "https://wewordle.org" - working = False - supports_gpt_35_turbo = True - - @classmethod - async def create_async( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> str: - - headers = { - "accept" : "*/*", - "pragma" : "no-cache", - "Content-Type" : "application/json", - "Connection" : "keep-alive" - } - - _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16)) - _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31)) - _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime()) - data = { - "user" : _user_id, - "messages" : messages, - "subscriber": { - "originalPurchaseDate" : None, - "originalApplicationVersion" : None, - "allPurchaseDatesMillis" : {}, - "entitlements" : {"active": {}, "all": {}}, - "allPurchaseDates" : {}, - "allExpirationDatesMillis" : {}, - "allExpirationDates" : {}, - "originalAppUserId" : f"$RCAnonymousID:{_app_id}", - "latestExpirationDate" : None, - "requestDate" : _request_date, - "latestExpirationDateMillis" : None, - "nonSubscriptionTransactions" : [], - "originalPurchaseDateMillis" : None, - "managementURL" : None, - "allPurchasedProductIdentifiers": [], - "firstSeen" : _request_date, - "activeSubscriptions" : [], - } - } - - - async with ClientSession( - headers=headers - ) as session: - async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response: - response.raise_for_status() - content = (await response.json())["message"]["content"] - if content: - return content \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/badgelabel/Factory.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/badgelabel/Factory.d.ts deleted file mode 100644 index 05a39fbaa32a7bbf5c1bbc0243372ea8ef40c9a1..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/badgelabel/Factory.d.ts +++ /dev/null @@ -1,5 +0,0 @@ -import BadgeLabel from './BadgeLabel'; - -export default function ( - config?: BadgeLabel.IConfig -): BadgeLabel; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/PreLayout.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/PreLayout.js deleted file mode 100644 index 1d6b0b3d0c12c9725edd0ef11573493ad0720d36..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/PreLayout.js +++ /dev/null @@ -1,9 +0,0 @@ -import PreLayoutBase from '../basesizer/PreLayout.js'; - -var PreLayout = function () { - this._maxChildWidth = undefined; - this._maxChildHeight = undefined; - PreLayoutBase.call(this); - return this; -} -export default PreLayout; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pan/Pan.d.ts 
b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pan/Pan.d.ts deleted file mode 100644 index 90d3b003dfb3f918badd56f213d96b3357d36bf3..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pan/Pan.d.ts +++ /dev/null @@ -1,2 +0,0 @@ -import { Pan } from '../../../plugins/gestures'; -export default Pan; \ No newline at end of file diff --git a/spaces/Amjadd/BookGPT/app.py b/spaces/Amjadd/BookGPT/app.py deleted file mode 100644 index 82c0ea94e622e59a129394cb034c430f915c823a..0000000000000000000000000000000000000000 --- a/spaces/Amjadd/BookGPT/app.py +++ /dev/null @@ -1,190 +0,0 @@ -import urllib.request -import fitz -import re -import numpy as np -import tensorflow_hub as hub -import openai -import gradio as gr -import os -from sklearn.neighbors import NearestNeighbors - - -def download_pdf(url, output_path): - urllib.request.urlretrieve(url, output_path) - - -def preprocess(text): - text = text.replace('\n', ' ') - text = re.sub('\s+', ' ', text) - return text - - -def pdf_to_text(path, start_page=1, end_page=None): - doc = fitz.open(path) - total_pages = doc.page_count - - if end_page is None: - end_page = total_pages - - text_list = [] - - for i in range(start_page-1, end_page): - text = doc.load_page(i).get_text("text") - text = preprocess(text) - text_list.append(text) - - doc.close() - return text_list - - -def text_to_chunks(texts, word_length=150, start_page=1): - text_toks = [t.split(' ') for t in texts] - page_nums = [] - chunks = [] - - for idx, words in enumerate(text_toks): - for i in range(0, len(words), word_length): - chunk = words[i:i+word_length] - if (i+word_length) > len(words) and (len(chunk) < word_length) and ( - len(text_toks) != (idx+1)): - text_toks[idx+1] = chunk + text_toks[idx+1] - continue - chunk = ' '.join(chunk).strip() - chunk = f'[{idx+start_page}]' + ' ' + '"' + chunk + '"' - chunks.append(chunk) - return chunks - - -class SemanticSearch: - - def __init__(self): - self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4') - self.fitted = False - - - def fit(self, data, batch=1000, n_neighbors=5): - self.data = data - self.embeddings = self.get_text_embedding(data, batch=batch) - n_neighbors = min(n_neighbors, len(self.embeddings)) - self.nn = NearestNeighbors(n_neighbors=n_neighbors) - self.nn.fit(self.embeddings) - self.fitted = True - - - def __call__(self, text, return_data=True): - inp_emb = self.use([text]) - neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0] - - if return_data: - return [self.data[i] for i in neighbors] - else: - return neighbors - - - def get_text_embedding(self, texts, batch=1000): - embeddings = [] - for i in range(0, len(texts), batch): - text_batch = texts[i:(i+batch)] - emb_batch = self.use(text_batch) - embeddings.append(emb_batch) - embeddings = np.vstack(embeddings) - return embeddings - - -recommender = SemanticSearch() - -def load_recommender(path, start_page=1): - global recommender - texts = pdf_to_text(path, start_page=start_page) - chunks = text_to_chunks(texts, start_page=start_page) - recommender.fit(chunks) - return 'Corpus Loaded.' 
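A quick sketch of how the text_to_chunks helper defined above behaves, using made-up input: each chunk is prefixed with the page number it came from, which is what later lets the generated answer cite page numbers.

texts = ['alpha beta gamma ' * 60]   # one fake "page" of text
chunks = text_to_chunks(texts, word_length=150, start_page=1)
print(len(chunks))                   # 2 chunks of at most 150 words each
print(chunks[0][:30])                # e.g. starts with: [1] "alpha beta gamma ...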
- - -def generate_text(prompt, engine="text-davinci-003"): - completions = openai.Completion.create( - engine=engine, - prompt=prompt, - max_tokens=512, - n=1, - stop=None, - temperature=0.7, - ) - message = completions.choices[0].text - return message - - -def generate_answer(question): - topn_chunks = recommender(question) - prompt = "" - prompt += 'search results:\n\n' - for c in topn_chunks: - prompt += c + '\n\n' - - prompt += "Instructions: Compose a comprehensive reply to the query using the search results given. "\ - "Cite each reference using [number] notation (every result has this number at the beginning). "\ - "Citation should be done at the end of each sentence. If the search results mention multiple subjects "\ - "with the same name, create separate answers for each. Only include information found in the results and "\ - "don't add any additional information. Make sure the answer is correct and don't output false content. "\ - "If the text does not relate to the query, simply state 'Found Nothing'. Ignore outlier "\ - "search results which has nothing to do with the question. Only answer what is asked. The "\ - "answer should be short and concise.\n\nQuery: {question}\nAnswer: " - - prompt += f"Query: {question}\nAnswer:" - answer = generate_text(prompt) - return answer - - -def question_answer(url, file, question, api_key): - openai.api_key = api_key - - if url.strip() == '' and file == None: - return '[ERROR]: Both URL and PDF is empty. Provide atleast one.' - - if url.strip() != '' and file != None: - return '[ERROR]: Both URL and PDF is provided. Please provide only one (eiter URL or PDF).' - - if url.strip() != '': - glob_url = url - download_pdf(glob_url, 'corpus.pdf') - load_recommender('corpus.pdf') - - else: - old_file_name = file.name - file_name = file.name - file_name = file_name[:-12] + file_name[-4:] - os.rename(old_file_name, file_name) - load_recommender(file_name) - - if question.strip() == '': - return '[ERROR]: Question field is empty' - - return generate_answer(question) - - -title = 'BookGPT' -description = "BookGPT allows you to input an entire book and ask questions about its contents. This app uses GPT-3 to generate answers based on the book's information. BookGPT has ability to add reference to the specific page number from where the information was found. This adds credibility to the answers generated also helps you locate the relevant information in the book." - -with gr.Blocks() as demo: - - gr.Markdown(f'

    {title}

    ') - gr.Markdown(description) - gr.Markdown("Thank you for all the support this space has received! Unfortunately, my OpenAI $18 grant has been exhausted, so you'll need to enter your own OpenAI API Key to use the app. Sorry for inconvenience :-(.") - - with gr.Row(): - - with gr.Group(): - url = gr.Textbox(label='URL') - gr.Markdown("
    or
    ") - file = gr.File(label='PDF', file_types=['.pdf']) - question = gr.Textbox(label='question') - api_key = gr.Textbox(label='OpenAI API Key') - btn = gr.Button(value='Submit') - btn.style(full_width=True) - - with gr.Group(): - answer = gr.Textbox(label='answer') - - btn.click(question_answer, inputs=[url, file, question, api_key], outputs=[answer]) - -demo.launch() \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py deleted file mode 100644 index 4e2b89982a6aad0fb2f2b7c8735b0e645665359f..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py +++ /dev/null @@ -1,107 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import tempfile -import unittest - -import numpy as np -import torch - -from diffusers import VersatileDiffusionDualGuidedPipeline -from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device - - -torch.backends.cuda.matmul.allow_tf32 = False - - -@nightly -@require_torch_gpu -class VersatileDiffusionDualGuidedPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_remove_unused_weights_save_load(self): - pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained("shi-labs/versatile-diffusion") - # remove text_unet - pipe.remove_unused_weights() - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - second_prompt = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" - ) - - generator = torch.manual_seed(0) - image = pipe( - prompt="first prompt", - image=second_prompt, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=2, - output_type="numpy", - ).images - - with tempfile.TemporaryDirectory() as tmpdirname: - pipe.save_pretrained(tmpdirname) - pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained(tmpdirname) - - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - generator = generator.manual_seed(0) - new_image = pipe( - prompt="first prompt", - image=second_prompt, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=2, - output_type="numpy", - ).images - - assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass" - - def test_inference_dual_guided(self): - pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained("shi-labs/versatile-diffusion") - pipe.remove_unused_weights() - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) 
- - first_prompt = "cyberpunk 2077" - second_prompt = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" - ) - generator = torch.manual_seed(0) - image = pipe( - prompt=first_prompt, - image=second_prompt, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=50, - output_type="numpy", - ).images - - image_slice = image[0, 253:256, 253:256, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.0787, 0.0849, 0.0826, 0.0812, 0.0807, 0.0795, 0.0818, 0.0798, 0.0779]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/spaces/Andy1621/uniformer_image_detection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py b/spaces/Andy1621/uniformer_image_detection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py deleted file mode 100644 index c4e86387e3ce4aad3dd68d7613160fced4d3785b..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py', - '../_base_/default_runtime.py' -] -model = dict( - roi_head=dict( - bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15))) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=15) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py deleted file mode 100644 index e7a94dbe9ce4a5550971635c6f8cd917de35f72e..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py' - -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/evaluation/metrics.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/evaluation/metrics.py deleted file mode 100644 index 16c7dd47cadd53cf1caaa194e28a343f2aacc599..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/evaluation/metrics.py +++ /dev/null @@ -1,326 +0,0 @@ -from collections import OrderedDict - -import annotator.uniformer.mmcv as mmcv -import numpy as np -import torch - - -def f_score(precision, recall, beta=1): - """calcuate the f-score value. - - Args: - precision (float | torch.Tensor): The precision value. - recall (float | torch.Tensor): The recall value. - beta (int): Determines the weight of recall in the combined score. - Default: False. - - Returns: - [torch.tensor]: The f-score value. - """ - score = (1 + beta**2) * (precision * recall) / ( - (beta**2 * precision) + recall) - return score - - -def intersect_and_union(pred_label, - label, - num_classes, - ignore_index, - label_map=dict(), - reduce_zero_label=False): - """Calculate intersection and Union. - - Args: - pred_label (ndarray | str): Prediction segmentation map - or predict result filename. - label (ndarray | str): Ground truth segmentation map - or label filename. 
- num_classes (int): Number of categories. - ignore_index (int): Index that will be ignored in evaluation. - label_map (dict): Mapping old labels to new labels. The parameter will - work only when label is str. Default: dict(). - reduce_zero_label (bool): Wether ignore zero label. The parameter will - work only when label is str. Default: False. - - Returns: - torch.Tensor: The intersection of prediction and ground truth - histogram on all classes. - torch.Tensor: The union of prediction and ground truth histogram on - all classes. - torch.Tensor: The prediction histogram on all classes. - torch.Tensor: The ground truth histogram on all classes. - """ - - if isinstance(pred_label, str): - pred_label = torch.from_numpy(np.load(pred_label)) - else: - pred_label = torch.from_numpy((pred_label)) - - if isinstance(label, str): - label = torch.from_numpy( - mmcv.imread(label, flag='unchanged', backend='pillow')) - else: - label = torch.from_numpy(label) - - if label_map is not None: - for old_id, new_id in label_map.items(): - label[label == old_id] = new_id - if reduce_zero_label: - label[label == 0] = 255 - label = label - 1 - label[label == 254] = 255 - - mask = (label != ignore_index) - pred_label = pred_label[mask] - label = label[mask] - - intersect = pred_label[pred_label == label] - area_intersect = torch.histc( - intersect.float(), bins=(num_classes), min=0, max=num_classes - 1) - area_pred_label = torch.histc( - pred_label.float(), bins=(num_classes), min=0, max=num_classes - 1) - area_label = torch.histc( - label.float(), bins=(num_classes), min=0, max=num_classes - 1) - area_union = area_pred_label + area_label - area_intersect - return area_intersect, area_union, area_pred_label, area_label - - -def total_intersect_and_union(results, - gt_seg_maps, - num_classes, - ignore_index, - label_map=dict(), - reduce_zero_label=False): - """Calculate Total Intersection and Union. - - Args: - results (list[ndarray] | list[str]): List of prediction segmentation - maps or list of prediction result filenames. - gt_seg_maps (list[ndarray] | list[str]): list of ground truth - segmentation maps or list of label filenames. - num_classes (int): Number of categories. - ignore_index (int): Index that will be ignored in evaluation. - label_map (dict): Mapping old labels to new labels. Default: dict(). - reduce_zero_label (bool): Wether ignore zero label. Default: False. - - Returns: - ndarray: The intersection of prediction and ground truth histogram - on all classes. - ndarray: The union of prediction and ground truth histogram on all - classes. - ndarray: The prediction histogram on all classes. - ndarray: The ground truth histogram on all classes. 
- """ - num_imgs = len(results) - assert len(gt_seg_maps) == num_imgs - total_area_intersect = torch.zeros((num_classes, ), dtype=torch.float64) - total_area_union = torch.zeros((num_classes, ), dtype=torch.float64) - total_area_pred_label = torch.zeros((num_classes, ), dtype=torch.float64) - total_area_label = torch.zeros((num_classes, ), dtype=torch.float64) - for i in range(num_imgs): - area_intersect, area_union, area_pred_label, area_label = \ - intersect_and_union( - results[i], gt_seg_maps[i], num_classes, ignore_index, - label_map, reduce_zero_label) - total_area_intersect += area_intersect - total_area_union += area_union - total_area_pred_label += area_pred_label - total_area_label += area_label - return total_area_intersect, total_area_union, total_area_pred_label, \ - total_area_label - - -def mean_iou(results, - gt_seg_maps, - num_classes, - ignore_index, - nan_to_num=None, - label_map=dict(), - reduce_zero_label=False): - """Calculate Mean Intersection and Union (mIoU) - - Args: - results (list[ndarray] | list[str]): List of prediction segmentation - maps or list of prediction result filenames. - gt_seg_maps (list[ndarray] | list[str]): list of ground truth - segmentation maps or list of label filenames. - num_classes (int): Number of categories. - ignore_index (int): Index that will be ignored in evaluation. - nan_to_num (int, optional): If specified, NaN values will be replaced - by the numbers defined by the user. Default: None. - label_map (dict): Mapping old labels to new labels. Default: dict(). - reduce_zero_label (bool): Wether ignore zero label. Default: False. - - Returns: - dict[str, float | ndarray]: - float: Overall accuracy on all images. - ndarray: Per category accuracy, shape (num_classes, ). - ndarray: Per category IoU, shape (num_classes, ). - """ - iou_result = eval_metrics( - results=results, - gt_seg_maps=gt_seg_maps, - num_classes=num_classes, - ignore_index=ignore_index, - metrics=['mIoU'], - nan_to_num=nan_to_num, - label_map=label_map, - reduce_zero_label=reduce_zero_label) - return iou_result - - -def mean_dice(results, - gt_seg_maps, - num_classes, - ignore_index, - nan_to_num=None, - label_map=dict(), - reduce_zero_label=False): - """Calculate Mean Dice (mDice) - - Args: - results (list[ndarray] | list[str]): List of prediction segmentation - maps or list of prediction result filenames. - gt_seg_maps (list[ndarray] | list[str]): list of ground truth - segmentation maps or list of label filenames. - num_classes (int): Number of categories. - ignore_index (int): Index that will be ignored in evaluation. - nan_to_num (int, optional): If specified, NaN values will be replaced - by the numbers defined by the user. Default: None. - label_map (dict): Mapping old labels to new labels. Default: dict(). - reduce_zero_label (bool): Wether ignore zero label. Default: False. - - Returns: - dict[str, float | ndarray]: Default metrics. - float: Overall accuracy on all images. - ndarray: Per category accuracy, shape (num_classes, ). - ndarray: Per category dice, shape (num_classes, ). 
- """ - - dice_result = eval_metrics( - results=results, - gt_seg_maps=gt_seg_maps, - num_classes=num_classes, - ignore_index=ignore_index, - metrics=['mDice'], - nan_to_num=nan_to_num, - label_map=label_map, - reduce_zero_label=reduce_zero_label) - return dice_result - - -def mean_fscore(results, - gt_seg_maps, - num_classes, - ignore_index, - nan_to_num=None, - label_map=dict(), - reduce_zero_label=False, - beta=1): - """Calculate Mean Intersection and Union (mIoU) - - Args: - results (list[ndarray] | list[str]): List of prediction segmentation - maps or list of prediction result filenames. - gt_seg_maps (list[ndarray] | list[str]): list of ground truth - segmentation maps or list of label filenames. - num_classes (int): Number of categories. - ignore_index (int): Index that will be ignored in evaluation. - nan_to_num (int, optional): If specified, NaN values will be replaced - by the numbers defined by the user. Default: None. - label_map (dict): Mapping old labels to new labels. Default: dict(). - reduce_zero_label (bool): Wether ignore zero label. Default: False. - beta (int): Determines the weight of recall in the combined score. - Default: False. - - - Returns: - dict[str, float | ndarray]: Default metrics. - float: Overall accuracy on all images. - ndarray: Per category recall, shape (num_classes, ). - ndarray: Per category precision, shape (num_classes, ). - ndarray: Per category f-score, shape (num_classes, ). - """ - fscore_result = eval_metrics( - results=results, - gt_seg_maps=gt_seg_maps, - num_classes=num_classes, - ignore_index=ignore_index, - metrics=['mFscore'], - nan_to_num=nan_to_num, - label_map=label_map, - reduce_zero_label=reduce_zero_label, - beta=beta) - return fscore_result - - -def eval_metrics(results, - gt_seg_maps, - num_classes, - ignore_index, - metrics=['mIoU'], - nan_to_num=None, - label_map=dict(), - reduce_zero_label=False, - beta=1): - """Calculate evaluation metrics - Args: - results (list[ndarray] | list[str]): List of prediction segmentation - maps or list of prediction result filenames. - gt_seg_maps (list[ndarray] | list[str]): list of ground truth - segmentation maps or list of label filenames. - num_classes (int): Number of categories. - ignore_index (int): Index that will be ignored in evaluation. - metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'. - nan_to_num (int, optional): If specified, NaN values will be replaced - by the numbers defined by the user. Default: None. - label_map (dict): Mapping old labels to new labels. Default: dict(). - reduce_zero_label (bool): Wether ignore zero label. Default: False. - Returns: - float: Overall accuracy on all images. - ndarray: Per category accuracy, shape (num_classes, ). - ndarray: Per category evaluation metrics, shape (num_classes, ). 
- """ - if isinstance(metrics, str): - metrics = [metrics] - allowed_metrics = ['mIoU', 'mDice', 'mFscore'] - if not set(metrics).issubset(set(allowed_metrics)): - raise KeyError('metrics {} is not supported'.format(metrics)) - - total_area_intersect, total_area_union, total_area_pred_label, \ - total_area_label = total_intersect_and_union( - results, gt_seg_maps, num_classes, ignore_index, label_map, - reduce_zero_label) - all_acc = total_area_intersect.sum() / total_area_label.sum() - ret_metrics = OrderedDict({'aAcc': all_acc}) - for metric in metrics: - if metric == 'mIoU': - iou = total_area_intersect / total_area_union - acc = total_area_intersect / total_area_label - ret_metrics['IoU'] = iou - ret_metrics['Acc'] = acc - elif metric == 'mDice': - dice = 2 * total_area_intersect / ( - total_area_pred_label + total_area_label) - acc = total_area_intersect / total_area_label - ret_metrics['Dice'] = dice - ret_metrics['Acc'] = acc - elif metric == 'mFscore': - precision = total_area_intersect / total_area_pred_label - recall = total_area_intersect / total_area_label - f_value = torch.tensor( - [f_score(x[0], x[1], beta) for x in zip(precision, recall)]) - ret_metrics['Fscore'] = f_value - ret_metrics['Precision'] = precision - ret_metrics['Recall'] = recall - - ret_metrics = { - metric: value.numpy() - for metric, value in ret_metrics.items() - } - if nan_to_num is not None: - ret_metrics = OrderedDict({ - metric: np.nan_to_num(metric_value, nan=nan_to_num) - for metric, metric_value in ret_metrics.items() - }) - return ret_metrics diff --git a/spaces/Arnx/MusicGenXvAKN/audiocraft/models/lm.py b/spaces/Arnx/MusicGenXvAKN/audiocraft/models/lm.py deleted file mode 100644 index c8aad8f06797eef3293605056e1de14d07c56c2a..0000000000000000000000000000000000000000 --- a/spaces/Arnx/MusicGenXvAKN/audiocraft/models/lm.py +++ /dev/null @@ -1,527 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from dataclasses import dataclass -from functools import partial -import logging -import math -import typing as tp - -import torch -from torch import nn - -from ..utils import utils -from ..modules.streaming import StreamingModule, State -from ..modules.transformer import StreamingTransformer, create_norm_fn -from ..modules.conditioners import ( - ConditionFuser, - ClassifierFreeGuidanceDropout, - AttributeDropout, - ConditioningProvider, - ConditioningAttributes, - ConditionType, -) -from ..modules.codebooks_patterns import CodebooksPatternProvider -from ..modules.activations import get_activation_fn - - -logger = logging.getLogger(__name__) -ConditionTensors = tp.Dict[str, ConditionType] -CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]] - - -def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None): - """LM layer initialization. - Inspired from xlformers: https://github.com/fairinternal/xlformers - - Args: - method (str): Method name for init function. Valid options are: - 'gaussian', 'uniform'. - input_dim (int): Input dimension of the initialized module. - init_depth (Optional[int]): Optional init depth value used to rescale - the standard deviation if defined. 
- """ - # Compute std - std = 1 / math.sqrt(input_dim) - # Rescale with depth - if init_depth is not None: - std = std / math.sqrt(2 * init_depth) - - if method == 'gaussian': - return partial( - torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std - ) - elif method == 'uniform': - bound = math.sqrt(3) * std # ensure the standard deviation is `std` - return partial(torch.nn.init.uniform_, a=-bound, b=bound) - else: - raise ValueError("Unsupported layer initialization method") - - -def init_layer(m: nn.Module, - method: str, - init_depth: tp.Optional[int] = None, - zero_bias_init: bool = False): - """Wrapper around ``get_init_fn`` for proper initialization of LM modules. - - Args: - m (nn.Module): Module to initialize. - method (str): Method name for the init function. - init_depth (Optional[int]): Optional init depth value used to rescale - the standard deviation if defined. - zero_bias_init (bool): Whether to initialize the bias to 0 or not. - """ - if isinstance(m, nn.Linear): - init_fn = get_init_fn(method, m.in_features, init_depth=init_depth) - if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: - weight = m.weight.float() - init_fn(weight) - m.weight.data[:] = weight.half() - else: - init_fn(m.weight) - if zero_bias_init and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Embedding): - init_fn = get_init_fn(method, m.embedding_dim, init_depth=None) - if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: - weight = m.weight.float() - init_fn(weight) - m.weight.data[:] = weight.half() - else: - init_fn(m.weight) - - -class ScaledEmbedding(nn.Embedding): - """Boost learning rate for embeddings (with `scale`). - """ - def __init__(self, *args, lr=None, **kwargs): - super().__init__(*args, **kwargs) - self.lr = lr - - def make_optim_group(self): - group = {"params": list(self.parameters())} - if self.lr is not None: - group["lr"] = self.lr - return group - - -@dataclass -class LMOutput: - # The logits are already re-aligned with the input codes - # hence no extra shift is required, e.g. when computing CE - logits: torch.Tensor # [B, K, T, card] - mask: torch.Tensor # [B, K, T] - - -class LMModel(StreamingModule): - """Transformer-based language model on multiple streams of codes. - - Args: - pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving. - condition_provider (MusicConditioningProvider): Conditioning provider from metadata. - fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input. - n_q (int): Number of parallel streams to model. - card (int): Cardinality, vocabulary size. - dim (int): Dimension of the transformer encoder. - num_heads (int): Number of heads for the transformer encoder. - hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder. - norm (str): Normalization method. - norm_first (bool): Use pre-norm instead of post-norm. - emb_lr (Optional[float]): Embedding-specific learning rate. - bias_proj (bool): Use bias for output projections. - weight_init (Optional[str]): Method for weight initialization. - depthwise_init (Optional[str]): Method for depthwise weight initialization. - zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros. - cfg_dropout (float): Classifier-free guidance dropout. - cfg_coef (float): Classifier-free guidance coefficient. - attribute_dropout (dict): Attribute dropout probabilities. 
- two_step_cfg (bool): Whether to run classifier free-guidance with 2 distinct steps. - **kwargs: Additional parameters for the transformer encoder. - """ - def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider, - fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8, - hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False, - emb_lr: tp.Optional[float] = None, bias_proj: bool = True, - weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None, - zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0, - attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False, - **kwargs): - super().__init__() - self.cfg_coef = cfg_coef - self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout) - self.att_dropout = AttributeDropout(p=attribute_dropout) - self.condition_provider = condition_provider - self.fuser = fuser - self.card = card - embed_dim = self.card + 1 - self.n_q = n_q - self.dim = dim - self.pattern_provider = pattern_provider - self.two_step_cfg = two_step_cfg - self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)]) - if 'activation' in kwargs: - kwargs['activation'] = get_activation_fn(kwargs['activation']) - self.transformer = StreamingTransformer( - d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim), - norm=norm, norm_first=norm_first, **kwargs) - self.out_norm: tp.Optional[nn.Module] = None - if norm_first: - self.out_norm = create_norm_fn(norm, dim) - self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)]) - self._init_weights(weight_init, depthwise_init, zero_bias_init) - self._fsdp: tp.Optional[nn.Module] - self.__dict__['_fsdp'] = None - - def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool): - """Initialization of the transformer module weights. - - Args: - weight_init (Optional[str]): Weight initialization strategy. See ``get_init_fn`` for valid options. - depthwise_init (Optional[str]): Depwthwise initialization strategy. The following options are valid: - 'current' where the depth corresponds to the current layer index or 'global' where the total number - of layer is used as depth. If not set, no depthwise initialization strategy is used. - zero_bias_init (bool): Whether to initalize bias to zero or not. - """ - assert depthwise_init is None or depthwise_init in ['current', 'global'] - assert depthwise_init is None or weight_init is not None, \ - "If 'depthwise_init' is defined, a 'weight_init' method should be provided." 
- assert not zero_bias_init or weight_init is not None, \ - "If 'zero_bias_init', a 'weight_init' method should be provided" - - if weight_init is None: - return - - for emb_layer in self.emb: - init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init) - - for layer_idx, tr_layer in enumerate(self.transformer.layers): - depth = None - if depthwise_init == 'current': - depth = layer_idx + 1 - elif depthwise_init == 'global': - depth = len(self.transformer.layers) - init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init) - tr_layer.apply(init_fn) - - for linear in self.linears: - init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init) - - @property - def special_token_id(self) -> int: - return self.card - - @property - def num_codebooks(self) -> int: - return self.n_q - - def forward(self, sequence: torch.Tensor, - conditions: tp.List[ConditioningAttributes], - condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor: - """Apply language model on sequence and conditions. - Given a tensor of sequence of shape [B, K, S] with K the number of codebooks and - S the sequence steps, return the logits with shape [B, card, K, S]. - - Args: - indices (torch.Tensor): indices of the codes to model. - conditions (list[ConditioningAttributes]): conditionings to use when modeling - the given codes. Note that when evaluating multiple time with the same conditioning - you should pre-compute those and pass them as `condition_tensors`. - condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning - tensors, see `conditions`. - Returns: - torch.Tensor: Logits. - """ - B, K, S = sequence.shape - assert K == self.num_codebooks, 'Sequence shape must match the specified number of codebooks' - input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)]) - if condition_tensors is None: - assert not self._is_streaming, "Conditions tensors should be precomputed when streaming." - # apply dropout modules - conditions = self.cfg_dropout(conditions) - conditions = self.att_dropout(conditions) - tokenized = self.condition_provider.tokenize(conditions) - # encode conditions and fuse, both have a streaming cache to not recompute when generating. - condition_tensors = self.condition_provider(tokenized) - else: - assert not conditions, "Shouldn't pass both conditions and condition_tensors." - - input_, cross_attention_input = self.fuser(input_, condition_tensors) - - out = self.transformer(input_, cross_attention_src=cross_attention_input) - if self.out_norm: - out = self.out_norm(out) - logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card] - - # remove the prefix from the model outputs - if len(self.fuser.fuse2cond['prepend']) > 0: - logits = logits[:, :, -S:] - - return logits # [B, K, S, card] - - def compute_predictions( - self, codes: torch.Tensor, - conditions: tp.List[ConditioningAttributes], - condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput: - """Given an input tensor of codes [B, K, T] and list of conditions, runs the model - forward using the specified codes interleaving pattern. - - Args: - codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size, - K the number of codebooks and T the number of timesteps. - conditions (list[ConditioningAttributes]): conditionings to use when modeling - the given codes. 
Note that when evaluating multiple time with the same conditioning - you should pre-compute those and pass them as `condition_tensors`. - condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning - tensors, see `conditions`. - Returns: - LMOutput: Language model outputs - logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes, - i.e. the first item corresponds to logits to predict the first code, meaning that - no additional shifting of codes and logits is required. - mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions. - Given the specified interleaving strategies, parts of the logits and codes should - not be considered as valid predictions because of invalid context. - """ - B, K, T = codes.shape - codes = codes.contiguous() - # map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens - pattern = self.pattern_provider.get_pattern(T) - sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence( - codes, self.special_token_id, keep_only_valid_steps=True - ) - # apply model on pattern sequence - model = self if self._fsdp is None else self._fsdp - logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card] - # map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card] - # and provide the corresponding mask over invalid positions of tokens - logits = logits.permute(0, 3, 1, 2) # [B, card, K, S] - # note: we use nans as special token to make it obvious if we feed unexpected logits - logits, logits_indexes, logits_mask = pattern.revert_pattern_logits( - logits, float('nan'), keep_only_valid_steps=True - ) - logits = logits.permute(0, 2, 3, 1) # [B, K, T, card] - logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T] - return LMOutput(logits, logits_mask) - - def _sample_next_token(self, - sequence: torch.Tensor, - cfg_conditions: CFGConditions, - unconditional_state: State, - use_sampling: bool = False, - temp: float = 1.0, - top_k: int = 0, - top_p: float = 0.0, - cfg_coef: tp.Optional[float] = None) -> torch.Tensor: - """Sample next token from the model given a sequence and a set of conditions. The model supports - multiple sampling strategies (greedy sampling, softmax, top-k, top-p...). - - Args: - sequence (torch.Tensor): Current sequence of shape [B, K, S] - with K corresponding to the number of codebooks and S the number of sequence steps. - S = 1 in streaming mode, except for the first step that contains a bigger prompt. - condition_tensors (Dict[str, ConditionType): Set of conditions. If CFG is used, - should be twice the batch size, being the concatenation of the conditions + null conditions. - use_sampling (bool): Whether to use a sampling strategy or not. - temp (float): Sampling temperature. - top_k (int): K for "top-k" sampling. - top_p (float): P for "top-p" sampling. - cfg_coef (float): classifier free guidance coefficient - Returns: - next_token (torch.Tensor): Next token tensor of shape [B, K, 1]. 
- """ - B = sequence.shape[0] - cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef - model = self if self._fsdp is None else self._fsdp - if self.two_step_cfg and cfg_conditions != {}: - assert isinstance(cfg_conditions, tuple) - condition_tensors, null_condition_tensors = cfg_conditions - cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors) - state = self.get_streaming_state() - self.set_streaming_state(unconditional_state) - uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors) - unconditional_state.update(self.get_streaming_state()) - self.set_streaming_state(state) - logits = uncond_logits + (cond_logits - uncond_logits) * self.cfg_coef - else: - assert isinstance(cfg_conditions, dict) - condition_tensors = cfg_conditions - if condition_tensors: - # Preparing for CFG, predicting both conditional and unconditional logits. - sequence = torch.cat([sequence, sequence], dim=0) - all_logits = model( - sequence, - conditions=[], condition_tensors=condition_tensors) - if condition_tensors: - cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card] - logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef - else: - logits = all_logits - - logits = logits.permute(0, 1, 3, 2) # [B, K, card, T] - logits = logits[..., -1] # [B x K x card] - - # Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error. - if use_sampling and temp > 0.0: - probs = torch.softmax(logits / temp, dim=-1) - if top_p > 0.0: - next_token = utils.sample_top_p(probs, p=top_p) - elif top_k > 0: - next_token = utils.sample_top_k(probs, k=top_k) - else: - next_token = utils.multinomial(probs, num_samples=1) - else: - next_token = torch.argmax(logits, dim=-1, keepdim=True) - - return next_token - - @torch.no_grad() - def generate(self, - prompt: tp.Optional[torch.Tensor] = None, - conditions: tp.List[ConditioningAttributes] = [], - num_samples: tp.Optional[int] = None, - max_gen_len: int = 256, - use_sampling: bool = True, - temp: float = 1.0, - top_k: int = 250, - top_p: float = 0.0, - cfg_coef: tp.Optional[float] = None, - two_step_cfg: bool = False, - remove_prompts: bool = False, - check: bool = False, - callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor: - """Generate tokens sampling from the model given a prompt or unconditionally. Generation can - be perform in a greedy fashion or using sampling with top K and top P strategies. - - Args: - prompt (Optional[torch.Tensor]): Prompt tokens of shape [B, K, T]. - conditions_tensors (Dict[str, torch.Tensor]): Set of conditions or None. - num_samples (int or None): Number of samples to generate when no prompt and no conditions are given. - max_gen_len (int): Maximum generation length. - use_sampling (bool): Whether to use a sampling strategy or not. - temp (float): Sampling temperature. - top_k (int): K for "top-k" sampling. - top_p (float): P for "top-p" sampling. - remove_prompts (bool): Whether to remove prompts from generation or not. - Returns: - torch.Tensor: Generated tokens. - """ - assert not self.training, "generation shouldn't be used in training mode." - first_param = next(iter(self.parameters())) - device = first_param.device - - # Checking all input shapes are consistents. 
- possible_num_samples = [] - if num_samples is not None: - possible_num_samples.append(num_samples) - elif prompt is not None: - possible_num_samples.append(prompt.shape[0]) - elif conditions: - possible_num_samples.append(len(conditions)) - else: - possible_num_samples.append(1) - assert [x == possible_num_samples[0] for x in possible_num_samples], "Inconsitent inputs shapes" - num_samples = possible_num_samples[0] - - # below we create set of conditions: one conditional and one unconditional - # to do that we merge the regular condition together with the null condition - # we then do 1 forward pass instead of 2. - # the reason for that is two-fold: - # 1. it is about x2 faster than doing 2 forward passes - # 2. avoid the streaming API treating the 2 passes as part of different time steps - # We also support doing two different passes, in particular to ensure that - # the padding structure is exactly the same between train anf test. - # With a batch size of 1, this can be slower though. - cfg_conditions: CFGConditions - two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg - if conditions: - null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions) - if two_step_cfg: - cfg_conditions = ( - self.condition_provider(self.condition_provider.tokenize(conditions)), - self.condition_provider(self.condition_provider.tokenize(null_conditions)), - ) - else: - conditions = conditions + null_conditions - tokenized = self.condition_provider.tokenize(conditions) - cfg_conditions = self.condition_provider(tokenized) - else: - cfg_conditions = {} - - if prompt is None: - assert num_samples > 0 - prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device) - - B, K, T = prompt.shape - start_offset = T - assert start_offset < max_gen_len - - pattern = self.pattern_provider.get_pattern(max_gen_len) - # this token is used as default value for codes that are not generated yet - unknown_token = -1 - - # we generate codes up to the max_gen_len that will be mapped to the pattern sequence - gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device) - # filling the gen_codes with the prompt if needed - gen_codes[..., :start_offset] = prompt - # create the gen_sequence with proper interleaving from the pattern: [B, K, S] - gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id) - # retrieve the start_offset in the sequence: - # it is the first sequence step that contains the `start_offset` timestep - start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset) - assert start_offset_sequence is not None - - with self.streaming(): - unconditional_state = self.get_streaming_state() - prev_offset = 0 - gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S] - for offset in range(start_offset_sequence, gen_sequence_len): - # get current sequence (note that the streaming API is providing the caching over previous offsets) - curr_sequence = gen_sequence[..., prev_offset:offset] - curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1) - if check: - # check coherence between mask and sequence - assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all() - # should never happen as gen_sequence is filled progressively - assert not (curr_sequence == unknown_token).any() - # sample next token from the model, next token shape is [B, K, 1] - next_token = self._sample_next_token( - curr_sequence, cfg_conditions, 
unconditional_state, use_sampling, temp, top_k, top_p, - cfg_coef=cfg_coef) - # ensure the tokens that should be masked are properly set to special_token_id - # as the model never output special_token_id - valid_mask = mask[..., offset:offset+1].expand(B, -1, -1) - next_token[~valid_mask] = self.special_token_id - # ensure we don't overwrite prompt tokens, we only write over unknown tokens - # (then mask tokens should be left as is as well, which is correct) - gen_sequence[..., offset:offset+1] = torch.where( - gen_sequence[..., offset:offset+1] == unknown_token, - next_token, gen_sequence[..., offset:offset+1] - ) - prev_offset = offset - if callback is not None: - callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence) - unconditional_state.clear() - - # ensure sequence has been entirely filled - assert not (gen_sequence == unknown_token).any() - # ensure gen_sequence pattern and mask are matching - # which means the gen_sequence is valid according to the pattern - assert ( - gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id) - ).all() - # get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps - out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token) - - # sanity checks over the returned codes and corresponding masks - assert (out_codes[..., :max_gen_len] != unknown_token).all() - assert (out_mask[..., :max_gen_len] == 1).all() - - out_start_offset = start_offset if remove_prompts else 0 - out_codes = out_codes[..., out_start_offset:max_gen_len] - - # ensure the returned codes are all valid - assert (out_codes >= 0).all() and (out_codes <= self.card).all() - return out_codes diff --git a/spaces/AtomdffAI/wechatgpt4atom/config.py b/spaces/AtomdffAI/wechatgpt4atom/config.py deleted file mode 100644 index 3d19a63b362bc1abedb140f1310a7406bf42c84f..0000000000000000000000000000000000000000 --- a/spaces/AtomdffAI/wechatgpt4atom/config.py +++ /dev/null @@ -1,34 +0,0 @@ -# encoding:utf-8 - -import json -import os -from common.log import logger - -config = {} - - -def load_config(): - global config - config_path = "config.json" - if not os.path.exists(config_path): - raise Exception('配置文件不存在,请根据config-template.json模板创建config.json文件') - - config_str = read_file(config_path) - # 将json字符串反序列化为dict类型 - config = json.loads(config_str) - config['open_ai_api_key'] = os.getenv('API_KEY') - logger.info("[INIT] load config: {}".format(config)) - - - -def get_root(): - return os.path.dirname(os.path.abspath( __file__ )) - - -def read_file(path): - with open(path, mode='r', encoding='utf-8') as f: - return f.read() - - -def conf(): - return config diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_registry.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_registry.py deleted file mode 100644 index 4e425a6ec44c7c47a5a106bfdf5ce8062c2110c9..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_registry.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import unittest -import torch - -from detectron2.modeling.meta_arch import GeneralizedRCNN -from detectron2.utils.registry import _convert_target_to_string, locate - - -class A: - class B: - pass - - -class TestLocate(unittest.TestCase): - def _test_obj(self, obj): - name = _convert_target_to_string(obj) - newobj = locate(name) - self.assertIs(obj, newobj) - - def test_basic(self): - self._test_obj(GeneralizedRCNN) - - def test_inside_class(self): - # requires using __qualname__ instead of __name__ - self._test_obj(A.B) - - def test_builtin(self): - self._test_obj(len) - self._test_obj(dict) - - def test_pytorch_optim(self): - # pydoc.locate does not work for it - self._test_obj(torch.optim.SGD) - - def test_failure(self): - with self.assertRaises(ImportError): - locate("asdf") - - def test_compress_target(self): - from detectron2.data.transforms import RandomCrop - - name = _convert_target_to_string(RandomCrop) - # name shouldn't contain 'augmentation_impl' - self.assertEqual(name, "detectron2.data.transforms.RandomCrop") - self.assertIs(RandomCrop, locate(name)) diff --git a/spaces/Benson/text-generation/Examples/Barbie Dreamhouse Adventures Hack Apk.md b/spaces/Benson/text-generation/Examples/Barbie Dreamhouse Adventures Hack Apk.md deleted file mode 100644 index f4a808bb20b3546fbc0825ac6ee10befb8d7408e..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Barbie Dreamhouse Adventures Hack Apk.md +++ /dev/null @@ -1,38 +0,0 @@ -
    -

Barbie Dreamhouse Adventures Hack APK: How to Get Unlimited Coins and VIP Access

-

Do you love playing Barbie Dreamhouse Adventures, the simulation game where you can create your own Barbie experience? Would you like more coins to buy new items and outfits, or access to VIP features such as exclusive rooms, pets, and hairstyles? If so, you might be interested in using a hack apk for the game.

-

A hack apk is a modified version of the original game app that lets you cheat and obtain unlimited resources, unlock premium features, and bypass restrictions. With a hack apk, you can enjoy Barbie Dreamhouse Adventures without spending money or sitting through ads.

    -

    barbie dreamhouse adventures hack apk


    Download File ⚹⚹⚹ https://bltlly.com/2v6Js4



    -

In this article, we will show you how to download and install the Barbie Dreamhouse Adventures hack apk, how to use it to get unlimited coins and VIP access, and how to play the game with tips and tricks. Keep reading to learn more!

-

How to Download and Install the Barbie Dreamhouse Adventures Hack APK

-

Before you can use the Barbie Dreamhouse Adventures hack apk, you need to download it from a reliable source. Many websites claim to offer hack apks, but some of them may be fake, outdated, or infected with malware. To avoid any risk, it is recommended to use [this website]( 1 ), which has a verified link to the latest version of the Barbie Dreamhouse Adventures hack apk.

-

Once you have downloaded the hack apk file, you need to enable unknown sources on your device. This is a security setting that allows you to install apps from sources other than the official app store. To do this, go to Settings > Security > Unknown sources and turn it on.

-

Now you can install the hack apk by tapping the file and following the instructions (a minimal automation sketch follows below). You may need to grant some permissions for the app to work properly. Once the installation is complete, you can open the app and start playing.

    -
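If you prefer to sideload the downloaded file from a computer instead of tapping through the on-device installer, the same step can be driven over adb. The sketch below is only an illustration and is not part of the original instructions: it assumes adb is installed, USB debugging is enabled on the phone, and the file name and download folder are placeholders you would replace with wherever your browser actually saved the file.

```python
# Hypothetical helper: sideload a downloaded APK over adb instead of using
# the on-device installer. The path and file name below are placeholders.
import subprocess
from pathlib import Path

apk = Path.home() / "Downloads" / "dreamhouse-mod.apk"  # assumed download location

if not apk.is_file():
    raise SystemExit(f"APK not found: {apk}")

# -r reinstalls over an existing copy instead of failing if one is present.
result = subprocess.run(
    ["adb", "install", "-r", str(apk)],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```

With the phone connected, a normal install prints adb's own `Success` line; any other output is adb's error message.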

How to Use the Barbie Dreamhouse Adventures Hack APK

-

With unlimited coins, you can buy everything you want in the game, such as new furniture, clothes, accessories, and so on. You can also unlock VIP features that are normally only available to paying subscribers. These include exclusive rooms such as a spa, a dance studio, and a pet room, as well as special pets, hairstyles, outfits, and more.

-

With the hack apk, you can enjoy Barbie Dreamhouse Adventures without limitations or interruptions. You can design your dream house, dress up Barbie and her friends, join fun activities and mini games, and explore Malibu in your pink convertible.

-

Tips and Tricks for the Barbie Dreamhouse Adventures Game

-

Even with the hack apk, you may still want some tips and tricks to make your game more fun and exciting. Here are a few of them:

-

• How to design your dream house and decorate rooms: You can choose from different themes and styles for your rooms, such as modern, classic, and glam. You can also mix and match different items to create your own unique look. You can drag and drop items to place them anywhere you want, or use the auto-decorate feature to let the game do it for you. You can also change the wallpaper, flooring, and ceiling of your rooms.
• How to dress Barbie and her friends in fashionable outfits: You can choose from a variety of clothes, shoes, accessories, and hairstyles for Barbie and her friends. You can also create your own outfits by combining different items and colors. You can save your favorite outfits in your wardrobe and switch between them at any time. You can also share your outfits with other players and get feedback.
• How to join fun activities and mini games: You can join Barbie and her friends in various activities and mini games, such as baking, dancing, swimming, and gardening. You can earn coins and rewards by completing tasks and challenges. You can also discover hidden surprises and secrets in the game.

-

Conclusion

-

If you are a fan of Barbie and simulation games, you should definitely try the Barbie Dreamhouse Adventures hack apk. It will give you a whole new level of fun and excitement. Download it now and start your adventure!

-

Frequently Asked Questions

-

Here are some frequently asked questions and answers about the Barbie Dreamhouse Adventures hack apk and the game:

-

Is the Barbie Dreamhouse Adventures hack apk safe to use?

-

Yes, it is safe to use as long as you download it from a reliable source such as [this website]. The hack apk has been tested and verified by many users and does not contain any viruses or malware. However, you should always be careful when installing apps from unknown sources and only grant permissions when necessary.

-

Will I get banned for using the Barbie Dreamhouse Adventures hack apk?

-

No, you will not get banned for using the Barbie Dreamhouse Adventures hack apk. The hack apk is designed to be undetectable by the game's servers and does not interfere with other players' experience. However, you should use it responsibly and not abuse it or brag about it to other players.

-

Can I update the Barbie Dreamhouse Adventures hack apk?

-

Yes, you can update the Barbie Dreamhouse Adventures hack apk whenever a new version is available. You can check for updates on [this website] or turn on the auto-update feature in the hack menu. However, you should always back up your data before updating to avoid any loss or corruption.

-

Can I play the Barbie Dreamhouse Adventures hack apk offline?

-

Yes, you can play the Barbie Dreamhouse Adventures hack apk offline without an internet connection. However, some features may not work properly or may require online verification. For example, you may not be able to access VIP features or share your outfits with other players while offline.

-

Can I play the Barbie Dreamhouse Adventures hack apk on other devices?

-

64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Buscando Capcut Editor De Vdeo Aplicacin.md b/spaces/Benson/text-generation/Examples/Buscando Capcut Editor De Vdeo Aplicacin.md deleted file mode 100644 index bd1e257394311e5284b52c00d76d2ee98aef8b2b..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Buscando Capcut Editor De Vdeo Aplicacin.md +++ /dev/null @@ -1,72 +0,0 @@ -
    -

Looking for the CapCut Video Editor App Download?

-

If you are looking for a powerful, easy-to-use video editor and video maker app, you may want to check out CapCut. CapCut is a free app that offers everything you need to create stunning, high-quality videos for TikTok, YouTube, Instagram, WhatsApp, Facebook, and more. In this article, we will tell you what CapCut is, why you should use it, how to download and install it on your device, and how to use it to edit and make videos.

-

What CapCut Is and Why You Should Use It

-

CapCut is a free all-in-one video editor and video maker app developed by Bytedance Pte. Ltd., the company behind TikTok. CapCut was previously known as Viamaker, but it has been rebranded and improved with more features and functions. Here are some reasons why you should use CapCut:

    -

    buscando capcut editor de vídeo aplicación


    Download >>>>> https://bltlly.com/2v6JpY



    -

CapCut is a free all-in-one video editor and video maker app

-

Unlike some other video editing apps that require you to pay for premium features or subscriptions, CapCut is completely free. You can access all the basic and advanced features without limitations or watermarks. You can also enjoy free in-app fonts and effects that are updated weekly with the latest trends.

-

CapCut offers advanced, easy-to-use video editing features

-

CapCut has a user-friendly interface that lets you edit and make videos in a matter of seconds. You can trim, split, merge, adjust speed, zoom in/out, reverse/rewind, freeze, add transitions, animate, stabilize, and much more. You can also use advanced features such as keyframe animation, smooth slow motion, chroma key, Picture-in-Picture (PIP), automatic captions, text-to-speech, motion tracking, and background removal.

-

CapCut helps you create stunning videos for social media platforms

-

How to Download and Install CapCut on Your Device

-

CapCut is available for Android devices from the Google Play Store, iOS devices from the App Store, and other devices from Uptodown. Here are the steps to download and install CapCut on your device:

-

Download CapCut from the Google Play Store for Android devices

1. Open the Google Play Store on your Android device.
2. Search for "CapCut" or scan the QR code below.
3. Tap "Install" to download and install the app.
4. Open the app and grant the necessary permissions.
5. Enjoy editing and making videos with CapCut.

Download CapCut from the App Store for iOS devices

• Tap "Get" to download and install the app.
• Open the app and grant the necessary permissions.
• Enjoy editing and making videos with CapCut.

[Image: CapCut App Store QR code]

Download the CapCut APK from Uptodown for other devices

• Tap "Download APK" to download the app file.
• Locate and open the downloaded file and follow the instructions to install the app.
• Open the app and grant the necessary permissions.
• Enjoy editing and making videos with CapCut.

[Image: CapCut Uptodown QR code]

How to Use CapCut to Edit and Make Videos

-

Import or record videos with CapCut

-

To start a new project, tap the "+" button on CapCut's main screen. You can choose to import videos from your device's gallery or record new videos with the built-in camera. You can also use templates from other users or from the app's library. You can select several videos at once and arrange them in whatever order you want, and preview your videos before importing them.

-

Trim, split, merge, and adjust the speed of videos with CapCut

-

To edit your videos, tap the video clip you want to modify in the timeline. You can use the trim tool to cut out unwanted parts of your video, the split tool to divide the video into two or more segments, and the merge tool to combine two or more clips into one. You can also use the speed tool to change the playback speed of your video, and apply speed curves to create smooth transitions between different speeds.

-

Add text, stickers, filters, effects, and music to videos with CapCut

-

To enhance your videos, tap the "+" button in the timeline. You can add text with different fonts, styles, colors, and animations, and stickers from the app's library or your device's gallery. You can add filters to change the mood and tone of your videos, and effects to create striking visuals. You can also add music from the app's library or your device's music library, and adjust its volume, fade in/out, and timing.

-

Use keyframe animation, slow motion, chroma key, and stabilization with CapCut

-

Export and share videos with CapCut

-

To export and share your videos, tap the "Export" button in the upper-right corner of CapCut. You can choose the resolution, format, and quality of your video, enable smart HDR for better color rendering, and preview your video before exporting it. Once your video is exported, you can share it to TikTok or other social media platforms with a single tap. (A quick sketch for checking the exported file follows below.)

    -
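If you copy an exported clip to a computer, you can confirm that the resolution and codec you chose in the export dialog actually ended up in the file. This is only an illustrative sketch, not something the app requires: it assumes ffprobe (shipped with FFmpeg) is on your PATH, and `export.mp4` is a placeholder for the exported file name.

```python
# Illustrative check of an exported clip's codec and resolution with ffprobe.
# "export.mp4" is a placeholder file name.
import json
import subprocess

probe = subprocess.run(
    [
        "ffprobe", "-v", "error",
        "-select_streams", "v:0",
        "-show_entries", "stream=codec_name,width,height",
        "-of", "json",
        "export.mp4",
    ],
    capture_output=True,
    text=True,
    check=True,
)
stream = json.loads(probe.stdout)["streams"][0]
print(f"{stream['codec_name']}: {stream['width']}x{stream['height']}")
```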

Conclusion and FAQs

-

CapCut is a free all-in-one video editor and video maker app that offers everything you need to create stunning, high-quality videos for TikTok, YouTube, Instagram, WhatsApp, Facebook, and more. It has a user-friendly interface that lets you edit and make videos in a matter of seconds, a rich library of music clips and sound effects, and a wide range of filters, effects, colors, and stickers. It offers advanced yet easy-to-use editing features such as keyframe animation, slow motion, chroma key, and stabilization. It also lets you customize the resolution, format, and quality of the exported video, and share your videos to TikTok and other social media platforms with a single click.

-

If you are looking for a CapCut video editor app, you can follow the steps in this article to download and install CapCut on your device, and the tips in this article to edit and make videos with it. We hope you found this article helpful and informative. If you have any questions or comments, feel free to leave a comment below.

-

Here are some frequently asked questions you may have about CapCut:

-

1. Is CapCut safe to use?

Yes, CapCut is safe to use. It does not contain any viruses, malware, or spyware, and it does not collect or share any personal or sensitive information from your device. You can use CapCut without worries.

2. Is CapCut compatible with my device?

CapCut is compatible with most Android devices running Android 5.0 or higher, and most iOS devices running iOS 11.0 or higher. You can also use CapCut on other devices such as a Windows PC, Mac, or Chromebook by downloading the APK file from Uptodown.

3. How can I update CapCut?

You can update CapCut by visiting the Google Play Store or App Store on your device and checking for updates. You can also enable automatic updates for CapCut in your device settings. Alternatively, you can visit Uptodown and download the latest version of the CapCut APK file.

4. How can I contact CapCut support?

You can contact CapCut support by visiting their official website and filling out the feedback form. You can also email them at capcut.support@bytedance.com or follow them on their social media accounts such as Facebook, Instagram, Twitter, and YouTube.

5. How can I learn more about CapCut?

You can learn more about CapCut by visiting their official website and reading their blog posts, tutorials, tips, and tricks. You can also watch their videos on YouTube and learn from other users' experiences and creations.

64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Como Hacer Un Anillo De Plata.md b/spaces/Benson/text-generation/Examples/Como Hacer Un Anillo De Plata.md deleted file mode 100644 index 80b90ba32c7264a8ff9ec5531aae69995b98047d..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Como Hacer Un Anillo De Plata.md +++ /dev/null @@ -1,81 +0,0 @@ - -

Download the Hungry Shark Evolution Unlimited Coin and Diamond Mod v8.2.0

-

Are you a fan of Hungry Shark Evolution, the action-packed aquatic adventure game where you take control of a very hungry shark and go on a rampage through the ocean? Do you want to unlock more sharks, accessories, and missions without spending real money? If so, you might be interested in downloading the Hungry Shark Evolution unlimited coin and diamond mod v8.2.0, a modified version of the game that claims to give you unlimited coins and diamonds for free. In this article, we will tell you what Hungry Shark Evolution is, what features it has, why you might download the mod apk, how to do it, and what precautions and risks you should keep in mind.

-

What is Hungry Shark Evolution?

-

Hungry Shark Evolution is a popular mobile game developed by Ubisoft Entertainment that was released in 2012. It is the fifth installment in the Hungry Shark series and has been downloaded more than 100 million times on the Google Play Store. The game is also available on iOS devices and Apple TV.

    -

    como hacer un anillo de plata


    Download –––––>>> https://bltlly.com/2v6LGG



    -

In Hungry Shark Evolution, you can choose from more than a dozen unique sharks and other creatures to evolve, and explore an open world both above and below the waves. You can enjoy stunning 3D graphics and sound effects as you discover and devour mysterious creatures of the deep, recruit baby sharks to boost your predatory powers, equip awesome accessories such as lasers, jetpacks, and top hats, find and collect sunken bonus objects, complete challenging missions, activate Gold Rush to survive longer and score higher, take part in regular in-game events to earn limited-edition prizes, and more.

-

Features of Hungry Shark Evolution

-

Some of the main features of Hungry Shark Evolution are:

-

Why download the Hungry Shark Evolution unlimited coin and diamond mod v8.2.0?

-

If you want to enjoy all the features of Hungry Shark Evolution without spending money or wasting time, you may want to download the Hungry Shark Evolution unlimited coin and diamond mod v8.2.0. This is a modified version of the game that claims to give you unlimited coins and diamonds for free. With this mod apk, you can unlock every shark and accessory you want, upgrade your sharks to the maximum level, complete all missions with ease, activate Gold Rush mode whenever you like, and dominate the leaderboards. Sounds tempting, right?

-

Benefits of unlimited coins and diamonds

-

Some of the benefits of having unlimited coins and diamonds in Hungry Shark Evolution are:

-

How to download and install the mod apk

-

If you are interested in downloading the Hungry Shark Evolution unlimited coin and diamond mod v8.2.0, here are the steps you need to follow (a short sketch of automating them with adb comes after the list):

    -
      -
1. First, uninstall the original version of Hungry Shark Evolution from your device if you have it installed.
2. Next, download the mod apk file.
3. After downloading the file, enable unknown sources in your device settings. This will allow you to install apps from sources other than the Google Play Store.
4. Next, locate the downloaded file in your device storage and tap it to start the installation process.
5. Follow the on-screen instructions and wait for the installation to finish.
6. Finally, launch the game from the app drawer or home screen and enjoy unlimited coins and diamonds.
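Steps 1 and 4 can also be driven from a computer over adb. The sketch below is only an illustration of that idea and is not taken from the original article: it assumes adb is set up with USB debugging enabled, the package id is an educated guess that you should verify with `adb shell pm list packages`, and the APK path is a placeholder.

```python
# Hypothetical adb automation of the uninstall-then-sideload steps.
# PACKAGE is an assumed id (verify on your own device); APK_PATH is a placeholder.
import subprocess

PACKAGE = "com.fgol.HungrySharkEvolution"
APK_PATH = "hungry-shark-mod-v8.2.0.apk"


def adb(*args: str) -> subprocess.CompletedProcess:
    """Run an adb command and capture its output."""
    return subprocess.run(["adb", *args], capture_output=True, text=True)


# Uninstalling first mirrors step 1; adb simply reports Failure if the app is absent.
print(adb("uninstall", PACKAGE).stdout.strip())
# Sideload the downloaded file (steps 4 and 5).
print(adb("install", APK_PATH).stdout.strip())
```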
Precautions and risks of using the mod apk

-

While downloading the Hungry Shark Evolution unlimited coin and diamond mod v8.2.0 may seem like a great idea, you should also be aware of some precautions and risks that come with it. Here are some of them:

-

Therefore, you should download and use the mod apk at your own risk and discretion. We are not responsible for any consequences that may arise from its use.

-

Conclusion

-

If you want to enjoy all these features without spending money or wasting time, you can download the Hungry Shark Evolution unlimited coin and diamond mod v8.2.0, a modified version of the game that claims to give you unlimited coins and diamonds for free. With this mod apk, you can unlock every shark and accessory you want, upgrade your sharks to the maximum level, complete all missions with ease, activate Gold Rush mode whenever you like, and dominate the leaderboards. However, you should also be aware of the precautions and risks that come with using the mod apk, such as violating the game's terms and conditions, exposing your device to malware or viruses, losing your in-game progress or data, running into bugs or errors in the game, or losing the fun and challenge of the game.

-

Therefore, you should download and use the mod apk at your own risk and discretion. We hope this article has been helpful and informative for you. If you have any questions or comments, feel free to leave them in the comments section below. Thanks for reading!

-

Frequently Asked Questions

-

Here are some frequently asked questions about Hungry Shark Evolution and its mod apk:

-

Q: Is Hungry Shark Evolution free to play?

A: Yes, Hungry Shark Evolution is free to play on Android, iOS, and Apple TV devices. However, some features require real money to unlock or access.

Q: What is the latest version of Hungry Shark Evolution?

A: The latest version of Hungry Shark Evolution as of June 2023 is v8.2.0. It was released in May 2023 and added new features such as new sharks (Ancient Lava Shark, Ancient Alien Shark), new accessories (Lava Jetpack), new missions (Lava World), new events (Lava Rush), new achievements (Lava Master), new leaderboards (Lava Legends), and more.

Q: How can I get more coins and gems in Hungry Shark Evolution?

Q: Is the mod apk safe to use?

A: The mod apk is not an official product of Ubisoft Entertainment and is not endorsed or supported by them. Therefore, it is not guaranteed to be safe or reliable. You may encounter malware, viruses, or other harmful software that can damage your device or compromise your security. You may also violate the game's terms and conditions and get banned from playing online or accessing its features. You may lose your in-game progress or data if the mod apk is not compatible with your device or the latest version of the game, and you may experience bugs, glitches, crashes, or errors that affect your gameplay or performance. Therefore, you should use the mod apk at your own risk and discretion.

Q: How can I update the mod apk?

A: The mod apk does not update automatically, so you need to download and install the latest version manually whenever there is a new update for the game. You can check for updates on the internet or use this link: [Descargar Mod Hungry Shark Evolution Unlimited Coin Serta Diamond v8.2.0]. However, you should be careful when downloading from unknown sources and make sure the file is safe and compatible with your device and the game.

Q: How can I uninstall the mod apk?

A: If you want to uninstall the mod apk, you can simply go to your device settings, find the app, and tap uninstall. Be aware that you will lose all your in-game progress and data if you do so. If you want to keep your progress and data, you can try backing up your data before uninstalling the mod apk and restoring it after installing the original version of the game from the Google Play Store or App Store.

64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/BetterAPI/BetterChat/src/app.html b/spaces/BetterAPI/BetterChat/src/app.html deleted file mode 100644 index 0a54a7b84112c70b0f62cf8a9d6be10e069f9ad3..0000000000000000000000000000000000000000 --- a/spaces/BetterAPI/BetterChat/src/app.html +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - - BetterChat - - %sveltekit.head% - - -
    %sveltekit.body%
    - - - - - diff --git a/spaces/BetterAPI/BetterChat_new/src/lib/updateSettings.ts b/spaces/BetterAPI/BetterChat_new/src/lib/updateSettings.ts deleted file mode 100644 index d8cc90839ef3efbd7e54abf31ecfca1a48aab1a9..0000000000000000000000000000000000000000 --- a/spaces/BetterAPI/BetterChat_new/src/lib/updateSettings.ts +++ /dev/null @@ -1,27 +0,0 @@ -import { invalidate } from "$app/navigation"; -import { base } from "$app/paths"; -import { error } from "$lib/stores/errors"; -import type { Settings } from "./types/Settings"; -import { UrlDependency } from "./types/UrlDependency"; - -export async function updateSettings( - settings: Partial> -): Promise { - try { - const res = await fetch(`${base}/settings`, { - method: "PATCH", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify(settings), - }); - if (!res.ok) { - error.set("Error while updating settings, try again."); - return false; - } - await invalidate(UrlDependency.Settings); - return true; - } catch (err) { - console.error(err); - error.set(String(err)); - return false; - } -} diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/default_styles.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/default_styles.py deleted file mode 100644 index dca37193abffab8b5b388018f895f197316ab652..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/default_styles.py +++ /dev/null @@ -1,190 +0,0 @@ -from typing import Dict - -from .style import Style - -DEFAULT_STYLES: Dict[str, Style] = { - "none": Style.null(), - "reset": Style( - color="default", - bgcolor="default", - dim=False, - bold=False, - italic=False, - underline=False, - blink=False, - blink2=False, - reverse=False, - conceal=False, - strike=False, - ), - "dim": Style(dim=True), - "bright": Style(dim=False), - "bold": Style(bold=True), - "strong": Style(bold=True), - "code": Style(reverse=True, bold=True), - "italic": Style(italic=True), - "emphasize": Style(italic=True), - "underline": Style(underline=True), - "blink": Style(blink=True), - "blink2": Style(blink2=True), - "reverse": Style(reverse=True), - "strike": Style(strike=True), - "black": Style(color="black"), - "red": Style(color="red"), - "green": Style(color="green"), - "yellow": Style(color="yellow"), - "magenta": Style(color="magenta"), - "cyan": Style(color="cyan"), - "white": Style(color="white"), - "inspect.attr": Style(color="yellow", italic=True), - "inspect.attr.dunder": Style(color="yellow", italic=True, dim=True), - "inspect.callable": Style(bold=True, color="red"), - "inspect.async_def": Style(italic=True, color="bright_cyan"), - "inspect.def": Style(italic=True, color="bright_cyan"), - "inspect.class": Style(italic=True, color="bright_cyan"), - "inspect.error": Style(bold=True, color="red"), - "inspect.equals": Style(), - "inspect.help": Style(color="cyan"), - "inspect.doc": Style(dim=True), - "inspect.value.border": Style(color="green"), - "live.ellipsis": Style(bold=True, color="red"), - "layout.tree.row": Style(dim=False, color="red"), - "layout.tree.column": Style(dim=False, color="blue"), - "logging.keyword": Style(bold=True, color="yellow"), - "logging.level.notset": Style(dim=True), - "logging.level.debug": Style(color="green"), - "logging.level.info": Style(color="blue"), - "logging.level.warning": Style(color="red"), - "logging.level.error": Style(color="red", bold=True), - "logging.level.critical": Style(color="red", bold=True, reverse=True), - "log.level": Style.null(), - "log.time": 
Style(color="cyan", dim=True), - "log.message": Style.null(), - "log.path": Style(dim=True), - "repr.ellipsis": Style(color="yellow"), - "repr.indent": Style(color="green", dim=True), - "repr.error": Style(color="red", bold=True), - "repr.str": Style(color="green", italic=False, bold=False), - "repr.brace": Style(bold=True), - "repr.comma": Style(bold=True), - "repr.ipv4": Style(bold=True, color="bright_green"), - "repr.ipv6": Style(bold=True, color="bright_green"), - "repr.eui48": Style(bold=True, color="bright_green"), - "repr.eui64": Style(bold=True, color="bright_green"), - "repr.tag_start": Style(bold=True), - "repr.tag_name": Style(color="bright_magenta", bold=True), - "repr.tag_contents": Style(color="default"), - "repr.tag_end": Style(bold=True), - "repr.attrib_name": Style(color="yellow", italic=False), - "repr.attrib_equal": Style(bold=True), - "repr.attrib_value": Style(color="magenta", italic=False), - "repr.number": Style(color="cyan", bold=True, italic=False), - "repr.number_complex": Style(color="cyan", bold=True, italic=False), # same - "repr.bool_true": Style(color="bright_green", italic=True), - "repr.bool_false": Style(color="bright_red", italic=True), - "repr.none": Style(color="magenta", italic=True), - "repr.url": Style(underline=True, color="bright_blue", italic=False, bold=False), - "repr.uuid": Style(color="bright_yellow", bold=False), - "repr.call": Style(color="magenta", bold=True), - "repr.path": Style(color="magenta"), - "repr.filename": Style(color="bright_magenta"), - "rule.line": Style(color="bright_green"), - "rule.text": Style.null(), - "json.brace": Style(bold=True), - "json.bool_true": Style(color="bright_green", italic=True), - "json.bool_false": Style(color="bright_red", italic=True), - "json.null": Style(color="magenta", italic=True), - "json.number": Style(color="cyan", bold=True, italic=False), - "json.str": Style(color="green", italic=False, bold=False), - "json.key": Style(color="blue", bold=True), - "prompt": Style.null(), - "prompt.choices": Style(color="magenta", bold=True), - "prompt.default": Style(color="cyan", bold=True), - "prompt.invalid": Style(color="red"), - "prompt.invalid.choice": Style(color="red"), - "pretty": Style.null(), - "scope.border": Style(color="blue"), - "scope.key": Style(color="yellow", italic=True), - "scope.key.special": Style(color="yellow", italic=True, dim=True), - "scope.equals": Style(color="red"), - "table.header": Style(bold=True), - "table.footer": Style(bold=True), - "table.cell": Style.null(), - "table.title": Style(italic=True), - "table.caption": Style(italic=True, dim=True), - "traceback.error": Style(color="red", italic=True), - "traceback.border.syntax_error": Style(color="bright_red"), - "traceback.border": Style(color="red"), - "traceback.text": Style.null(), - "traceback.title": Style(color="red", bold=True), - "traceback.exc_type": Style(color="bright_red", bold=True), - "traceback.exc_value": Style.null(), - "traceback.offset": Style(color="bright_red", bold=True), - "bar.back": Style(color="grey23"), - "bar.complete": Style(color="rgb(249,38,114)"), - "bar.finished": Style(color="rgb(114,156,31)"), - "bar.pulse": Style(color="rgb(249,38,114)"), - "progress.description": Style.null(), - "progress.filesize": Style(color="green"), - "progress.filesize.total": Style(color="green"), - "progress.download": Style(color="green"), - "progress.elapsed": Style(color="yellow"), - "progress.percentage": Style(color="magenta"), - "progress.remaining": Style(color="cyan"), - "progress.data.speed": 
Style(color="red"), - "progress.spinner": Style(color="green"), - "status.spinner": Style(color="green"), - "tree": Style(), - "tree.line": Style(), - "markdown.paragraph": Style(), - "markdown.text": Style(), - "markdown.em": Style(italic=True), - "markdown.emph": Style(italic=True), # For commonmark backwards compatibility - "markdown.strong": Style(bold=True), - "markdown.code": Style(bold=True, color="cyan", bgcolor="black"), - "markdown.code_block": Style(color="cyan", bgcolor="black"), - "markdown.block_quote": Style(color="magenta"), - "markdown.list": Style(color="cyan"), - "markdown.item": Style(), - "markdown.item.bullet": Style(color="yellow", bold=True), - "markdown.item.number": Style(color="yellow", bold=True), - "markdown.hr": Style(color="yellow"), - "markdown.h1.border": Style(), - "markdown.h1": Style(bold=True), - "markdown.h2": Style(bold=True, underline=True), - "markdown.h3": Style(bold=True), - "markdown.h4": Style(bold=True, dim=True), - "markdown.h5": Style(underline=True), - "markdown.h6": Style(italic=True), - "markdown.h7": Style(italic=True, dim=True), - "markdown.link": Style(color="bright_blue"), - "markdown.link_url": Style(color="blue", underline=True), - "markdown.s": Style(strike=True), - "iso8601.date": Style(color="blue"), - "iso8601.time": Style(color="magenta"), - "iso8601.timezone": Style(color="yellow"), -} - - -if __name__ == "__main__": # pragma: no cover - import argparse - import io - - from pip._vendor.rich.console import Console - from pip._vendor.rich.table import Table - from pip._vendor.rich.text import Text - - parser = argparse.ArgumentParser() - parser.add_argument("--html", action="store_true", help="Export as HTML table") - args = parser.parse_args() - html: bool = args.html - console = Console(record=True, width=70, file=io.StringIO()) if html else Console() - - table = Table("Name", "Styling") - - for style_name, style in DEFAULT_STYLES.items(): - table.add_row(Text(style_name, style=style), str(style)) - - console.print(table) - if html: - print(console.export_html(inline_styles=True)) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py deleted file mode 100644 index 4a06bc69d5c850fa9f7c4861bc6b3acca3905056..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py +++ /dev/null @@ -1,921 +0,0 @@ -""" -SecureTranport support for urllib3 via ctypes. - -This makes platform-native TLS available to urllib3 users on macOS without the -use of a compiler. This is an important feature because the Python Package -Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL -that ships with macOS is not capable of doing TLSv1.2. The only way to resolve -this is to give macOS users an alternative solution to the problem, and that -solution is to use SecureTransport. - -We use ctypes here because this solution must not require a compiler. That's -because pip is not allowed to require a compiler either. - -This is not intended to be a seriously long-term solution to this problem. -The hope is that PEP 543 will eventually solve this issue for us, at which -point we can retire this contrib module. But in the short term, we need to -solve the impending tire fire that is Python on Mac without this kind of -contrib module. So...here we are. 
- -To use this module, simply import and inject it:: - - import pip._vendor.urllib3.contrib.securetransport as securetransport - securetransport.inject_into_urllib3() - -Happy TLSing! - -This code is a bastardised version of the code found in Will Bond's oscrypto -library. An enormous debt is owed to him for blazing this trail for us. For -that reason, this code should be considered to be covered both by urllib3's -license and by oscrypto's: - -.. code-block:: - - Copyright (c) 2015-2016 Will Bond - - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the "Software"), - to deal in the Software without restriction, including without limitation - the rights to use, copy, modify, merge, publish, distribute, sublicense, - and/or sell copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. -""" -from __future__ import absolute_import - -import contextlib -import ctypes -import errno -import os.path -import shutil -import socket -import ssl -import struct -import threading -import weakref - -from pip._vendor import six - -from .. import util -from ..util.ssl_ import PROTOCOL_TLS_CLIENT -from ._securetransport.bindings import CoreFoundation, Security, SecurityConst -from ._securetransport.low_level import ( - _assert_no_error, - _build_tls_unknown_ca_alert, - _cert_array_from_pem, - _create_cfstring_array, - _load_client_cert_chain, - _temporary_keychain, -) - -try: # Platform-specific: Python 2 - from socket import _fileobject -except ImportError: # Platform-specific: Python 3 - _fileobject = None - from ..packages.backports.makefile import backport_makefile - -__all__ = ["inject_into_urllib3", "extract_from_urllib3"] - -# SNI always works -HAS_SNI = True - -orig_util_HAS_SNI = util.HAS_SNI -orig_util_SSLContext = util.ssl_.SSLContext - -# This dictionary is used by the read callback to obtain a handle to the -# calling wrapped socket. This is a pretty silly approach, but for now it'll -# do. I feel like I should be able to smuggle a handle to the wrapped socket -# directly in the SSLConnectionRef, but for now this approach will work I -# guess. -# -# We need to lock around this structure for inserts, but we don't do it for -# reads/writes in the callbacks. The reasoning here goes as follows: -# -# 1. It is not possible to call into the callbacks before the dictionary is -# populated, so once in the callback the id must be in the dictionary. -# 2. The callbacks don't mutate the dictionary, they only read from it, and -# so cannot conflict with any of the insertions. -# -# This is good: if we had to lock in the callbacks we'd drastically slow down -# the performance of this code. -_connection_refs = weakref.WeakValueDictionary() -_connection_ref_lock = threading.Lock() - -# Limit writes to 16kB. 
This is OpenSSL's limit, but we'll cargo-cult it over -# for no better reason than we need *a* limit, and this one is right there. -SSL_WRITE_BLOCKSIZE = 16384 - -# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to -# individual cipher suites. We need to do this because this is how -# SecureTransport wants them. -CIPHER_SUITES = [ - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_AES_256_GCM_SHA384, - SecurityConst.TLS_AES_128_GCM_SHA256, - SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_AES_128_CCM_8_SHA256, - SecurityConst.TLS_AES_128_CCM_SHA256, - SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA, -] - -# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of -# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. -# TLSv1 to 1.2 are supported on macOS 10.8+ -_protocol_to_min_max = { - util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), - PROTOCOL_TLS_CLIENT: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), -} - -if hasattr(ssl, "PROTOCOL_SSLv2"): - _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = ( - SecurityConst.kSSLProtocol2, - SecurityConst.kSSLProtocol2, - ) -if hasattr(ssl, "PROTOCOL_SSLv3"): - _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = ( - SecurityConst.kSSLProtocol3, - SecurityConst.kSSLProtocol3, - ) -if hasattr(ssl, "PROTOCOL_TLSv1"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = ( - SecurityConst.kTLSProtocol1, - SecurityConst.kTLSProtocol1, - ) -if hasattr(ssl, "PROTOCOL_TLSv1_1"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = ( - SecurityConst.kTLSProtocol11, - SecurityConst.kTLSProtocol11, - ) -if hasattr(ssl, "PROTOCOL_TLSv1_2"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = ( - SecurityConst.kTLSProtocol12, - SecurityConst.kTLSProtocol12, - ) - - -def inject_into_urllib3(): - """ - Monkey-patch urllib3 with SecureTransport-backed SSL-support. - """ - util.SSLContext = SecureTransportContext - util.ssl_.SSLContext = SecureTransportContext - util.HAS_SNI = HAS_SNI - util.ssl_.HAS_SNI = HAS_SNI - util.IS_SECURETRANSPORT = True - util.ssl_.IS_SECURETRANSPORT = True - - -def extract_from_urllib3(): - """ - Undo monkey-patching by :func:`inject_into_urllib3`. 
- """ - util.SSLContext = orig_util_SSLContext - util.ssl_.SSLContext = orig_util_SSLContext - util.HAS_SNI = orig_util_HAS_SNI - util.ssl_.HAS_SNI = orig_util_HAS_SNI - util.IS_SECURETRANSPORT = False - util.ssl_.IS_SECURETRANSPORT = False - - -def _read_callback(connection_id, data_buffer, data_length_pointer): - """ - SecureTransport read callback. This is called by ST to request that data - be returned from the socket. - """ - wrapped_socket = None - try: - wrapped_socket = _connection_refs.get(connection_id) - if wrapped_socket is None: - return SecurityConst.errSSLInternal - base_socket = wrapped_socket.socket - - requested_length = data_length_pointer[0] - - timeout = wrapped_socket.gettimeout() - error = None - read_count = 0 - - try: - while read_count < requested_length: - if timeout is None or timeout >= 0: - if not util.wait_for_read(base_socket, timeout): - raise socket.error(errno.EAGAIN, "timed out") - - remaining = requested_length - read_count - buffer = (ctypes.c_char * remaining).from_address( - data_buffer + read_count - ) - chunk_size = base_socket.recv_into(buffer, remaining) - read_count += chunk_size - if not chunk_size: - if not read_count: - return SecurityConst.errSSLClosedGraceful - break - except (socket.error) as e: - error = e.errno - - if error is not None and error != errno.EAGAIN: - data_length_pointer[0] = read_count - if error == errno.ECONNRESET or error == errno.EPIPE: - return SecurityConst.errSSLClosedAbort - raise - - data_length_pointer[0] = read_count - - if read_count != requested_length: - return SecurityConst.errSSLWouldBlock - - return 0 - except Exception as e: - if wrapped_socket is not None: - wrapped_socket._exception = e - return SecurityConst.errSSLInternal - - -def _write_callback(connection_id, data_buffer, data_length_pointer): - """ - SecureTransport write callback. This is called by ST to request that data - actually be sent on the network. - """ - wrapped_socket = None - try: - wrapped_socket = _connection_refs.get(connection_id) - if wrapped_socket is None: - return SecurityConst.errSSLInternal - base_socket = wrapped_socket.socket - - bytes_to_write = data_length_pointer[0] - data = ctypes.string_at(data_buffer, bytes_to_write) - - timeout = wrapped_socket.gettimeout() - error = None - sent = 0 - - try: - while sent < bytes_to_write: - if timeout is None or timeout >= 0: - if not util.wait_for_write(base_socket, timeout): - raise socket.error(errno.EAGAIN, "timed out") - chunk_sent = base_socket.send(data) - sent += chunk_sent - - # This has some needless copying here, but I'm not sure there's - # much value in optimising this data path. - data = data[chunk_sent:] - except (socket.error) as e: - error = e.errno - - if error is not None and error != errno.EAGAIN: - data_length_pointer[0] = sent - if error == errno.ECONNRESET or error == errno.EPIPE: - return SecurityConst.errSSLClosedAbort - raise - - data_length_pointer[0] = sent - - if sent != bytes_to_write: - return SecurityConst.errSSLWouldBlock - - return 0 - except Exception as e: - if wrapped_socket is not None: - wrapped_socket._exception = e - return SecurityConst.errSSLInternal - - -# We need to keep these two objects references alive: if they get GC'd while -# in use then SecureTransport could attempt to call a function that is in freed -# memory. That would be...uh...bad. Yeah, that's the word. Bad. 
-_read_callback_pointer = Security.SSLReadFunc(_read_callback) -_write_callback_pointer = Security.SSLWriteFunc(_write_callback) - - -class WrappedSocket(object): - """ - API-compatibility wrapper for Python's OpenSSL wrapped socket object. - - Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage - collector of PyPy. - """ - - def __init__(self, socket): - self.socket = socket - self.context = None - self._makefile_refs = 0 - self._closed = False - self._exception = None - self._keychain = None - self._keychain_dir = None - self._client_cert_chain = None - - # We save off the previously-configured timeout and then set it to - # zero. This is done because we use select and friends to handle the - # timeouts, but if we leave the timeout set on the lower socket then - # Python will "kindly" call select on that socket again for us. Avoid - # that by forcing the timeout to zero. - self._timeout = self.socket.gettimeout() - self.socket.settimeout(0) - - @contextlib.contextmanager - def _raise_on_error(self): - """ - A context manager that can be used to wrap calls that do I/O from - SecureTransport. If any of the I/O callbacks hit an exception, this - context manager will correctly propagate the exception after the fact. - This avoids silently swallowing those exceptions. - - It also correctly forces the socket closed. - """ - self._exception = None - - # We explicitly don't catch around this yield because in the unlikely - # event that an exception was hit in the block we don't want to swallow - # it. - yield - if self._exception is not None: - exception, self._exception = self._exception, None - self.close() - raise exception - - def _set_ciphers(self): - """ - Sets up the allowed ciphers. By default this matches the set in - util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done - custom and doesn't allow changing at this time, mostly because parsing - OpenSSL cipher strings is going to be a freaking nightmare. - """ - ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES) - result = Security.SSLSetEnabledCiphers( - self.context, ciphers, len(CIPHER_SUITES) - ) - _assert_no_error(result) - - def _set_alpn_protocols(self, protocols): - """ - Sets up the ALPN protocols on the context. - """ - if not protocols: - return - protocols_arr = _create_cfstring_array(protocols) - try: - result = Security.SSLSetALPNProtocols(self.context, protocols_arr) - _assert_no_error(result) - finally: - CoreFoundation.CFRelease(protocols_arr) - - def _custom_validate(self, verify, trust_bundle): - """ - Called when we have set custom validation. We do this in two cases: - first, when cert validation is entirely disabled; and second, when - using a custom trust DB. - Raises an SSLError if the connection is not trusted. - """ - # If we disabled cert validation, just say: cool. - if not verify: - return - - successes = ( - SecurityConst.kSecTrustResultUnspecified, - SecurityConst.kSecTrustResultProceed, - ) - try: - trust_result = self._evaluate_trust(trust_bundle) - if trust_result in successes: - return - reason = "error code: %d" % (trust_result,) - except Exception as e: - # Do not trust on error - reason = "exception: %r" % (e,) - - # SecureTransport does not send an alert nor shuts down the connection. 
- rec = _build_tls_unknown_ca_alert(self.version()) - self.socket.sendall(rec) - # close the connection immediately - # l_onoff = 1, activate linger - # l_linger = 0, linger for 0 seoncds - opts = struct.pack("ii", 1, 0) - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, opts) - self.close() - raise ssl.SSLError("certificate verify failed, %s" % reason) - - def _evaluate_trust(self, trust_bundle): - # We want data in memory, so load it up. - if os.path.isfile(trust_bundle): - with open(trust_bundle, "rb") as f: - trust_bundle = f.read() - - cert_array = None - trust = Security.SecTrustRef() - - try: - # Get a CFArray that contains the certs we want. - cert_array = _cert_array_from_pem(trust_bundle) - - # Ok, now the hard part. We want to get the SecTrustRef that ST has - # created for this connection, shove our CAs into it, tell ST to - # ignore everything else it knows, and then ask if it can build a - # chain. This is a buuuunch of code. - result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust)) - _assert_no_error(result) - if not trust: - raise ssl.SSLError("Failed to copy trust reference") - - result = Security.SecTrustSetAnchorCertificates(trust, cert_array) - _assert_no_error(result) - - result = Security.SecTrustSetAnchorCertificatesOnly(trust, True) - _assert_no_error(result) - - trust_result = Security.SecTrustResultType() - result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result)) - _assert_no_error(result) - finally: - if trust: - CoreFoundation.CFRelease(trust) - - if cert_array is not None: - CoreFoundation.CFRelease(cert_array) - - return trust_result.value - - def handshake( - self, - server_hostname, - verify, - trust_bundle, - min_version, - max_version, - client_cert, - client_key, - client_key_passphrase, - alpn_protocols, - ): - """ - Actually performs the TLS handshake. This is run automatically by - wrapped socket, and shouldn't be needed in user code. - """ - # First, we do the initial bits of connection setup. We need to create - # a context, set its I/O funcs, and set the connection reference. - self.context = Security.SSLCreateContext( - None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType - ) - result = Security.SSLSetIOFuncs( - self.context, _read_callback_pointer, _write_callback_pointer - ) - _assert_no_error(result) - - # Here we need to compute the handle to use. We do this by taking the - # id of self modulo 2**31 - 1. If this is already in the dictionary, we - # just keep incrementing by one until we find a free space. - with _connection_ref_lock: - handle = id(self) % 2147483647 - while handle in _connection_refs: - handle = (handle + 1) % 2147483647 - _connection_refs[handle] = self - - result = Security.SSLSetConnection(self.context, handle) - _assert_no_error(result) - - # If we have a server hostname, we should set that too. - if server_hostname: - if not isinstance(server_hostname, bytes): - server_hostname = server_hostname.encode("utf-8") - - result = Security.SSLSetPeerDomainName( - self.context, server_hostname, len(server_hostname) - ) - _assert_no_error(result) - - # Setup the ciphers. - self._set_ciphers() - - # Setup the ALPN protocols. - self._set_alpn_protocols(alpn_protocols) - - # Set the minimum and maximum TLS versions. - result = Security.SSLSetProtocolVersionMin(self.context, min_version) - _assert_no_error(result) - - result = Security.SSLSetProtocolVersionMax(self.context, max_version) - _assert_no_error(result) - - # If there's a trust DB, we need to use it. 
We do that by telling - # SecureTransport to break on server auth. We also do that if we don't - # want to validate the certs at all: we just won't actually do any - # authing in that case. - if not verify or trust_bundle is not None: - result = Security.SSLSetSessionOption( - self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True - ) - _assert_no_error(result) - - # If there's a client cert, we need to use it. - if client_cert: - self._keychain, self._keychain_dir = _temporary_keychain() - self._client_cert_chain = _load_client_cert_chain( - self._keychain, client_cert, client_key - ) - result = Security.SSLSetCertificate(self.context, self._client_cert_chain) - _assert_no_error(result) - - while True: - with self._raise_on_error(): - result = Security.SSLHandshake(self.context) - - if result == SecurityConst.errSSLWouldBlock: - raise socket.timeout("handshake timed out") - elif result == SecurityConst.errSSLServerAuthCompleted: - self._custom_validate(verify, trust_bundle) - continue - else: - _assert_no_error(result) - break - - def fileno(self): - return self.socket.fileno() - - # Copy-pasted from Python 3.5 source code - def _decref_socketios(self): - if self._makefile_refs > 0: - self._makefile_refs -= 1 - if self._closed: - self.close() - - def recv(self, bufsiz): - buffer = ctypes.create_string_buffer(bufsiz) - bytes_read = self.recv_into(buffer, bufsiz) - data = buffer[:bytes_read] - return data - - def recv_into(self, buffer, nbytes=None): - # Read short on EOF. - if self._closed: - return 0 - - if nbytes is None: - nbytes = len(buffer) - - buffer = (ctypes.c_char * nbytes).from_buffer(buffer) - processed_bytes = ctypes.c_size_t(0) - - with self._raise_on_error(): - result = Security.SSLRead( - self.context, buffer, nbytes, ctypes.byref(processed_bytes) - ) - - # There are some result codes that we want to treat as "not always - # errors". Specifically, those are errSSLWouldBlock, - # errSSLClosedGraceful, and errSSLClosedNoNotify. - if result == SecurityConst.errSSLWouldBlock: - # If we didn't process any bytes, then this was just a time out. - # However, we can get errSSLWouldBlock in situations when we *did* - # read some data, and in those cases we should just read "short" - # and return. - if processed_bytes.value == 0: - # Timed out, no data read. - raise socket.timeout("recv timed out") - elif result in ( - SecurityConst.errSSLClosedGraceful, - SecurityConst.errSSLClosedNoNotify, - ): - # The remote peer has closed this connection. We should do so as - # well. Note that we don't actually return here because in - # principle this could actually be fired along with return data. - # It's unlikely though. - self.close() - else: - _assert_no_error(result) - - # Ok, we read and probably succeeded. We should return whatever data - # was actually read. - return processed_bytes.value - - def settimeout(self, timeout): - self._timeout = timeout - - def gettimeout(self): - return self._timeout - - def send(self, data): - processed_bytes = ctypes.c_size_t(0) - - with self._raise_on_error(): - result = Security.SSLWrite( - self.context, data, len(data), ctypes.byref(processed_bytes) - ) - - if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0: - # Timed out - raise socket.timeout("send timed out") - else: - _assert_no_error(result) - - # We sent, and probably succeeded. Tell them how much we sent. 
- return processed_bytes.value - - def sendall(self, data): - total_sent = 0 - while total_sent < len(data): - sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]) - total_sent += sent - - def shutdown(self): - with self._raise_on_error(): - Security.SSLClose(self.context) - - def close(self): - # TODO: should I do clean shutdown here? Do I have to? - if self._makefile_refs < 1: - self._closed = True - if self.context: - CoreFoundation.CFRelease(self.context) - self.context = None - if self._client_cert_chain: - CoreFoundation.CFRelease(self._client_cert_chain) - self._client_cert_chain = None - if self._keychain: - Security.SecKeychainDelete(self._keychain) - CoreFoundation.CFRelease(self._keychain) - shutil.rmtree(self._keychain_dir) - self._keychain = self._keychain_dir = None - return self.socket.close() - else: - self._makefile_refs -= 1 - - def getpeercert(self, binary_form=False): - # Urgh, annoying. - # - # Here's how we do this: - # - # 1. Call SSLCopyPeerTrust to get hold of the trust object for this - # connection. - # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf. - # 3. To get the CN, call SecCertificateCopyCommonName and process that - # string so that it's of the appropriate type. - # 4. To get the SAN, we need to do something a bit more complex: - # a. Call SecCertificateCopyValues to get the data, requesting - # kSecOIDSubjectAltName. - # b. Mess about with this dictionary to try to get the SANs out. - # - # This is gross. Really gross. It's going to be a few hundred LoC extra - # just to repeat something that SecureTransport can *already do*. So my - # operating assumption at this time is that what we want to do is - # instead to just flag to urllib3 that it shouldn't do its own hostname - # validation when using SecureTransport. - if not binary_form: - raise ValueError("SecureTransport only supports dumping binary certs") - trust = Security.SecTrustRef() - certdata = None - der_bytes = None - - try: - # Grab the trust store. - result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust)) - _assert_no_error(result) - if not trust: - # Probably we haven't done the handshake yet. No biggie. - return None - - cert_count = Security.SecTrustGetCertificateCount(trust) - if not cert_count: - # Also a case that might happen if we haven't handshaked. - # Handshook? Handshaken? - return None - - leaf = Security.SecTrustGetCertificateAtIndex(trust, 0) - assert leaf - - # Ok, now we want the DER bytes. 
- certdata = Security.SecCertificateCopyData(leaf) - assert certdata - - data_length = CoreFoundation.CFDataGetLength(certdata) - data_buffer = CoreFoundation.CFDataGetBytePtr(certdata) - der_bytes = ctypes.string_at(data_buffer, data_length) - finally: - if certdata: - CoreFoundation.CFRelease(certdata) - if trust: - CoreFoundation.CFRelease(trust) - - return der_bytes - - def version(self): - protocol = Security.SSLProtocol() - result = Security.SSLGetNegotiatedProtocolVersion( - self.context, ctypes.byref(protocol) - ) - _assert_no_error(result) - if protocol.value == SecurityConst.kTLSProtocol13: - raise ssl.SSLError("SecureTransport does not support TLS 1.3") - elif protocol.value == SecurityConst.kTLSProtocol12: - return "TLSv1.2" - elif protocol.value == SecurityConst.kTLSProtocol11: - return "TLSv1.1" - elif protocol.value == SecurityConst.kTLSProtocol1: - return "TLSv1" - elif protocol.value == SecurityConst.kSSLProtocol3: - return "SSLv3" - elif protocol.value == SecurityConst.kSSLProtocol2: - return "SSLv2" - else: - raise ssl.SSLError("Unknown TLS version: %r" % protocol) - - def _reuse(self): - self._makefile_refs += 1 - - def _drop(self): - if self._makefile_refs < 1: - self.close() - else: - self._makefile_refs -= 1 - - -if _fileobject: # Platform-specific: Python 2 - - def makefile(self, mode, bufsize=-1): - self._makefile_refs += 1 - return _fileobject(self, mode, bufsize, close=True) - -else: # Platform-specific: Python 3 - - def makefile(self, mode="r", buffering=None, *args, **kwargs): - # We disable buffering with SecureTransport because it conflicts with - # the buffering that ST does internally (see issue #1153 for more). - buffering = 0 - return backport_makefile(self, mode, buffering, *args, **kwargs) - - -WrappedSocket.makefile = makefile - - -class SecureTransportContext(object): - """ - I am a wrapper class for the SecureTransport library, to translate the - interface of the standard library ``SSLContext`` object to calls into - SecureTransport. - """ - - def __init__(self, protocol): - self._min_version, self._max_version = _protocol_to_min_max[protocol] - self._options = 0 - self._verify = False - self._trust_bundle = None - self._client_cert = None - self._client_key = None - self._client_key_passphrase = None - self._alpn_protocols = None - - @property - def check_hostname(self): - """ - SecureTransport cannot have its hostname checking disabled. For more, - see the comment on getpeercert() in this file. - """ - return True - - @check_hostname.setter - def check_hostname(self, value): - """ - SecureTransport cannot have its hostname checking disabled. For more, - see the comment on getpeercert() in this file. - """ - pass - - @property - def options(self): - # TODO: Well, crap. - # - # So this is the bit of the code that is the most likely to cause us - # trouble. Essentially we need to enumerate all of the SSL options that - # users might want to use and try to see if we can sensibly translate - # them, or whether we should just ignore them. - return self._options - - @options.setter - def options(self, value): - # TODO: Update in line with above. - self._options = value - - @property - def verify_mode(self): - return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE - - @verify_mode.setter - def verify_mode(self, value): - self._verify = True if value == ssl.CERT_REQUIRED else False - - def set_default_verify_paths(self): - # So, this has to do something a bit weird. Specifically, what it does - # is nothing. 
- # - # This means that, if we had previously had load_verify_locations - # called, this does not undo that. We need to do that because it turns - # out that the rest of the urllib3 code will attempt to load the - # default verify paths if it hasn't been told about any paths, even if - # the context itself was configured sometime earlier. We resolve that by just - # ignoring it. - pass - - def load_default_certs(self): - return self.set_default_verify_paths() - - def set_ciphers(self, ciphers): - # For now, we just require the default cipher string. - if ciphers != util.ssl_.DEFAULT_CIPHERS: - raise ValueError("SecureTransport doesn't support custom cipher strings") - - def load_verify_locations(self, cafile=None, capath=None, cadata=None): - # OK, we only really support cadata and cafile. - if capath is not None: - raise ValueError("SecureTransport does not support cert directories") - - # Raise if cafile does not exist. - if cafile is not None: - with open(cafile): - pass - - self._trust_bundle = cafile or cadata - - def load_cert_chain(self, certfile, keyfile=None, password=None): - self._client_cert = certfile - self._client_key = keyfile - self._client_key_passphrase = password - - def set_alpn_protocols(self, protocols): - """ - Sets the ALPN protocols that will later be set on the context. - - Raises a NotImplementedError if ALPN is not supported. - """ - if not hasattr(Security, "SSLSetALPNProtocols"): - raise NotImplementedError( - "SecureTransport supports ALPN only in macOS 10.12+" - ) - self._alpn_protocols = [six.ensure_binary(p) for p in protocols] - - def wrap_socket( - self, - sock, - server_side=False, - do_handshake_on_connect=True, - suppress_ragged_eofs=True, - server_hostname=None, - ): - # So, what do we do here? Firstly, we assert some properties. This is a - # stripped down shim, so there is some functionality we don't support. - # See PEP 543 for the real deal. - assert not server_side - assert do_handshake_on_connect - assert suppress_ragged_eofs - - # Ok, we're good to go. Now we want to create the wrapped socket object - # and store it in the appropriate place. - wrapped_socket = WrappedSocket(sock) - - # Now we can handshake - wrapped_socket.handshake( - server_hostname, - self._verify, - self._trust_bundle, - self._min_version, - self._max_version, - self._client_cert, - self._client_key, - self._client_key_passphrase, - self._alpn_protocols, - ) - return wrapped_socket diff --git a/spaces/CVPR/LIVE/thrust/dependencies/cub/examples/block/Makefile b/spaces/CVPR/LIVE/thrust/dependencies/cub/examples/block/Makefile deleted file mode 100644 index b173c2a02f2c77b8b6f51546e2ed422d2d02d4d2..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/dependencies/cub/examples/block/Makefile +++ /dev/null @@ -1,128 +0,0 @@ -#/****************************************************************************** -# * Copyright (c) 2011, Duane Merrill. All rights reserved. -# * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. -# * -# * Redistribution and use in source and binary forms, with or without -# * modification, are permitted provided that the following conditions are met: -# * * Redistributions of source code must retain the above copyright -# * notice, this list of conditions and the following disclaimer. -# * * Redistributions in binary form must reproduce the above copyright -# * notice, this list of conditions and the following disclaimer in the -# * documentation and/or other materials provided with the distribution.
-# * * Neither the name of the NVIDIA CORPORATION nor the -# * names of its contributors may be used to endorse or promote products -# * derived from this software without specific prior written permission. -# * -# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY -# * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# * -#******************************************************************************/ - -#------------------------------------------------------------------------------- -# -# Makefile usage -# -# make [sm=] [cdp=<0|1>] [force32=<0|1>] [abi=<0|1>] [open64=<0|1>] [verbose=<0|1>] [keep=<0|1>] -# -#------------------------------------------------------------------------------- - -include ../../common.mk - - -#------------------------------------------------------------------------------- -# Includes -#------------------------------------------------------------------------------- - -INC += -I$(CUB_DIR) -I$(CUB_DIR)test - - - -#------------------------------------------------------------------------------- -# Dependency Lists -#------------------------------------------------------------------------------- - -rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) - -DEPS = $(CUB_DEPS) \ - $(CUB_DIR)test/Makefile \ - $(CUB_DIR)test/test_util.h \ - $(CUB_DIR)test/mersenne.h \ - -ALL = example_block_radix_sort \ - example_block_reduce \ - example_block_scan - - - -#------------------------------------------------------------------------------- -# make default -#------------------------------------------------------------------------------- - -default: - - -#------------------------------------------------------------------------------- -# make clean -#------------------------------------------------------------------------------- - -clean : - rm -f bin/*$(CPU_ARCH_SUFFIX)* - rm -f *.i* *.cubin *.cu.c *.cudafe* *.fatbin.c *.ptx *.hash *.cu.cpp *.o - - -#------------------------------------------------------------------------------- -# make all -#------------------------------------------------------------------------------- - -all : $(ALL) - -#------------------------------------------------------------------------------- -# make run -#------------------------------------------------------------------------------- - -run : - for i in $(ALL); do ./bin/$${i}_$(BIN_SUFFIX) --device=$(device) || exit 1; done - - - - -#------------------------------------------------------------------------------- -# make example_block_reduce -#------------------------------------------------------------------------------- - -example_block_reduce: bin/example_block_reduce_$(BIN_SUFFIX) - -bin/example_block_reduce_$(BIN_SUFFIX) : example_block_reduce.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_block_reduce_$(BIN_SUFFIX) example_block_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) 
$(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make example_block_scan -#------------------------------------------------------------------------------- - -example_block_scan: bin/example_block_scan_$(BIN_SUFFIX) - -bin/example_block_scan_$(BIN_SUFFIX) : example_block_scan.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_block_scan_$(BIN_SUFFIX) example_block_scan.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make example_block_radix_sort -#------------------------------------------------------------------------------- - -example_block_radix_sort: bin/example_block_radix_sort_$(BIN_SUFFIX) - -bin/example_block_radix_sort_$(BIN_SUFFIX) : example_block_radix_sort.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_block_radix_sort_$(BIN_SUFFIX) example_block_radix_sort.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - diff --git a/spaces/CVPR/LIVE/thrust/internal/benchmark/compare_benchmark_results.py b/spaces/CVPR/LIVE/thrust/internal/benchmark/compare_benchmark_results.py deleted file mode 100644 index 22e7be8cfc20e1de4cfa586258e433f2a93aeb27..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/internal/benchmark/compare_benchmark_results.py +++ /dev/null @@ -1,1308 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# Copyright (c) 2012-7 Bryce Adelstein Lelbach aka wash -# -# Distributed under the Boost Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -############################################################################### - -############################################################################### -# Copyright (c) 2018 NVIDIA Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -############################################################################### - -# XXX Put code shared with `combine_benchmark_results.py` in a common place. - -# XXX Relative uncertainty. - -# XXX Create uncertain value class which is quantity + uncertainty. - -from sys import exit, stdout - -from os.path import splitext - -from itertools import imap # Lazy map. - -from math import sqrt, log10, floor - -from collections import deque - -from argparse import ArgumentParser as argument_parser -from argparse import Action as argument_action - -from csv import DictReader as csv_dict_reader -from csv import DictWriter as csv_dict_writer - -from re import compile as regex_compile - -############################################################################### - -def unpack_tuple(f): - """Return a unary function that calls `f` with its argument unpacked.""" - return lambda args: f(*iter(args)) - -def strip_dict(d): - """Strip leading and trailing whitespace from all keys and values in `d`. 
- - Returns: - The modified dict `d`. - """ - d.update({key: value.strip() for (key, value) in d.items()}) - return d - -def merge_dicts(d0, d1): - """Create a new `dict` that is the union of `dict`s `d0` and `d1`.""" - d = d0.copy() - d.update(d1) - return d - -def change_key_in_dict(d, old_key, new_key): - """Change the key of the entry in `d` with key `old_key` to `new_key`. If - there is an existing entry - - Returns: - The modified dict `d`. - - Raises: - KeyError : If `old_key` is not in `d`. - """ - d[new_key] = d.pop(old_key) - return d - -def key_from_dict(d): - """Create a hashable key from a `dict` by converting the `dict` to a tuple.""" - return tuple(sorted(d.items())) - -def strip_list(l): - """Strip leading and trailing whitespace from all values in `l`.""" - for i, value in enumerate(l): l[i] = value.strip() - return l - -def remove_from_list(l, item): - """Remove the first occurence of `item` from list `l` and return a tuple of - the index that was removed and the element that was removed. - - Raises: - ValueError : If `item` is not in `l`. - """ - idx = l.index(item) - item = l.pop(idx) - return (idx, item) - -############################################################################### - -def int_or_float(x): - """Convert `x` to either `int` or `float`, preferring `int`. - - Raises: - ValueError : If `x` is not convertible to either `int` or `float` - """ - try: - return int(x) - except ValueError: - return float(x) - -def try_int_or_float(x): - """Try to convert `x` to either `int` or `float`, preferring `int`. `x` is - returned unmodified if conversion fails. - """ - try: - return int_or_float(x) - except ValueError: - return x - -############################################################################### - -def ranges_overlap(x1, x2, y1, y2): - """Returns true if the ranges `[x1, x2]` and `[y1, y2]` overlap, - where `x1 <= x2` and `y1 <= y2`. - - Raises: - AssertionError : If `x1 > x2` or `y1 > y2`. - """ - assert x1 <= x2 - assert y1 <= y2 - return x1 <= y2 and y1 <= x2 - -def ranges_overlap_uncertainty(x, x_unc, y, y_unc): - """Returns true if the ranges `[x - x_unc, x + x_unc]` and - `[y - y_unc, y + y_unc]` overlap, where `x_unc >= 0` and `y_unc >= 0`. - - Raises: - AssertionError : If `x_unc < 0` or `y_unc < 0`. - """ - assert x_unc >= 0 - assert y_unc >= 0 - return ranges_overlap(x - x_unc, x + x_unc, y - y_unc, y + y_unc) - -############################################################################### - -# Formulas for propagation of uncertainty from: -# -# https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas -# -# Even though it's Wikipedia, I trust it as I helped write that table. -# -# XXX Replace with a proper reference. - -def uncertainty_multiplicative(f, A, A_abs_unc, B, B_abs_unc): - """Compute the propagated uncertainty from the multiplication of two - uncertain values, `A +/- A_abs_unc` and `B +/- B_abs_unc`. Given `f = AB` or - `f = A/B`, where `A != 0` and `B != 0`, the uncertainty in `f` is - approximately: - - .. math:: - - \sigma_f = |f| \sqrt{\frac{\sigma_A}{A} ^ 2 + \frac{\sigma_B}{B} ^ 2} - - Raises: - ZeroDivisionError : If `A == 0` or `B == 0`. - """ - return abs(f) * sqrt((A_abs_unc / A) ** 2 + (B_abs_unc / B) ** 2); - -def uncertainty_additive(c, A_abs_unc, d, B_abs_unc): - """Compute the propagated uncertainty from addition of two uncertain values, - `A +/- A_abs_unc` and `B +/- B_abs_unc`. Given `f = cA + dB`, where `c` and - `d` are certain constants, the uncertainty in `f` is approximately: - - .. 
math:: - - f_{\sigma} = \sqrt{c ^ 2 * A_{\sigma} ^ 2 + d ^ 2 * B_{\sigma} ^ 2} - """ - return sqrt(((c ** 2) * (A_abs_unc ** 2)) + ((d ** 2) * (B_abs_unc ** 2))) - -############################################################################### - -# XXX Create change class. - -def absolute_change(old, new): - """Computes the absolute change from old to new: - - .. math:: - - absolute_change = new - old - """ - return new - old - -def absolute_change_uncertainty(old, old_unc, new, new_unc): - """Computes the uncertainty in the absolute change from old to new and returns - a tuple of the absolute change and the absolute change uncertainty. - """ - absolute_change = new - old - absolute_change_unc = uncertainty_additive(1.0, new_unc, -1.0, old_unc) - - return (absolute_change, absolute_change_unc) - -def percent_change(old, new): - """Computes the percent change from old to new: - - .. math:: - - percent_change = 100 \frac{new - old}{abs(old)} - """ - return float(new - old) / abs(old) - -def percent_change_uncertainty(old, old_unc, new, new_unc): - """Computes the uncertainty in the percent change from old to new and returns - a tuple of the absolute change, the absolute change uncertainty, the percent - change and the percent change uncertainty. - """ - # Let's break this down into a few sub-operations: - # - # absolute_change = new - old <- Additive propagation. - # relative_change = change / abs(old) <- Multiplicative propagation. - # percent_change = 100 * y <- Multiplicative propagation. - - if old == 0: - # We can't compute relative change because the old value is 0. - return (float("nan"), float("nan"), float("nan"), float("nan")) - - (absolute_change, absolute_change_unc) = absolute_change_uncertainty( - old, old_unc, new, new_unc - ) - - if absolute_change == 0: - # We can't compute relative change uncertainty because the relative - # uncertainty of a value of 0 is undefined. - return (absolute_change, absolute_change_unc, float("nan"), float("nan")) - - relative_change = float(absolute_change) / abs(old) - relative_change_unc = uncertainty_multiplicative( - relative_change, absolute_change, absolute_change_unc, old, old_unc - ) - - percent_change = 100.0 * relative_change - percent_change_unc = uncertainty_multiplicative( - percent_change, 100.0, 0.0, relative_change, relative_change_unc - ) - - return ( - absolute_change, absolute_change_unc, percent_change, percent_change_unc - ) - -############################################################################### - -def find_significant_digit(x): - """Return the significant digit of the number x. The result is the number of - digits after the decimal place to round to (negative numbers indicate rounding - before the decimal place).""" - if x == 0: return 0 - return -int(floor(log10(abs(x)))) - -def round_with_int_conversion(x, ndigits = None): - """Rounds `x` to `ndigits` after the the decimal place. If `ndigits` is less - than 1, convert the result to `int`. If `ndigits` is `None`, the significant - digit of `x` is used.""" - if ndigits is None: ndigits = find_significant_digit(x) - x_rounded = round(x, ndigits) - return int(x_rounded) if ndigits < 1 else x_rounded - -############################################################################### - -class measured_variable(object): - """A meta-variable representing measured data. It is composed of three raw - variables plus units meta-data. - - Attributes: - quantity (`str`) : - Name of the quantity variable of this object. 
- uncertainty (`str`) : - Name of the uncertainty variable of this object. - sample_size (`str`) : - Name of the sample size variable of this object. - units (units class or `None`) : - The units the value is measured in. - """ - - def __init__(self, quantity, uncertainty, sample_size, units = None): - self.quantity = quantity - self.uncertainty = uncertainty - self.sample_size = sample_size - self.units = units - - def as_tuple(self): - return (self.quantity, self.uncertainty, self.sample_size, self.units) - - def __iter__(self): - return iter(self.as_tuple()) - - def __str__(self): - return str(self.as_tuple()) - - def __repr__(self): - return str(self) - -class measured_value(object): - """An object that represents a value determined by multiple measurements. - - Attributes: - quantity (scalar) : - The quantity of the value, e.g. the arithmetic mean. - uncertainty (scalar) : - The measurement uncertainty, e.g. the sample standard deviation. - sample_size (`int`) : - The number of observations contributing to the value. - units (units class or `None`) : - The units the value is measured in. - """ - - def __init__(self, quantity, uncertainty, sample_size = 1, units = None): - self.quantity = quantity - self.uncertainty = uncertainty - self.sample_size = sample_size - self.units = units - - def as_tuple(self): - return (self.quantity, self.uncertainty, self.sample_size, self.units) - - def __iter__(self): - return iter(self.as_tuple()) - - def __str__(self): - return str(self.as_tuple()) - - def __repr__(self): - return str(self) - -############################################################################### - -def arithmetic_mean(X): - """Computes the arithmetic mean of the sequence `X`. - - Let: - - * `n = len(X)`. - * `u` denote the arithmetic mean of `X`. - - .. math:: - - u = \frac{\sum_{i = 0}^{n - 1} X_i}{n} - """ - return sum(X) / len(X) - -def sample_variance(X, u = None): - """Computes the sample variance of the sequence `X`. - - Let: - - * `n = len(X)`. - * `u` denote the arithmetic mean of `X`. - * `v` denote the sample variance of `X`. - - .. math:: - - v = \frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1} - - Args: - X (`Iterable`) : The sequence of values. - u (number) : The arithmetic mean of `X`. - """ - if u is None: u = arithmetic_mean(X) - return sum(imap(lambda X_i: (X_i - u) ** 2, X)) / (len(X) - 1) - -def sample_standard_deviation(X, u = None, v = None): - """Computes the sample standard deviation of the sequence `X`. - - Let: - - * `n = len(X)`. - * `u` denote the arithmetic mean of `X`. - * `v` denote the sample variance of `X`. - * `s` denote the sample standard deviation of `X`. - - .. math:: - - s &= \sqrt{v} - &= \sqrt{\frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}} - - Args: - X (`Iterable`) : The sequence of values. - u (number) : The arithmetic mean of `X`. - v (number) : The sample variance of `X`. - """ - if u is None: u = arithmetic_mean(X) - if v is None: v = sample_variance(X, u) - return sqrt(v) - -def combine_sample_size(As): - """Computes the combined sample size of a group of `measured_value`s. - - Let: - - * `g = len(As)`. - * `n_i = As[i].samples`. - * `n` denote the combined sample size of `As`. - - .. math:: - - n = \sum_{i = 0}^{g - 1} n_i - """ - return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i), As)) - -def combine_arithmetic_mean(As, n = None): - """Computes the combined arithmetic mean of a group of `measured_value`s. - - Let: - - * `g = len(As)`. - * `u_i = As[i].quantity`. - * `n_i = As[i].samples`.
- * `n` denote the combined sample size of `As`. - * `u` denote the arithmetic mean of the quantities of `As`. - - .. math:: - - u = \frac{\sum_{i = 0}^{g - 1} n_i u_i}{n} - """ - if n is None: n = combine_sample_size(As) - return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i * u_i), As)) / n - -def combine_sample_variance(As, n = None, u = None): - """Computes the combined sample variance of a group of `measured_value`s. - - Let: - - * `g = len(As)`. - * `u_i = As[i].quantity`. - * `s_i = As[i].uncertainty`. - * `n_i = As[i].samples`. - * `n` denote the combined sample size of `As`. - * `u` denote the arithmetic mean of the quantities of `As`. - * `v` denote the combined sample variance of `As`. - - .. math:: - - v = \frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1} - - Args: - As (`Iterable` of `measured_value`s) : The sequence of values. - n (number) : The combined sample size of `As`. - u (number) : The combined arithmetic mean of `As`. - """ - if n is None: n = combine_sample_size(As) - if n <= 1: return 0 - if u is None: u = combine_arithmetic_mean(As, n) - return sum(imap(unpack_tuple( - lambda u_i, s_i, n_i, t_i: n_i * (u_i - u) ** 2 + (s_i ** 2) * (n_i - 1) - ), As)) / (n - 1) - -def combine_sample_standard_deviation(As, n = None, u = None, v = None): - """Computes the combined sample standard deviation of a group of - `measured_value`s. - - Let: - - * `g = len(As)`. - * `u_i = As[i].quantity`. - * `s_i = As[i].uncertainty`. - * `n_i = As[i].samples`. - * `n` denote the combined sample size of `As`. - * `u` denote the arithmetic mean of the quantities of `As`. - * `v` denote the combined sample variance of `As`. - * `s` denote the combined sample standard deviation of `As`. - - .. math:: - v &= \frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1} - - s &= \sqrt{v} - - Args: - As (`Iterable` of `measured_value`s) : The sequence of values. - n (number) : The combined sample size of `As`. - u (number) : The combined arithmetic mean of `As`. - v (number) : The combined sample variance of `As`.
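- - Example (illustrative, with made-up numbers): combining the two `measured_value`s - (10.0 +/- 1.0, n = 5) and (12.0 +/- 2.0, n = 5) gives n = 10, u = 11.0, - v = (5 * (10 - 11)^2 + 1^2 * 4 + 5 * (12 - 11)^2 + 2^2 * 4) / 9 = 30 / 9, and - s = sqrt(30 / 9), roughly 1.83.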
- """ - if n <= 1: return 0 - if n is None: n = combine_sample_size(As) - if u is None: u = combine_arithmetic_mean(As, n) - if v is None: v = combine_sample_variance(As, n, u) - return sqrt(v) - -############################################################################### - -def store_const_multiple(const, *destinations): - """Returns an `argument_action` class that sets multiple argument - destinations (`destinations`) to `const`.""" - class store_const_multiple_action(argument_action): - def __init__(self, *args, **kwargs): - super(store_const_multiple_action, self).__init__( - metavar = None, nargs = 0, const = const, *args, **kwargs - ) - - def __call__(self, parser, namespace, values, option_string = None): - for destination in destinations: - setattr(namespace, destination, const) - - return store_const_multiple_action - -def store_true_multiple(*destinations): - """Returns an `argument_action` class that sets multiple argument - destinations (`destinations`) to `True`.""" - return store_const_multiple(True, *destinations) - -def store_false_multiple(*destinations): - """Returns an `argument_action` class that sets multiple argument - destinations (`destinations`) to `False`.""" - return store_const_multiple(False, *destinations) - -############################################################################### - -def process_program_arguments(): - ap = argument_parser( - description = ( - "Compares two sets of combined performance results and identifies " - "statistically significant changes." - ) - ) - - ap.add_argument( - "baseline_input_file", - help = ("CSV file containing the baseline performance results. The first " - "two rows should be a header. The 1st header row specifies the " - "name of each variable, and the 2nd header row specifies the units " - "for that variable. The baseline results may be a superset of the " - "observed performance results, but the reverse is not true. The " - "baseline results must contain data for every datapoint in the " - "observed performance results."), - type = str - ) - - ap.add_argument( - "observed_input_file", - help = ("CSV file containing the observed performance results. The first " - "two rows should be a header. The 1st header row specifies the name " - "of header row specifies the units for that variable."), - type = str - ) - - ap.add_argument( - "-o", "--output-file", - help = ("The file that results are written to. If `-`, results are " - "written to stdout."), - action = "store", type = str, default = "-", - metavar = "OUTPUT" - ) - - ap.add_argument( - "-c", "--control-variable", - help = ("Treat the specified variable as a control variable. This means " - "it will be filtered out when forming dataset keys. For example, " - "this could be used to ignore a timestamp variable that is " - "different in the baseline and observed results. May be specified " - "multiple times."), - action = "append", type = str, dest = "control_variables", default = [], - metavar = "QUANTITY" - ) - - ap.add_argument( - "-d", "--dependent-variable", - help = ("Treat the specified three variables as a dependent variable. The " - "1st variable is the measured quantity, the 2nd is the uncertainty " - "of the measurement and the 3rd is the sample size. The defaults " - "are the dependent variables of Thrust's benchmark suite. 
May be " - "specified multiple times."), - action = "append", type = str, dest = "dependent_variables", default = [], - metavar = "QUANTITY,UNCERTAINTY,SAMPLES" - ) - - ap.add_argument( - "-t", "--change-threshold", - help = ("Treat relative changes less than this amount (a percentage) as " - "statistically insignificant. The default is 5%%."), - action = "store", type = float, default = 5, - metavar = "PERCENTAGE" - ) - - ap.add_argument( - "-p", "--preserve-whitespace", - help = ("Don't trim leading and trailing whitespace from each CSV cell."), - action = "store_true", default = False - ) - - ap.add_argument( - "--output-all-variables", - help = ("Don't omit original absolute values in output."), - action = "store_true", default = False - ) - - ap.add_argument( - "--output-all-datapoints", - help = ("Don't omit datapoints that are statistically indistinguishable " - "in output."), - action = "store_true", default = False - ) - - ap.add_argument( - "-a", "--output-all", - help = ("Equivalent to `--output-all-variables --output-all-datapoints`."), - action = store_true_multiple("output_all_variables", "output_all_datapoints") - ) - - return ap.parse_args() - -############################################################################### - -def filter_comments(f, s = "#"): - """Return an iterator to the file `f` which filters out all lines beginning - with `s`.""" - return filter(lambda line: not line.startswith(s), f) - -############################################################################### - -class io_manager(object): - """Manages I/O operations and represents the input data as an `Iterable` - sequence of `dict`s. - - It is `Iterable` and an `Iterator`. It can be used with `with`. - - Attributes: - preserve_whitespace (`bool`) : - If `False`, leading and trailing whitespace is stripped from each CSV cell. - writer (`csv_dict_writer`) : - CSV writer object that the output is written to. - output_file (`file` or `stdout`) : - The output `file` object. - baseline_reader (`csv_dict_reader`) : - CSV reader object for the baseline results. - observed_reader (`csv_dict_reader`) : - CSV reader object for the observed results. - baseline_input_file (`file`) : - `file` object for the baseline results. - observed_input_file (`file`) : - `file` object for the observed results.. - variable_names (`list` of `str`s) : - Names of the variables, in order. - variable_units (`list` of `str`s) : - Units of the variables, in order. - """ - - def __init__(self, - baseline_input_file, observed_input_file, - output_file, - preserve_whitespace = False): - """Read input files and open the output file and construct a new `io_manager` - object. - - If `preserve_whitespace` is `False`, leading and trailing whitespace is - stripped from each CSV cell. - - Raises - AssertionError : - If `type(preserve_whitespace) != bool`. - """ - assert type(preserve_whitespace) == bool - - self.preserve_whitespace = preserve_whitespace - - # Open baseline results. - self.baseline_input_file = open(baseline_input_file) - self.baseline_reader = csv_dict_reader( - filter_comments(self.baseline_input_file) - ) - - if not self.preserve_whitespace: - strip_list(self.baseline_reader.fieldnames) - - self.variable_names = list(self.baseline_reader.fieldnames) # Copy. - self.variable_units = self.baseline_reader.next() - - if not self.preserve_whitespace: - strip_dict(self.variable_units) - - # Open observed results. 
- self.observed_input_file = open(observed_input_file) - self.observed_reader = csv_dict_reader( - filter_comments(self.observed_input_file) - ) - - if not self.preserve_whitespace: - strip_list(self.observed_reader.fieldnames) - - # Make sure all inputs have the same variables schema. - assert self.variable_names == self.observed_reader.fieldnames, \ - "Observed results input file (`" + observed_input_file + "`) " + \ - "variable schema `" + str(self.observed_reader.fieldnames) + "` does " + \ - "not match the baseline results input file (`" + baseline_input_file + \ - "`) variable schema `" + str(self.variable_names) + "`." - - # Consume the next row, which should be the second line of the header. - observed_variable_units = self.observed_reader.next() - - if not self.preserve_whitespace: - strip_dict(observed_variable_units) - - # Make sure all inputs have the same units schema. - assert self.variable_units == observed_variable_units, \ - "Observed results input file (`" + observed_input_file + "`) " + \ - "units schema `" + str(observed_variable_units) + "` does not " + \ - "match the baseline results input file (`" + baseline_input_file + \ - "`) units schema `" + str(self.variable_units) + "`." - - if output_file == "-": # Output to stdout. - self.output_file = stdout - else: # Output to user-specified file. - self.output_file = open(output_file, "w") - - self.writer = csv_dict_writer( - self.output_file, fieldnames = self.variable_names - ) - - def __enter__(self): - """Called upon entering a `with` statement.""" - return self - - def __exit__(self, *args): - """Called upon exiting a `with` statement.""" - if self.output_file is stdout: - self.output_file = None - elif self.output_file is not None: - self.output_file.__exit__(*args) - - self.baseline_input_file.__exit__(*args) - self.observed_input_file.__exit__(*args) - - def append_variable(self, name, units): - """Add a new variable to the output schema.""" - self.variable_names.append(name) - self.variable_units.update({name : units}) - - # Update CSV writer field names. - self.writer.fieldnames = self.variable_names - - def insert_variable(self, idx, name, units): - """Insert a new variable into the output schema at index `idx`.""" - self.variable_names.insert(idx, name) - self.variable_units.update({name : units}) - - # Update CSV writer field names. - self.writer.fieldnames = self.variable_names - - def remove_variable(self, name): - """Remove variable from the output schema and return a tuple of the variable - index and the variable units. - - Raises: - ValueError : If `name` is not in the output schema. - """ - # Remove the variable and get its index, which we'll need to remove the - # corresponding units entry. - (idx, item) = remove_from_list(self.variable_names, name) - - # Remove the units entry. - units = self.variable_units.pop(item) - - # Update CSV writer field names. - self.writer.fieldnames = self.variable_names - - return (idx, units) - - ############################################################################# - # Input Stream. - - def baseline(self): - """Return an iterator to the baseline results input sequence.""" - return imap(lambda row: strip_dict(row), self.baseline_reader) - - def observed(self): - """Return an iterator to the observed results input sequence.""" - return imap(lambda row: strip_dict(row), self.observed_reader) - - ############################################################################# - # Output. 
- - def write_header(self): - """Write the header for the output CSV file.""" - # Write the first line of the header. - self.writer.writeheader() - - # Write the second line of the header. - self.writer.writerow(self.variable_units) - - def write(self, d): - """Write a record (a `dict`) to the output CSV file.""" - self.writer.writerow(d) - -############################################################################### - -class dependent_variable_parser(object): - """Parses a `--dependent-variable=AVG,STDEV,TRIALS` command line argument.""" - - ############################################################################# - # Grammar - - # Parse a variable_name. - variable_name_rule = r'[^,]+' - - # Parse a variable classification. - dependent_variable_rule = r'(' + variable_name_rule + r')' \ - + r',' \ - + r'(' + variable_name_rule + r')' \ - + r',' \ - + r'(' + variable_name_rule + r')' - - engine = regex_compile(dependent_variable_rule) - - ############################################################################# - - def __call__(self, s): - """Parses the string `s` with the form "AVG,STDEV,TRIALS". - - Returns: - A `measured_variable`. - - Raises: - AssertionError : If parsing fails. - """ - - match = self.engine.match(s) - - assert match is not None, \ - "Dependent variable (-d) `" +s+ "` is invalid, the format is " + \ - "`AVG,STDEV,TRIALS`." - - return measured_variable(match.group(1), match.group(2), match.group(3)) - -############################################################################### - -class record_aggregator(object): - """Consumes and combines records and represents the result as an `Iterable` - sequence of `dict`s. - - It is `Iterable` and an `Iterator`. - - Attributes: - dependent_variables (`list` of `measured_variable`s) : - A list of dependent variables provided on the command line. - control_variables (`list` of `str`s) : - A list of control variables provided on the command line. - dataset (`dict`) : - A mapping of distinguishing (e.g. control + independent) values (`tuple`s - of variable-quantity pairs) to `list`s of dependent values (`dict`s from - variables to lists of cells). - in_order_dataset_keys : - A list of unique dataset keys (e.g. distinguishing variables) in order of - appearance. - """ - - def __init__(self, dependent_variables, control_variables): - """Construct a new `record_aggregator` object. - - Raises: - AssertionError : If parsing of dependent variables fails. - """ - self.dependent_variables = dependent_variables - self.control_variables = control_variables - - self.dataset = {} - - self.in_order_dataset_keys = deque() - - ############################################################################# - # Insertion. - - def key_from_dict(self, d): - """Create a hashable key from a `dict` by filtering out control variables - and then converting the `dict` to a tuple. - - Raises: - AssertionError : If any control variable was not found in `d`. - """ - distinguishing_values = d.copy() - - # Filter out control variables. - for var in self.control_variables: - distinguishing_values.pop(var, None) - - return key_from_dict(distinguishing_values) - - def append(self, record): - """Add `record` to the dataset. - - Raises: - ValueError : If any `str`-to-numeric conversions fail. - """ - # The distinguishing variables are the control and independent variables. - # They form the key for each record in the dataset. Records with the same - # distinguishing variables are treated as observations of the same - # datapoint. 
- dependent_values = {} - - # To allow the same sample size variable to be used for multiple dependent - # variables, we don't pop sample size variables until we're done processing - # all variables. - sample_size_variables = [] - - # Separate the dependent values from the distinguishing variables and - # perform `str`-to-numeric conversions. - for var in self.dependent_variables: - quantity, uncertainty, sample_size, units = var.as_tuple() - - dependent_values[quantity] = [int_or_float(record.pop(quantity))] - dependent_values[uncertainty] = [int_or_float(record.pop(uncertainty))] - dependent_values[sample_size] = [int(record[sample_size])] - - sample_size_variables.append(sample_size) - - # Pop sample size variables. - for var in sample_size_variables: - # Allowed to fail, as we may have duplicates. - record.pop(var, None) - - distinguishing_values = self.key_from_dict(record) - - if distinguishing_values in self.dataset: - # These distinguishing values already exist, so get the `dict` they're - # mapped to, look up each key in `dependent_values` in the `dict`, and - # add the corresponding quantity in `dependent_values` to the list in the - # the `dict`. - for var, columns in dependent_values.iteritems(): - self.dataset[distinguishing_values][var] += columns - else: - # These distinguishing values aren't in the dataset, so add them and - # record them in `in_order_dataset_keys`. - self.dataset[distinguishing_values] = dependent_values - self.in_order_dataset_keys.append(distinguishing_values) - - ############################################################################# - # Postprocessing. - - def combine_dependent_values(self, dependent_values): - """Takes a mapping of dependent variables to lists of cells and returns - a new mapping with the cells combined. - - Raises: - AssertionError : If class invariants were violated. - """ - combined_dependent_values = dependent_values.copy() - - for var in self.dependent_variables: - quantity, uncertainty, sample_size, units = var.as_tuple() - - quantities = dependent_values[quantity] - uncertainties = dependent_values[uncertainty] - sample_sizes = dependent_values[sample_size] - - if type(sample_size) is list: - # Sample size hasn't been combined yet. - assert len(quantities) == len(uncertainties) \ - and len(uncertainties) == len(sample_sizes), \ - "Length of quantities list `(" + str(len(quantities)) + ")`, " + \ - "length of uncertainties list `(" + str(len(uncertainties)) + \ - "),` and length of sample sizes list `(" + str(len(sample_sizes)) + \ - ")` are not the same." - else: - # Another dependent variable that uses our sample size has combined it - # already. - assert len(quantities) == len(uncertainties), \ - "Length of quantities list `(" + str(len(quantities)) + ")` and " + \ - "length of uncertainties list `(" + str(len(uncertainties)) + \ - ")` are not the same." - - # Convert the three separate `list`s into one list of `measured_value`s. - measured_values = [] - - for i in range(len(quantities)): - mv = measured_value( - quantities[i], uncertainties[i], sample_sizes[i], units - ) - - measured_values.append(mv) - - # Combine the `measured_value`s. 
- combined_sample_size = combine_sample_size( - measured_values - ) - - combined_arithmetic_mean = combine_arithmetic_mean( - measured_values, combined_sample_size - ) - - combined_sample_standard_deviation = combine_sample_standard_deviation( - measured_values, combined_sample_size, combined_arithmetic_mean - ) - - # Round the quantity and uncertainty to the significant digit of - # uncertainty and insert the combined values into the results. - sigdig = find_significant_digit(combined_sample_standard_deviation) - -# combined_arithmetic_mean = round_with_int_conversion( -# combined_arithmetic_mean, sigdig -# ) - -# combined_sample_standard_deviation = round_with_int_conversion( -# combined_sample_standard_deviation, sigdig -# ) - - combined_dependent_values[quantity] = combined_arithmetic_mean - combined_dependent_values[uncertainty] = combined_sample_standard_deviation - combined_dependent_values[sample_size] = combined_sample_size - - return combined_dependent_values - - ############################################################################# - # Output Stream. - - def __iter__(self): - """Return an iterator to the output sequence of separated distinguishing - variables and dependent variables (a tuple of two `dict`s). - - This is a requirement for the `Iterable` protocol. - """ - return self - - def records(self): - """Return an iterator to the output sequence of CSV rows (`dict`s of - variables to values). - """ - return imap(unpack_tuple(lambda dist, dep: merge_dicts(dist, dep)), self) - - def next(self): - """Produce the components of the next output record - a tuple of two - `dict`s. The first `dict` is a mapping of distinguishing variables to - distinguishing values, the second `dict` is a mapping of dependent - variables to combined dependent values. Combining the two dicts forms a - CSV row suitable for output. - - This is a requirement for the `Iterator` protocol. - - Raises: - StopIteration : If there is no more output. - AssertionError : If class invariants were violated. - """ - assert len(self.dataset.keys()) == len(self.in_order_dataset_keys), \ - "Number of dataset keys (`" + str(len(self.dataset.keys())) + \ - "`) is not equal to the number of keys in the ordering list (`" + \ - str(len(self.in_order_dataset_keys)) + "`)." - - if len(self.in_order_dataset_keys) == 0: - raise StopIteration() - - # Get the next set of distinguishing values and convert them to a `dict`. - raw_distinguishing_values = self.in_order_dataset_keys.popleft() - distinguishing_values = dict(raw_distinguishing_values) - - dependent_values = self.dataset.pop(raw_distinguishing_values) - - combined_dependent_values = self.combine_dependent_values(dependent_values) - - return (distinguishing_values, combined_dependent_values) - - def __getitem__(self, distinguishing_values): - """Produce the dependent component, a `dict` mapping dependent variables to - combined dependent values, associated with `distinguishing_values`. - - Args: - distinguishing_values (`dict`) : - A `dict` mapping distinguishing variables to distinguishing values. - - Raises: - KeyError : If `distinguishing_values` is not in the dataset. 
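- - Example (hypothetical variable names): `ra[{"Algorithm" : "reduce", "Input Size" : "1024"}]` - returns the combined dependent values (e.g. the mean walltime, its uncertainty - and the combined sample size) recorded for that datapoint.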
- """ - raw_distinguishing_values = self.key_from_dict(distinguishing_values) - - dependent_values = self.dataset[raw_distinguishing_values] - - combined_dependent_values = self.combine_dependent_values(dependent_values) - - return combined_dependent_values - -############################################################################### - -args = process_program_arguments() - -if len(args.dependent_variables) == 0: - args.dependent_variables = [ - "STL Average Walltime,STL Walltime Uncertainty,STL Trials", - "STL Average Throughput,STL Throughput Uncertainty,STL Trials", - "Thrust Average Walltime,Thrust Walltime Uncertainty,Thrust Trials", - "Thrust Average Throughput,Thrust Throughput Uncertainty,Thrust Trials" - ] - -# Parse dependent variable options. -dependent_variables = [] - -parse_dependent_variable = dependent_variable_parser() - -#if args.dependent_variables is not None: -for var in args.dependent_variables: - dependent_variables.append(parse_dependent_variable(var)) - -# Read input files and open the output file. -with io_manager(args.baseline_input_file, - args.observed_input_file, - args.output_file, - args.preserve_whitespace) as iom: - - # Create record aggregators. - baseline_ra = record_aggregator(dependent_variables, args.control_variables) - observed_ra = record_aggregator(dependent_variables, args.control_variables) - - # Duplicate dependent variables: one for baseline results, one for observed - # results. - baseline_suffix = " - `{0}`".format( - args.baseline_input_file - ) - observed_suffix = " - `{0}`".format( - args.observed_input_file - ) - - for var in dependent_variables: - # Remove the existing quantity variable: - # - # [ ..., a, b, c, ... ] - # ^- remove b at index i - # - (quantity_idx, quantity_units) = iom.remove_variable(var.quantity) - - # If the `--output-all-variables` option was specified, add the new baseline - # and observed quantity variables. Note that we insert in the reverse of - # the order we desire (which is baseline then observed): - # - # [ ..., a, b_1, c, ... ] - # ^- insert b_1 at index i - # - # [ ..., a, b_0, b_1, c, ... ] - # ^- insert b_0 at index i - # - if args.output_all_variables: - iom.insert_variable( - quantity_idx, var.quantity + observed_suffix, quantity_units - ) - iom.insert_variable( - quantity_idx, var.quantity + baseline_suffix, quantity_units - ) - - # Remove the existing uncertainty variable. - (uncertainty_idx, uncertainty_units) = iom.remove_variable(var.uncertainty) - - # If the `--output-all-variables` option was specified, add the new baseline - # and observed uncertainty variables. - if args.output_all_variables: - iom.insert_variable( - uncertainty_idx, var.uncertainty + observed_suffix, uncertainty_units - ) - iom.insert_variable( - uncertainty_idx, var.uncertainty + baseline_suffix, uncertainty_units - ) - - try: - # Remove the existing sample size variable. - (sample_size_idx, sample_size_units) = iom.remove_variable(var.sample_size) - - # If the `--output-all-variables` option was specified, add the new - # baseline and observed sample size variables. - if args.output_all_variables: - iom.insert_variable( - sample_size_idx, var.sample_size + observed_suffix, sample_size_units - ) - iom.insert_variable( - sample_size_idx, var.sample_size + baseline_suffix, sample_size_units - ) - except ValueError: - # This is alright, because dependent variables may share the same sample - # size variable. - pass - - for var in args.control_variables: - iom.remove_variable(var) - - # Add change variables. 
- absolute_change_suffix = " - Change (`{0}` - `{1}`)".format( - args.observed_input_file, args.baseline_input_file - ) - - percent_change_suffix = " - % Change (`{0}` to `{1}`)".format( - args.observed_input_file, args.baseline_input_file - ) - - for var in dependent_variables: - iom.append_variable(var.quantity + absolute_change_suffix, var.units) - iom.append_variable(var.uncertainty + absolute_change_suffix, var.units) - iom.append_variable(var.quantity + percent_change_suffix, "") - iom.append_variable(var.uncertainty + percent_change_suffix, "") - - # Add all baseline input data to the `record_aggregator`. - for record in iom.baseline(): - baseline_ra.append(record) - - for record in iom.observed(): - observed_ra.append(record) - - iom.write_header() - - # Compare and output results. - for distinguishing_values, observed_dependent_values in observed_ra: - try: - baseline_dependent_values = baseline_ra[distinguishing_values] - except KeyError: - assert False, \ - "Distinguishing value `" + \ - str(baseline_ra.key_from_dict(distinguishing_values)) + \ - "` was not found in the baseline results." - - statistically_significant_change = False - - record = distinguishing_values.copy() - - # Compute changes, add the values and changes to the record, and identify - # changes that are statistically significant. - for var in dependent_variables: - # Compute changes. - baseline_quantity = baseline_dependent_values[var.quantity] - baseline_uncertainty = baseline_dependent_values[var.uncertainty] - baseline_sample_size = baseline_dependent_values[var.sample_size] - - observed_quantity = observed_dependent_values[var.quantity] - observed_uncertainty = observed_dependent_values[var.uncertainty] - observed_sample_size = observed_dependent_values[var.sample_size] - - (abs_change, abs_change_unc, per_change, per_change_unc) = \ - percent_change_uncertainty( - baseline_quantity, baseline_uncertainty, - observed_quantity, observed_uncertainty - ) - - # Round the change quantities and uncertainties to the significant digit - # of uncertainty. - try: - abs_change_sigdig = max( - find_significant_digit(abs_change), - find_significant_digit(abs_change_unc), - ) - -# abs_change = round_with_int_conversion( -# abs_change, abs_change_sigdig -# ) -# abs_change_unc = round_with_int_conversion( -# abs_change_unc, abs_change_sigdig -# ) - except: - # Any value errors should be due to NaNs returned by - # `percent_change_uncertainty` because quantities or change in - # quantities was 0. We can ignore these. - pass - - try: - per_change_sigdig = max( - find_significant_digit(per_change), - find_significant_digit(per_change_unc) - ) - -# per_change = round_with_int_conversion( -# per_change, per_change_sigdig -# ) -# per_change_unc = round_with_int_conversion( -# per_change_unc, per_change_sigdig -# ) - except: - # Any value errors should be due to NaNs returned by - # `percent_change_uncertainty` because quantities or change in - # quantities was 0. We can ignore these. - pass - - # Add the values (if the `--output-all-variables` option was specified) - # and the changes to the record. Note that the record's schema is - # different from the original schema. If multiple dependent variables - # share the same sample size variable, it's fine - they will overwrite - # each other, but with the same value. 
- if args.output_all_variables: - record[var.quantity + baseline_suffix] = baseline_quantity - record[var.uncertainty + baseline_suffix] = baseline_uncertainty - record[var.sample_size + baseline_suffix] = baseline_sample_size - record[var.quantity + observed_suffix] = observed_quantity - record[var.uncertainty + observed_suffix] = observed_uncertainty - record[var.sample_size + observed_suffix] = observed_sample_size - - record[var.quantity + absolute_change_suffix] = abs_change - record[var.uncertainty + absolute_change_suffix] = abs_change_unc - record[var.quantity + percent_change_suffix] = per_change - record[var.uncertainty + percent_change_suffix] = per_change_unc - - # If the ranges of uncertainty don't overlap and the percentage - # change is greater than the change threshold, then the change is - # statistically significant. - overlap = ranges_overlap_uncertainty( - baseline_quantity, baseline_uncertainty, - observed_quantity, observed_uncertainty - ) - if not overlap and per_change >= args.change_threshold: - statistically_significant_change = True - - # Print the record if a statistically significant change was found or if the - # `--output-all-datapoints` option was specified. - if args.output_all_datapoints or statistically_significant_change: - iom.write(record) - diff --git a/spaces/CVPR/LIVE/thrust/thrust/detail/minmax.h b/spaces/CVPR/LIVE/thrust/thrust/detail/minmax.h deleted file mode 100644 index f59c649629006e606c8b293a2301ab19bff2d7a8..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/detail/minmax.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include <thrust/detail/config.h> - -namespace thrust -{ - - -template<typename T, typename BinaryPredicate> -__host__ __device__ - T min THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs, BinaryPredicate comp) -{ - return comp(rhs, lhs) ? rhs : lhs; -} // end min() - -template<typename T> -__host__ __device__ - T min THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs) -{ - return rhs < lhs ? rhs : lhs; -} // end min() - -template<typename T, typename BinaryPredicate> -__host__ __device__ - T max THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs, BinaryPredicate comp) -{ - return comp(lhs,rhs) ? rhs : lhs; -} // end max() - -template<typename T> -__host__ __device__ - T max THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs) -{ - return lhs < rhs ? rhs : lhs; -} // end max() - - -} // end thrust - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/for_each.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/for_each.h deleted file mode 100644 index 750b7e829b58f26c8cdd2433cac26817afbad6d4..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/for_each.h +++ /dev/null @@ -1,159 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -// TODO: Move into system::cuda - -#pragma once - -#include -#include - -#if THRUST_CPP_DIALECT >= 2014 - -#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC - -#include - -#include -#include -#include -#include -#include - -#include - -namespace thrust -{ - -namespace system { namespace cuda { namespace detail -{ - -template -struct async_for_each_fn -{ - ForwardIt first; - UnaryFunction f; - - __host__ __device__ - async_for_each_fn(ForwardIt&& first_, UnaryFunction&& f_) - : first(std::move(first_)), f(std::move(f_)) - {} - - template - __host__ __device__ - void operator()(Index idx) - { - f(thrust::raw_reference_cast(first[idx])); - } -}; - -template < - typename DerivedPolicy -, typename ForwardIt, typename Size, typename UnaryFunction -> -auto async_for_each_n( - execution_policy& policy, - ForwardIt first, - Size n, - UnaryFunction func -) -> unique_eager_event -{ - unique_eager_event e; - - // Set up stream with dependencies. - - cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy); - - if (thrust::cuda_cub::default_stream() != user_raw_stream) - { - e = make_dependent_event( - std::tuple_cat( - std::make_tuple( - unique_stream(nonowning, user_raw_stream) - ) - , extract_dependencies( - std::move(thrust::detail::derived_cast(policy)) - ) - ) - ); - } - else - { - e = make_dependent_event( - extract_dependencies( - std::move(thrust::detail::derived_cast(policy)) - ) - ); - } - - // Run for_each. - - async_for_each_fn wrapped( - std::move(first), std::move(func) - ); - - thrust::cuda_cub::throw_on_error( - thrust::cuda_cub::__parallel_for::parallel_for( - n, std::move(wrapped), e.stream().native_handle() - ) - , "after for_each launch" - ); - - return e; -} - -}}} // namespace system::cuda::detail - -namespace cuda_cub -{ - -// ADL entry point. 
-template < - typename DerivedPolicy -, typename ForwardIt, typename Sentinel, typename UnaryFunction -> -auto async_for_each( - execution_policy& policy, - ForwardIt first, - Sentinel last, - UnaryFunction&& func -) -THRUST_RETURNS( - thrust::system::cuda::detail::async_for_each_n( - policy, first, distance(first, last), THRUST_FWD(func) - ) -); - -} // cuda_cub - -} // end namespace thrust - -#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC - -#endif - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/sort.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/sort.h deleted file mode 100644 index 1f6118c90bf6345a1fd4d6eb2f05d2630911fa64..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/sort.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// the purpose of this header is to #include the sort.h header -// of the sequential, host, and device systems. It should be #included in any -// code which uses adl to dispatch sort - -#include - -// SCons can't see through the #defines below to figure out what this header -// includes, so we fake it out by specifying all possible files we might end up -// including inside an #if 0. -#if 0 -#include -#include -#include -#include -#endif - -#define __THRUST_HOST_SYSTEM_SORT_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/sort.h> -#include __THRUST_HOST_SYSTEM_SORT_HEADER -#undef __THRUST_HOST_SYSTEM_SORT_HEADER - -#define __THRUST_DEVICE_SYSTEM_SORT_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/sort.h> -#include __THRUST_DEVICE_SYSTEM_SORT_HEADER -#undef __THRUST_DEVICE_SYSTEM_SORT_HEADER - diff --git a/spaces/CVPR/LIVE/thrust/thrust/type_traits/is_execution_policy.h b/spaces/CVPR/LIVE/thrust/thrust/type_traits/is_execution_policy.h deleted file mode 100644 index 3f2f7ef80a702cacce5d544f4e7d58691a3b3d92..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/type_traits/is_execution_policy.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2018 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -#include -#include - -namespace thrust -{ - -/// Unary metafunction that is \c true if \c T is an \a ExecutionPolicy and -/// \c false otherwise. 
-template -#if THRUST_CPP_DIALECT >= 2011 -using is_execution_policy = -#else -struct is_execution_policy : -#endif - detail::is_base_of -#if THRUST_CPP_DIALECT < 2011 -{} -#endif -; - -/// constexpr bool that is \c true if \c T is an \a ExecutionPolicy -/// and \c false otherwise. -#if THRUST_CPP_DIALECT >= 2014 -template -constexpr bool is_execution_policy_v = is_execution_policy::value; -#endif - -} // end namespace thrust - - diff --git a/spaces/CVPR/Text2Human/Text2Human/train_vqvae.py b/spaces/CVPR/Text2Human/Text2Human/train_vqvae.py deleted file mode 100644 index 107702af553e9acb6281586b447279006b304e24..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Text2Human/Text2Human/train_vqvae.py +++ /dev/null @@ -1,132 +0,0 @@ -import argparse -import logging -import os -import os.path as osp -import random -import time - -import torch - -from data.segm_attr_dataset import DeepFashionAttrSegmDataset -from models import create_model -from utils.logger import MessageLogger, get_root_logger, init_tb_logger -from utils.options import dict2str, dict_to_nonedict, parse -from utils.util import make_exp_dirs - - -def main(): - # options - parser = argparse.ArgumentParser() - parser.add_argument('-opt', type=str, help='Path to option YAML file.') - args = parser.parse_args() - opt = parse(args.opt, is_train=True) - - # mkdir and loggers - make_exp_dirs(opt) - log_file = osp.join(opt['path']['log'], f"train_{opt['name']}.log") - logger = get_root_logger( - logger_name='base', log_level=logging.INFO, log_file=log_file) - logger.info(dict2str(opt)) - # initialize tensorboard logger - tb_logger = None - if opt['use_tb_logger'] and 'debug' not in opt['name']: - tb_logger = init_tb_logger(log_dir='./tb_logger/' + opt['name']) - - # convert to NoneDict, which returns None for missing keys - opt = dict_to_nonedict(opt) - - # set up data loader - train_dataset = DeepFashionAttrSegmDataset( - img_dir=opt['train_img_dir'], - segm_dir=opt['segm_dir'], - pose_dir=opt['pose_dir'], - ann_dir=opt['train_ann_file'], - xflip=True) - train_loader = torch.utils.data.DataLoader( - dataset=train_dataset, - batch_size=opt['batch_size'], - shuffle=True, - num_workers=opt['num_workers'], - persistent_workers=True, - drop_last=True) - logger.info(f'Number of train set: {len(train_dataset)}.') - opt['max_iters'] = opt['num_epochs'] * len( - train_dataset) // opt['batch_size'] - - val_dataset = DeepFashionAttrSegmDataset( - img_dir=opt['train_img_dir'], - segm_dir=opt['segm_dir'], - pose_dir=opt['pose_dir'], - ann_dir=opt['val_ann_file']) - val_loader = torch.utils.data.DataLoader( - dataset=val_dataset, batch_size=1, shuffle=False) - logger.info(f'Number of val set: {len(val_dataset)}.') - - test_dataset = DeepFashionAttrSegmDataset( - img_dir=opt['test_img_dir'], - segm_dir=opt['segm_dir'], - pose_dir=opt['pose_dir'], - ann_dir=opt['test_ann_file']) - test_loader = torch.utils.data.DataLoader( - dataset=test_dataset, batch_size=1, shuffle=False) - logger.info(f'Number of test set: {len(test_dataset)}.') - - current_iter = 0 - best_epoch = None - best_loss = 100000 - - model = create_model(opt) - - data_time, iter_time = 0, 0 - current_iter = 0 - - # create message logger (formatted outputs) - msg_logger = MessageLogger(opt, current_iter, tb_logger) - - for epoch in range(opt['num_epochs']): - lr = model.update_learning_rate(epoch) - - for _, batch_data in enumerate(train_loader): - data_time = time.time() - data_time - - current_iter += 1 - - model.optimize_parameters(batch_data, current_iter) - - iter_time = 
time.time() - iter_time - if current_iter % opt['print_freq'] == 0: - log_vars = {'epoch': epoch, 'iter': current_iter} - log_vars.update({'lrs': [lr]}) - log_vars.update({'time': iter_time, 'data_time': data_time}) - log_vars.update(model.get_current_log()) - msg_logger(log_vars) - - data_time = time.time() - iter_time = time.time() - - if epoch % opt['val_freq'] == 0: - save_dir = f'{opt["path"]["visualization"]}/valset/epoch_{epoch:03d}' # noqa - os.makedirs(save_dir, exist_ok=opt['debug']) - val_loss_total = model.inference(val_loader, save_dir) - - save_dir = f'{opt["path"]["visualization"]}/testset/epoch_{epoch:03d}' # noqa - os.makedirs(save_dir, exist_ok=opt['debug']) - test_loss_total = model.inference(test_loader, save_dir) - - logger.info(f'Epoch: {epoch}, ' - f'val_loss_total: {val_loss_total}, ' - f'test_loss_total: {test_loss_total}.') - - if test_loss_total < best_loss: - best_epoch = epoch - best_loss = test_loss_total - - logger.info(f'Best epoch: {best_epoch}, ' - f'Best test loss: {best_loss: .4f}.') - - # save model - model.save_network(f'{opt["path"]["models"]}/epoch{epoch}.pth') - - -if __name__ == '__main__': - main() diff --git a/spaces/CVPR/WALT/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py b/spaces/CVPR/WALT/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py deleted file mode 100644 index f275e430d1b57c4d9df57387b8f3ae6f0ff68cf1..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py +++ /dev/null @@ -1,157 +0,0 @@ -import numpy as np -import torch - -from ..builder import BBOX_SAMPLERS -from .random_sampler import RandomSampler - - -@BBOX_SAMPLERS.register_module() -class IoUBalancedNegSampler(RandomSampler): - """IoU Balanced Sampling. - - arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) - - Sampling proposals according to their IoU. `floor_fraction` of needed RoIs - are sampled from proposals whose IoU are lower than `floor_thr` randomly. - The others are sampled from proposals whose IoU are higher than - `floor_thr`. These proposals are sampled from some bins evenly, which are - split by `num_bins` via IoU evenly. - - Args: - num (int): number of proposals. - pos_fraction (float): fraction of positive proposals. - floor_thr (float): threshold (minimum) IoU for IoU balanced sampling, - set to -1 if all using IoU balanced sampling. - floor_fraction (float): sampling fraction of proposals under floor_thr. - num_bins (int): number of bins in IoU balanced sampling. - """ - - def __init__(self, - num, - pos_fraction, - floor_thr=-1, - floor_fraction=0, - num_bins=3, - **kwargs): - super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, - **kwargs) - assert floor_thr >= 0 or floor_thr == -1 - assert 0 <= floor_fraction <= 1 - assert num_bins >= 1 - - self.floor_thr = floor_thr - self.floor_fraction = floor_fraction - self.num_bins = num_bins - - def sample_via_interval(self, max_overlaps, full_set, num_expected): - """Sample according to the iou interval. - - Args: - max_overlaps (torch.Tensor): IoU between bounding boxes and ground - truth boxes. 
- full_set (set(int)): A full set of indices of boxes. - num_expected (int): Number of expected samples. - - Returns: - np.ndarray: Indices of samples - """ - max_iou = max_overlaps.max() - iou_interval = (max_iou - self.floor_thr) / self.num_bins - per_num_expected = int(num_expected / self.num_bins) - - sampled_inds = [] - for i in range(self.num_bins): - start_iou = self.floor_thr + i * iou_interval - end_iou = self.floor_thr + (i + 1) * iou_interval - tmp_set = set( - np.where( - np.logical_and(max_overlaps >= start_iou, - max_overlaps < end_iou))[0]) - tmp_inds = list(tmp_set & full_set) - if len(tmp_inds) > per_num_expected: - tmp_sampled_set = self.random_choice(tmp_inds, - per_num_expected) - else: - tmp_sampled_set = np.array(tmp_inds, dtype=np.int) - sampled_inds.append(tmp_sampled_set) - - sampled_inds = np.concatenate(sampled_inds) - if len(sampled_inds) < num_expected: - num_extra = num_expected - len(sampled_inds) - extra_inds = np.array(list(full_set - set(sampled_inds))) - if len(extra_inds) > num_extra: - extra_inds = self.random_choice(extra_inds, num_extra) - sampled_inds = np.concatenate([sampled_inds, extra_inds]) - - return sampled_inds - - def _sample_neg(self, assign_result, num_expected, **kwargs): - """Sample negative boxes. - - Args: - assign_result (:obj:`AssignResult`): The assigned results of boxes. - num_expected (int): The number of expected negative samples - - Returns: - Tensor or ndarray: sampled indices. - """ - neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) - if neg_inds.numel() != 0: - neg_inds = neg_inds.squeeze(1) - if len(neg_inds) <= num_expected: - return neg_inds - else: - max_overlaps = assign_result.max_overlaps.cpu().numpy() - # balance sampling for negative samples - neg_set = set(neg_inds.cpu().numpy()) - - if self.floor_thr > 0: - floor_set = set( - np.where( - np.logical_and(max_overlaps >= 0, - max_overlaps < self.floor_thr))[0]) - iou_sampling_set = set( - np.where(max_overlaps >= self.floor_thr)[0]) - elif self.floor_thr == 0: - floor_set = set(np.where(max_overlaps == 0)[0]) - iou_sampling_set = set( - np.where(max_overlaps > self.floor_thr)[0]) - else: - floor_set = set() - iou_sampling_set = set( - np.where(max_overlaps > self.floor_thr)[0]) - # for sampling interval calculation - self.floor_thr = 0 - - floor_neg_inds = list(floor_set & neg_set) - iou_sampling_neg_inds = list(iou_sampling_set & neg_set) - num_expected_iou_sampling = int(num_expected * - (1 - self.floor_fraction)) - if len(iou_sampling_neg_inds) > num_expected_iou_sampling: - if self.num_bins >= 2: - iou_sampled_inds = self.sample_via_interval( - max_overlaps, set(iou_sampling_neg_inds), - num_expected_iou_sampling) - else: - iou_sampled_inds = self.random_choice( - iou_sampling_neg_inds, num_expected_iou_sampling) - else: - iou_sampled_inds = np.array( - iou_sampling_neg_inds, dtype=np.int) - num_expected_floor = num_expected - len(iou_sampled_inds) - if len(floor_neg_inds) > num_expected_floor: - sampled_floor_inds = self.random_choice( - floor_neg_inds, num_expected_floor) - else: - sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int) - sampled_inds = np.concatenate( - (sampled_floor_inds, iou_sampled_inds)) - if len(sampled_inds) < num_expected: - num_extra = num_expected - len(sampled_inds) - extra_inds = np.array(list(neg_set - set(sampled_inds))) - if len(extra_inds) > num_extra: - extra_inds = self.random_choice(extra_inds, num_extra) - sampled_inds = np.concatenate((sampled_inds, extra_inds)) - sampled_inds = 
torch.from_numpy(sampled_inds).long().to( - assign_result.gt_inds.device) - return sampled_inds diff --git a/spaces/CVPR/regionclip-demo/detectron2/evaluation/panoptic_evaluation.py b/spaces/CVPR/regionclip-demo/detectron2/evaluation/panoptic_evaluation.py deleted file mode 100644 index 9fb3462b7f9abf6feaa499976bfed526ebd17e31..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/evaluation/panoptic_evaluation.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import contextlib -import io -import itertools -import json -import logging -import numpy as np -import os -import tempfile -from collections import OrderedDict -from typing import Optional -from PIL import Image -from tabulate import tabulate - -from detectron2.data import MetadataCatalog -from detectron2.utils import comm -from detectron2.utils.file_io import PathManager - -from .evaluator import DatasetEvaluator - -logger = logging.getLogger(__name__) - - -class COCOPanopticEvaluator(DatasetEvaluator): - """ - Evaluate Panoptic Quality metrics on COCO using PanopticAPI. - It saves panoptic segmentation prediction in `output_dir` - - It contains a synchronize call and has to be called from all workers. - """ - - def __init__(self, dataset_name: str, output_dir: Optional[str] = None): - """ - Args: - dataset_name: name of the dataset - output_dir: output directory to save results for evaluation. - """ - self._metadata = MetadataCatalog.get(dataset_name) - self._thing_contiguous_id_to_dataset_id = { - v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() - } - self._stuff_contiguous_id_to_dataset_id = { - v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items() - } - - self._output_dir = output_dir - if self._output_dir is not None: - PathManager.mkdirs(self._output_dir) - - def reset(self): - self._predictions = [] - - def _convert_category_id(self, segment_info): - isthing = segment_info.pop("isthing", None) - if isthing is None: - # the model produces panoptic category id directly. No more conversion needed - return segment_info - if isthing is True: - segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[ - segment_info["category_id"] - ] - else: - segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[ - segment_info["category_id"] - ] - return segment_info - - def process(self, inputs, outputs): - from panopticapi.utils import id2rgb - - for input, output in zip(inputs, outputs): - panoptic_img, segments_info = output["panoptic_seg"] - panoptic_img = panoptic_img.cpu().numpy() - if segments_info is None: - # If "segments_info" is None, we assume "panoptic_img" is a - # H*W int32 image storing the panoptic_id in the format of - # category_id * label_divisor + instance_id. We reserve -1 for - # VOID label, and add 1 to panoptic_img since the official - # evaluation script uses 0 for VOID label. - label_divisor = self._metadata.label_divisor - segments_info = [] - for panoptic_label in np.unique(panoptic_img): - if panoptic_label == -1: - # VOID region. - continue - pred_class = panoptic_label // label_divisor - isthing = ( - pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values() - ) - segments_info.append( - { - "id": int(panoptic_label) + 1, - "category_id": int(pred_class), - "isthing": bool(isthing), - } - ) - # Official evaluation script uses 0 for VOID label. 
- panoptic_img += 1 - - file_name = os.path.basename(input["file_name"]) - file_name_png = os.path.splitext(file_name)[0] + ".png" - with io.BytesIO() as out: - Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG") - segments_info = [self._convert_category_id(x) for x in segments_info] - self._predictions.append( - { - "image_id": input["image_id"], - "file_name": file_name_png, - "png_string": out.getvalue(), - "segments_info": segments_info, - } - ) - - def evaluate(self): - comm.synchronize() - - self._predictions = comm.gather(self._predictions) - self._predictions = list(itertools.chain(*self._predictions)) - if not comm.is_main_process(): - return - - # PanopticApi requires local files - gt_json = PathManager.get_local_path(self._metadata.panoptic_json) - gt_folder = PathManager.get_local_path(self._metadata.panoptic_root) - - with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir: - logger.info("Writing all panoptic predictions to {} ...".format(pred_dir)) - for p in self._predictions: - with open(os.path.join(pred_dir, p["file_name"]), "wb") as f: - f.write(p.pop("png_string")) - - with open(gt_json, "r") as f: - json_data = json.load(f) - json_data["annotations"] = self._predictions - - output_dir = self._output_dir or pred_dir - predictions_json = os.path.join(output_dir, "predictions.json") - with PathManager.open(predictions_json, "w") as f: - f.write(json.dumps(json_data)) - - from panopticapi.evaluation import pq_compute - - with contextlib.redirect_stdout(io.StringIO()): - pq_res = pq_compute( - gt_json, - PathManager.get_local_path(predictions_json), - gt_folder=gt_folder, - pred_folder=pred_dir, - ) - - res = {} - res["PQ"] = 100 * pq_res["All"]["pq"] - res["SQ"] = 100 * pq_res["All"]["sq"] - res["RQ"] = 100 * pq_res["All"]["rq"] - res["PQ_th"] = 100 * pq_res["Things"]["pq"] - res["SQ_th"] = 100 * pq_res["Things"]["sq"] - res["RQ_th"] = 100 * pq_res["Things"]["rq"] - res["PQ_st"] = 100 * pq_res["Stuff"]["pq"] - res["SQ_st"] = 100 * pq_res["Stuff"]["sq"] - res["RQ_st"] = 100 * pq_res["Stuff"]["rq"] - - results = OrderedDict({"panoptic_seg": res}) - _print_panoptic_results(pq_res) - - return results - - -def _print_panoptic_results(pq_res): - headers = ["", "PQ", "SQ", "RQ", "#categories"] - data = [] - for name in ["All", "Things", "Stuff"]: - row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]] - data.append(row) - table = tabulate( - data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center" - ) - logger.info("Panoptic Evaluation Results:\n" + table) - - -if __name__ == "__main__": - from detectron2.utils.logger import setup_logger - - logger = setup_logger() - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument("--gt-json") - parser.add_argument("--gt-dir") - parser.add_argument("--pred-json") - parser.add_argument("--pred-dir") - args = parser.parse_args() - - from panopticapi.evaluation import pq_compute - - with contextlib.redirect_stdout(io.StringIO()): - pq_res = pq_compute( - args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir - ) - _print_panoptic_results(pq_res) diff --git a/spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/regnet.py b/spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/regnet.py deleted file mode 100644 index 3533d63385d1324cfc1559eae9576b3fa52585af..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/regnet.py +++ /dev/null @@ -1,452 +0,0 
@@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Implementation of RegNet models from :paper:`dds` and :paper:`scaling`. - -This code is adapted from https://github.com/facebookresearch/pycls with minimal modifications. -Some code duplication exists between RegNet and ResNets (e.g., ResStem) in order to simplify -model loading. -""" - -import numpy as np -from torch import nn - -from detectron2.layers import CNNBlockBase, ShapeSpec, get_norm - -from .backbone import Backbone - -__all__ = [ - "AnyNet", - "RegNet", - "ResStem", - "SimpleStem", - "VanillaBlock", - "ResBasicBlock", - "ResBottleneckBlock", -] - - -def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False): - """Helper for building a conv2d layer.""" - assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues." - s, p, g, b = stride, (k - 1) // 2, groups, bias - return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b) - - -def gap2d(): - """Helper for building a global average pooling layer.""" - return nn.AdaptiveAvgPool2d((1, 1)) - - -def pool2d(k, *, stride=1): - """Helper for building a pool2d layer.""" - assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues." - return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2) - - -def init_weights(m): - """Performs ResNet-style weight initialization.""" - if isinstance(m, nn.Conv2d): - # Note that there is no bias due to BN - fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out)) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1.0) - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - m.weight.data.normal_(mean=0.0, std=0.01) - m.bias.data.zero_() - - -class ResStem(CNNBlockBase): - """ResNet stem for ImageNet: 7x7, BN, AF, MaxPool.""" - - def __init__(self, w_in, w_out, norm, activation_class): - super().__init__(w_in, w_out, 4) - self.conv = conv2d(w_in, w_out, 7, stride=2) - self.bn = get_norm(norm, w_out) - self.af = activation_class() - self.pool = pool2d(3, stride=2) - - def forward(self, x): - for layer in self.children(): - x = layer(x) - return x - - -class SimpleStem(CNNBlockBase): - """Simple stem for ImageNet: 3x3, BN, AF.""" - - def __init__(self, w_in, w_out, norm, activation_class): - super().__init__(w_in, w_out, 2) - self.conv = conv2d(w_in, w_out, 3, stride=2) - self.bn = get_norm(norm, w_out) - self.af = activation_class() - - def forward(self, x): - for layer in self.children(): - x = layer(x) - return x - - -class SE(nn.Module): - """Squeeze-and-Excitation (SE) block: AvgPool, FC, Act, FC, Sigmoid.""" - - def __init__(self, w_in, w_se, activation_class): - super().__init__() - self.avg_pool = gap2d() - self.f_ex = nn.Sequential( - conv2d(w_in, w_se, 1, bias=True), - activation_class(), - conv2d(w_se, w_in, 1, bias=True), - nn.Sigmoid(), - ) - - def forward(self, x): - return x * self.f_ex(self.avg_pool(x)) - - -class VanillaBlock(CNNBlockBase): - """Vanilla block: [3x3 conv, BN, Relu] x2.""" - - def __init__(self, w_in, w_out, stride, norm, activation_class, _params): - super().__init__(w_in, w_out, stride) - self.a = conv2d(w_in, w_out, 3, stride=stride) - self.a_bn = get_norm(norm, w_out) - self.a_af = activation_class() - self.b = conv2d(w_out, w_out, 3) - self.b_bn = get_norm(norm, w_out) - self.b_af = activation_class() - - def forward(self, x): - for layer in self.children(): - x = layer(x) - return x - - -class BasicTransform(nn.Module): - """Basic transformation: [3x3 conv, BN, 
Relu] x2.""" - - def __init__(self, w_in, w_out, stride, norm, activation_class, _params): - super().__init__() - self.a = conv2d(w_in, w_out, 3, stride=stride) - self.a_bn = get_norm(norm, w_out) - self.a_af = activation_class() - self.b = conv2d(w_out, w_out, 3) - self.b_bn = get_norm(norm, w_out) - self.b_bn.final_bn = True - - def forward(self, x): - for layer in self.children(): - x = layer(x) - return x - - -class ResBasicBlock(CNNBlockBase): - """Residual basic block: x + f(x), f = basic transform.""" - - def __init__(self, w_in, w_out, stride, norm, activation_class, params): - super().__init__(w_in, w_out, stride) - self.proj, self.bn = None, None - if (w_in != w_out) or (stride != 1): - self.proj = conv2d(w_in, w_out, 1, stride=stride) - self.bn = get_norm(norm, w_out) - self.f = BasicTransform(w_in, w_out, stride, norm, activation_class, params) - self.af = activation_class() - - def forward(self, x): - x_p = self.bn(self.proj(x)) if self.proj else x - return self.af(x_p + self.f(x)) - - -class BottleneckTransform(nn.Module): - """Bottleneck transformation: 1x1, 3x3 [+SE], 1x1.""" - - def __init__(self, w_in, w_out, stride, norm, activation_class, params): - super().__init__() - w_b = int(round(w_out * params["bot_mul"])) - w_se = int(round(w_in * params["se_r"])) - groups = w_b // params["group_w"] - self.a = conv2d(w_in, w_b, 1) - self.a_bn = get_norm(norm, w_b) - self.a_af = activation_class() - self.b = conv2d(w_b, w_b, 3, stride=stride, groups=groups) - self.b_bn = get_norm(norm, w_b) - self.b_af = activation_class() - self.se = SE(w_b, w_se, activation_class) if w_se else None - self.c = conv2d(w_b, w_out, 1) - self.c_bn = get_norm(norm, w_out) - self.c_bn.final_bn = True - - def forward(self, x): - for layer in self.children(): - x = layer(x) - return x - - -class ResBottleneckBlock(CNNBlockBase): - """Residual bottleneck block: x + f(x), f = bottleneck transform.""" - - def __init__(self, w_in, w_out, stride, norm, activation_class, params): - super().__init__(w_in, w_out, stride) - self.proj, self.bn = None, None - if (w_in != w_out) or (stride != 1): - self.proj = conv2d(w_in, w_out, 1, stride=stride) - self.bn = get_norm(norm, w_out) - self.f = BottleneckTransform(w_in, w_out, stride, norm, activation_class, params) - self.af = activation_class() - - def forward(self, x): - x_p = self.bn(self.proj(x)) if self.proj else x - return self.af(x_p + self.f(x)) - - -class AnyStage(nn.Module): - """AnyNet stage (sequence of blocks w/ the same output shape).""" - - def __init__(self, w_in, w_out, stride, d, block_class, norm, activation_class, params): - super().__init__() - for i in range(d): - block = block_class(w_in, w_out, stride, norm, activation_class, params) - self.add_module("b{}".format(i + 1), block) - stride, w_in = 1, w_out - - def forward(self, x): - for block in self.children(): - x = block(x) - return x - - -class AnyNet(Backbone): - """AnyNet model. See :paper:`dds`.""" - - def __init__( - self, - *, - stem_class, - stem_width, - block_class, - depths, - widths, - group_widths, - strides, - bottleneck_ratios, - se_ratio, - activation_class, - freeze_at=0, - norm="BN", - out_features=None, - ): - """ - Args: - stem_class (callable): A callable taking 4 arguments (channels in, channels out, - normalization, callable returning an activation function) that returns another - callable implementing the stem module. - stem_width (int): The number of output channels that the stem produces. 
- block_class (callable): A callable taking 6 arguments (channels in, channels out, - stride, normalization, callable returning an activation function, a dict of - block-specific parameters) that returns another callable implementing the repeated - block module. - depths (list[int]): Number of blocks in each stage. - widths (list[int]): For each stage, the number of output channels of each block. - group_widths (list[int]): For each stage, the number of channels per group in group - convolution, if the block uses group convolution. - strides (list[int]): The stride that each network stage applies to its input. - bottleneck_ratios (list[float]): For each stage, the ratio of the number of bottleneck - channels to the number of block input channels (or, equivalently, output channels), - if the block uses a bottleneck. - se_ratio (float): The ratio of the number of channels used inside the squeeze-excitation - (SE) module to it number of input channels, if SE the block uses SE. - activation_class (callable): A callable taking no arguments that returns another - callable implementing an activation function. - freeze_at (int): The number of stages at the beginning to freeze. - see :meth:`freeze` for detailed explanation. - norm (str or callable): normalization for all conv layers. - See :func:`layers.get_norm` for supported format. - out_features (list[str]): name of the layers whose outputs should - be returned in forward. RegNet's use "stem" and "s1", "s2", etc for the stages after - the stem. If None, will return the output of the last layer. - """ - super().__init__() - self.stem = stem_class(3, stem_width, norm, activation_class) - - current_stride = self.stem.stride - self._out_feature_strides = {"stem": current_stride} - self._out_feature_channels = {"stem": self.stem.out_channels} - self.stages_and_names = [] - prev_w = stem_width - - for i, (d, w, s, b, g) in enumerate( - zip(depths, widths, strides, bottleneck_ratios, group_widths) - ): - params = {"bot_mul": b, "group_w": g, "se_r": se_ratio} - stage = AnyStage(prev_w, w, s, d, block_class, norm, activation_class, params) - name = "s{}".format(i + 1) - self.add_module(name, stage) - self.stages_and_names.append((stage, name)) - self._out_feature_strides[name] = current_stride = int( - current_stride * np.prod([k.stride for k in stage.children()]) - ) - self._out_feature_channels[name] = list(stage.children())[-1].out_channels - prev_w = w - - self.apply(init_weights) - - if out_features is None: - out_features = [name] - self._out_features = out_features - assert len(self._out_features) - children = [x[0] for x in self.named_children()] - for out_feature in self._out_features: - assert out_feature in children, "Available children: {} does not include {}".format( - ", ".join(children), out_feature - ) - self.freeze(freeze_at) - - def forward(self, x): - """ - Args: - x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. - - Returns: - dict[str->Tensor]: names and the corresponding features - """ - assert x.dim() == 4, f"Model takes an input of shape (N, C, H, W). Got {x.shape} instead!" 
- outputs = {} - x = self.stem(x) - if "stem" in self._out_features: - outputs["stem"] = x - for stage, name in self.stages_and_names: - x = stage(x) - if name in self._out_features: - outputs[name] = x - return outputs - - def output_shape(self): - return { - name: ShapeSpec( - channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] - ) - for name in self._out_features - } - - def freeze(self, freeze_at=0): - """ - Freeze the first several stages of the model. Commonly used in fine-tuning. - - Layers that produce the same feature map spatial size are defined as one - "stage" by :paper:`FPN`. - - Args: - freeze_at (int): number of stages to freeze. - `1` means freezing the stem. `2` means freezing the stem and - one residual stage, etc. - - Returns: - nn.Module: this model itself - """ - if freeze_at >= 1: - self.stem.freeze() - for idx, (stage, _) in enumerate(self.stages_and_names, start=2): - if freeze_at >= idx: - for block in stage.children(): - block.freeze() - return self - - -def adjust_block_compatibility(ws, bs, gs): - """Adjusts the compatibility of widths, bottlenecks, and groups.""" - assert len(ws) == len(bs) == len(gs) - assert all(w > 0 and b > 0 and g > 0 for w, b, g in zip(ws, bs, gs)) - vs = [int(max(1, w * b)) for w, b in zip(ws, bs)] - gs = [int(min(g, v)) for g, v in zip(gs, vs)] - ms = [np.lcm(g, b) if b > 1 else g for g, b in zip(gs, bs)] - vs = [max(m, int(round(v / m) * m)) for v, m in zip(vs, ms)] - ws = [int(v / b) for v, b in zip(vs, bs)] - assert all(w * b % g == 0 for w, b, g in zip(ws, bs, gs)) - return ws, bs, gs - - -def generate_regnet_parameters(w_a, w_0, w_m, d, q=8): - """Generates per stage widths and depths from RegNet parameters.""" - assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0 - # Generate continuous per-block ws - ws_cont = np.arange(d) * w_a + w_0 - # Generate quantized per-block ws - ks = np.round(np.log(ws_cont / w_0) / np.log(w_m)) - ws_all = w_0 * np.power(w_m, ks) - ws_all = np.round(np.divide(ws_all, q)).astype(int) * q - # Generate per stage ws and ds (assumes ws_all are sorted) - ws, ds = np.unique(ws_all, return_counts=True) - # Compute number of actual stages and total possible stages - num_stages, total_stages = len(ws), ks.max() + 1 - # Convert numpy arrays to lists and return - ws, ds, ws_all, ws_cont = (x.tolist() for x in (ws, ds, ws_all, ws_cont)) - return ws, ds, num_stages, total_stages, ws_all, ws_cont - - -class RegNet(AnyNet): - """RegNet model. See :paper:`dds`.""" - - def __init__( - self, - *, - stem_class, - stem_width, - block_class, - depth, - w_a, - w_0, - w_m, - group_width, - stride=2, - bottleneck_ratio=1.0, - se_ratio=0.0, - activation_class=None, - freeze_at=0, - norm="BN", - out_features=None, - ): - """ - Build a RegNet from the parameterization described in :paper:`dds` Section 3.3. - - Args: - See :class:`AnyNet` for arguments that are not listed here. - depth (int): Total number of blocks in the RegNet. - w_a (float): Factor by which block width would increase prior to quantizing block widths - by stage. See :paper:`dds` Section 3.3. - w_0 (int): Initial block width. See :paper:`dds` Section 3.3. - w_m (float): Parameter controlling block width quantization. - See :paper:`dds` Section 3.3. - group_width (int): Number of channels per group in group convolution, if the block uses - group convolution. 
- bottleneck_ratio (float): The ratio of the number of bottleneck channels to the number - of block input channels (or, equivalently, output channels), if the block uses a - bottleneck. - stride (int): The stride that each network stage applies to its input. - """ - ws, ds = generate_regnet_parameters(w_a, w_0, w_m, depth)[0:2] - ss = [stride for _ in ws] - bs = [bottleneck_ratio for _ in ws] - gs = [group_width for _ in ws] - ws, bs, gs = adjust_block_compatibility(ws, bs, gs) - - def default_activation_class(): - return nn.ReLU(inplace=True) - - super().__init__( - stem_class=stem_class, - stem_width=stem_width, - block_class=block_class, - depths=ds, - widths=ws, - strides=ss, - group_widths=gs, - bottleneck_ratios=bs, - se_ratio=se_ratio, - activation_class=default_activation_class - if activation_class is None - else activation_class, - freeze_at=freeze_at, - norm=norm, - out_features=out_features, - ) diff --git a/spaces/CofAI/chat.v2/Dockerfile b/spaces/CofAI/chat.v2/Dockerfile deleted file mode 100644 index 8a3000e46b2706d40cae6585898ee88696022bde..0000000000000000000000000000000000000000 --- a/spaces/CofAI/chat.v2/Dockerfile +++ /dev/null @@ -1,133 +0,0 @@ -# Use the official Python 3.9 image as the base image -FROM python:3.9 - -# Expose the port -EXPOSE 7860 - -# Keeps Python from generating .pyc files in the container -ENV PYTHONDONTWRITEBYTECODE=1 - -# Turns off buffering for easier container logging -ENV PYTHONUNBUFFERED=1 - -# Set the PYNGROK_CONFIG environment variable -ENV PYNGROK_CONFIG /tmp/pyngrok.yml - -# Set the NGROK_PATH environment variable to a writable location -ENV NGROK_PATH /tmp/ngrok - -# Copy requirements.txt into the container -COPY requirements.txt . - -# RUN apt-get update -# RUN apt-get install -y wget -# RUN wget -q https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb -# RUN apt-get install ./google-chrome-stable_current_amd64.deb -y - - -# RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - -# RUN sh -c 'echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list' -# RUN apt-get -y update -# RUN apt-get install -y google-chrome-stable - -# # install chromedriver -# RUN apt-get install -yqq unzip -# RUN wget -O /tmp/chromedriver.zip http://chromedriver.storage.googleapis.com/`curl -sS chromedriver.storage.googleapis.com/LATEST_RELEASE`/chromedriver_linux64.zip -# RUN unzip /tmp/chromedriver.zip chromedriver -d /usr/local/bin/ - - - - - -# RUN apt install wget -y -# RUN wget https://github.com/mozilla/geckodriver/releases/download/v0.32.0/geckodriver-v0.32.0-linux64.tar.gz -# RUN tar -xzvf geckodriver-v0.32.0-linux64.tar.gz -C /usr/local/bin -# RUN chmod +x /usr/local/bin/geckodriver -# RUN geckodriver -V - - -# RUN apt install firefox-esr -y -# RUN apt-get install firefox-geckodriver - -# Upgrade pip and install the required packages -RUN pip install --upgrade pip && \ - pip install -r requirements.txt - -# Install sudo and create the necessary directories before copying the files -RUN apt-get update && \ - apt-get install -y sudo && \ - mkdir -p /code/image - -# Creates a non-root user with an explicit UID and adds permission to access the /code folder -RUN adduser -u 5678 --disabled-password --gecos "" appuser && \ - usermod -aG sudo appuser && \ - usermod -aG root appuser && \ - chown -R appuser:appuser /code - -# Create the pyngrok bin directory and set the ownership and permissions for appuser -RUN mkdir -p 
/usr/local/lib/python3.9/site-packages/pyngrok/bin && \ - chown -R appuser:appuser /usr/local/lib/python3.9/site-packages/pyngrok/bin && \ - chmod -R 777 /usr/local/lib/python3.9/site-packages/pyngrok/bin - -RUN mkdir -p /.ngrok2 && \ - chown -R appuser:appuser /.ngrok2 && \ - chmod -R 777 /.ngrok2 - -RUN apt-get update && \ - apt-get install -y curl - -RUN echo "deb http://deb.debian.org/debian/ unstable main contrib non-free" >> /etc/apt/sources.list.d/debian.list - - -# RUN apt install firefox-esr && \ -# apt install geckodriver - -# Set the working directory and copy the files -WORKDIR /code - -# Set the ownership and permissions for the /code directory and its contents -RUN chown -R appuser:appuser /code && \ - chmod -R 777 /code - -COPY . /code - -# RUN chown -R appuser:appuser /code/data.csv && \ -# chmod -R 777 /code/data.csv - -# Copy the pyngrok.yml configuration file -COPY pyngrok.yml /tmp/pyngrok.yml - -# Set the TRANSFORMERS_CACHE environment variable to a cache directory inside /tmp -ENV TRANSFORMERS_CACHE /tmp/transformers_cache -ENV TORCH_HOME /tmp/torch_cache - -USER appuser - - -RUN git clone https://github.com/rphrp1985/gpt4f -# WORKDIR /gpt4f -# COPY . /gpt4f -# RUN cd gpt4f -# RUN ls - -# cp -R / /root/dest-folder -RUN cp -R gpt4f/* /code -RUN ls -CMD python run.py - - - - - - - -# Start the application using pyngrok -# CMD python main.py -# Get the public IP address and display it -# RUN curl -s https://api.ipify.org | xargs echo "Public IP:" -RUN pip install gunicorn - -# Start the Uvicorn server -# ENTRYPOINT ["python", "main.py"] -# CMD ["sh", "-c", "python main.py & sleep infinity"] -CMD ["gunicorn", "--bind", "0.0.0.0:7860","run:app"] \ No newline at end of file diff --git a/spaces/Cong723/gpt-academic-public/Dockerfile b/spaces/Cong723/gpt-academic-public/Dockerfile deleted file mode 100644 index da5053dbc7fc0accbd7b10fab87ca72feced8fe8..0000000000000000000000000000000000000000 --- a/spaces/Cong723/gpt-academic-public/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# This Dockerfile is for building an environment without local models; if you need local models such as chatglm, see docs/Dockerfile+ChatGLM -# How to build: first edit `config.py`, then docker build -t gpt-academic . -# How to run: docker run --rm -it --net=host gpt-academic -FROM python:3.11 - -RUN echo '[global]' > /etc/pip.conf && \ - echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \ - echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf - - -WORKDIR /gpt -COPY requirements.txt . -RUN pip3 install -r requirements.txt - -COPY . . 
- -# Optional step: pre-warm the modules -RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' - -CMD ["python3", "-u", "main.py"] diff --git a/spaces/CristianGonzalez281098/Cheto/app.py b/spaces/CristianGonzalez281098/Cheto/app.py deleted file mode 100644 index c135ec52602719e91b5fcc585ba2626efd4c1740..0000000000000000000000000000000000000000 --- a/spaces/CristianGonzalez281098/Cheto/app.py +++ /dev/null @@ -1,5 +0,0 @@ -import gradio as gr - -examples = [["The Moon's orbit around Earth has"], ["There once was a pineapple"]] - -gr.Interface.load("huggingface/gpt2", title = "Mi Demo Cristian", examples=examples).launch(); \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/merge/__init__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/merge/__init__.py deleted file mode 100644 index 10eff133fae5d025f940b962c232a39bd0c23a74..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/merge/__init__.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. -# -# Google Author(s): Behdad Esfahbod, Roozbeh Pournader - -from fontTools import ttLib -import fontTools.merge.base -from fontTools.merge.cmap import ( - computeMegaGlyphOrder, - computeMegaCmap, - renameCFFCharStrings, -) -from fontTools.merge.layout import layoutPreMerge, layoutPostMerge -from fontTools.merge.options import Options -import fontTools.merge.tables -from fontTools.misc.loggingTools import Timer -from functools import reduce -import sys -import logging - - -log = logging.getLogger("fontTools.merge") -timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO) - - -class Merger(object): - """Font merger. - - This class merges multiple files into a single OpenType font, taking into - account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and - cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across - all the fonts). - - If multiple glyphs map to the same Unicode value, and the glyphs are considered - sufficiently different (that is, they differ in any of paths, widths, or - height), then subsequent glyphs are renamed and a lookup in the ``locl`` - feature will be created to disambiguate them. For example, if the arguments - are an Arabic font and a Latin font and both contain a set of parentheses, - the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``, - and a lookup will be inserted into the ``locl`` feature (creating it if - necessary) under the ``latn`` script to substitute ``parenleft`` with - ``parenleft#1`` etc. - - Restrictions: - - - All fonts must have the same units per em. - - If duplicate glyph disambiguation takes place as described above then the - fonts must have a ``GSUB`` table. - - Attributes: - options: Currently unused. - """ - - def __init__(self, options=None): - - if not options: - options = Options() - - self.options = options - - def _openFonts(self, fontfiles): - fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] - for font, fontfile in zip(fonts, fontfiles): - font._merger__fontfile = fontfile - font._merger__name = font["name"].getDebugName(4) - return fonts - - def merge(self, fontfiles): - """Merges fonts together. - - Args: - fontfiles: A list of file names to be merged - - Returns: - A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on - this to write it out to an OTF file. - """ - # - # Settle on a mega glyph order. 
- # - fonts = self._openFonts(fontfiles) - glyphOrders = [list(font.getGlyphOrder()) for font in fonts] - computeMegaGlyphOrder(self, glyphOrders) - - # Take first input file sfntVersion - sfntVersion = fonts[0].sfntVersion - - # Reload fonts and set new glyph names on them. - fonts = self._openFonts(fontfiles) - for font, glyphOrder in zip(fonts, glyphOrders): - font.setGlyphOrder(glyphOrder) - if "CFF " in font: - renameCFFCharStrings(self, glyphOrder, font["CFF "]) - - cmaps = [font["cmap"] for font in fonts] - self.duplicateGlyphsPerFont = [{} for _ in fonts] - computeMegaCmap(self, cmaps) - - mega = ttLib.TTFont(sfntVersion=sfntVersion) - mega.setGlyphOrder(self.glyphOrder) - - for font in fonts: - self._preMerge(font) - - self.fonts = fonts - - allTags = reduce(set.union, (list(font.keys()) for font in fonts), set()) - allTags.remove("GlyphOrder") - - for tag in sorted(allTags): - if tag in self.options.drop_tables: - continue - - with timer("merge '%s'" % tag): - tables = [font.get(tag, NotImplemented) for font in fonts] - - log.info("Merging '%s'.", tag) - clazz = ttLib.getTableClass(tag) - table = clazz(tag).merge(self, tables) - # XXX Clean this up and use: table = mergeObjects(tables) - - if table is not NotImplemented and table is not False: - mega[tag] = table - log.info("Merged '%s'.", tag) - else: - log.info("Dropped '%s'.", tag) - - del self.duplicateGlyphsPerFont - del self.fonts - - self._postMerge(mega) - - return mega - - def mergeObjects(self, returnTable, logic, tables): - # Right now we don't use self at all. Will use in the future - # for options and logging. - - allKeys = set.union( - set(), - *(vars(table).keys() for table in tables if table is not NotImplemented), - ) - for key in allKeys: - try: - mergeLogic = logic[key] - except KeyError: - try: - mergeLogic = logic["*"] - except KeyError: - raise Exception( - "Don't know how to merge key %s of class %s" - % (key, returnTable.__class__.__name__) - ) - if mergeLogic is NotImplemented: - continue - value = mergeLogic(getattr(table, key, NotImplemented) for table in tables) - if value is not NotImplemented: - setattr(returnTable, key, value) - - return returnTable - - def _preMerge(self, font): - layoutPreMerge(font) - - def _postMerge(self, font): - layoutPostMerge(font) - - if "OS/2" in font: - # https://github.com/fonttools/fonttools/issues/2538 - # TODO: Add an option to disable this? 
- font["OS/2"].recalcAvgCharWidth(font) - - -__all__ = ["Options", "Merger", "main"] - - -@timer("make one with everything (TOTAL TIME)") -def main(args=None): - """Merge multiple fonts into one""" - from fontTools import configLogger - - if args is None: - args = sys.argv[1:] - - options = Options() - args = options.parse_opts(args, ignore_unknown=["output-file"]) - outfile = "merged.ttf" - fontfiles = [] - for g in args: - if g.startswith("--output-file="): - outfile = g[14:] - continue - fontfiles.append(g) - - if len(args) < 1: - print("usage: pyftmerge font...", file=sys.stderr) - return 1 - - configLogger(level=logging.INFO if options.verbose else logging.WARNING) - if options.timing: - timer.logger.setLevel(logging.DEBUG) - else: - timer.logger.disabled = True - - merger = Merger(options=options) - font = merger.merge(fontfiles) - with timer("compile and save font"): - font.save(outfile) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/otlLib/maxContextCalc.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/otlLib/maxContextCalc.py deleted file mode 100644 index 03e7561b60f126bc19ff8b49ed2ebe7d6898286e..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/otlLib/maxContextCalc.py +++ /dev/null @@ -1,96 +0,0 @@ -__all__ = ["maxCtxFont"] - - -def maxCtxFont(font): - """Calculate the usMaxContext value for an entire font.""" - - maxCtx = 0 - for tag in ("GSUB", "GPOS"): - if tag not in font: - continue - table = font[tag].table - if not table.LookupList: - continue - for lookup in table.LookupList.Lookup: - for st in lookup.SubTable: - maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st) - return maxCtx - - -def maxCtxSubtable(maxCtx, tag, lookupType, st): - """Calculate usMaxContext based on a single lookup table (and an existing - max value). 
- """ - - # single positioning, single / multiple substitution - if (tag == "GPOS" and lookupType == 1) or ( - tag == "GSUB" and lookupType in (1, 2, 3) - ): - maxCtx = max(maxCtx, 1) - - # pair positioning - elif tag == "GPOS" and lookupType == 2: - maxCtx = max(maxCtx, 2) - - # ligatures - elif tag == "GSUB" and lookupType == 4: - for ligatures in st.ligatures.values(): - for ligature in ligatures: - maxCtx = max(maxCtx, ligature.CompCount) - - # context - elif (tag == "GPOS" and lookupType == 7) or (tag == "GSUB" and lookupType == 5): - maxCtx = maxCtxContextualSubtable(maxCtx, st, "Pos" if tag == "GPOS" else "Sub") - - # chained context - elif (tag == "GPOS" and lookupType == 8) or (tag == "GSUB" and lookupType == 6): - maxCtx = maxCtxContextualSubtable( - maxCtx, st, "Pos" if tag == "GPOS" else "Sub", "Chain" - ) - - # extensions - elif (tag == "GPOS" and lookupType == 9) or (tag == "GSUB" and lookupType == 7): - maxCtx = maxCtxSubtable(maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable) - - # reverse-chained context - elif tag == "GSUB" and lookupType == 8: - maxCtx = maxCtxContextualRule(maxCtx, st, "Reverse") - - return maxCtx - - -def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=""): - """Calculate usMaxContext based on a contextual feature subtable.""" - - if st.Format == 1: - for ruleset in getattr(st, "%s%sRuleSet" % (chain, ruleType)): - if ruleset is None: - continue - for rule in getattr(ruleset, "%s%sRule" % (chain, ruleType)): - if rule is None: - continue - maxCtx = maxCtxContextualRule(maxCtx, rule, chain) - - elif st.Format == 2: - for ruleset in getattr(st, "%s%sClassSet" % (chain, ruleType)): - if ruleset is None: - continue - for rule in getattr(ruleset, "%s%sClassRule" % (chain, ruleType)): - if rule is None: - continue - maxCtx = maxCtxContextualRule(maxCtx, rule, chain) - - elif st.Format == 3: - maxCtx = maxCtxContextualRule(maxCtx, st, chain) - - return maxCtx - - -def maxCtxContextualRule(maxCtx, st, chain): - """Calculate usMaxContext based on a contextual feature rule.""" - - if not chain: - return max(maxCtx, st.GlyphCount) - elif chain == "Reverse": - return max(maxCtx, st.GlyphCount + st.LookAheadGlyphCount) - return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/qtPen.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/qtPen.py deleted file mode 100644 index eb13d03d2f611de4ce0b29ce3995f85e8f9e491a..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/qtPen.py +++ /dev/null @@ -1,29 +0,0 @@ -from fontTools.pens.basePen import BasePen - - -__all__ = ["QtPen"] - - -class QtPen(BasePen): - def __init__(self, glyphSet, path=None): - BasePen.__init__(self, glyphSet) - if path is None: - from PyQt5.QtGui import QPainterPath - - path = QPainterPath() - self.path = path - - def _moveTo(self, p): - self.path.moveTo(*p) - - def _lineTo(self, p): - self.path.lineTo(*p) - - def _curveToOne(self, p1, p2, p3): - self.path.cubicTo(*p1, *p2, *p3) - - def _qCurveToOne(self, p1, p2): - self.path.quadTo(*p1, *p2) - - def _closePath(self): - self.path.closeSubpath() diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-37e7aa9b.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-37e7aa9b.js deleted file mode 100644 index 
f50296d02354829f57b2f3558d9985cdd815a4ac..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-37e7aa9b.js +++ /dev/null @@ -1,346 +0,0 @@ -import{S as Or,e as qr,s as Pr,J as _n,K as de,p as Xe,M as Gt,n as ut,A as $e,al as Ir,g as Gi,N as ct,B as Vi,am as Wi,a7 as Is,h as Yi,O as O0,aj as Ls,z as Re,u as y0,v as Ye,y as x0,an as Os,k as H0,o as U0,x as G0,G as q0,m as Pn,V as Hn,_ as ui,F as At,U as I0,Q as b0,P as ji,R as Xi,T as P0,a1 as $i,E as qs,ae as Ps,q as Hs,r as Us}from"./index-1d65707a.js";import{u as Gs,S as Vs}from"./ShareButton-8cd3d8f6.js";import{B as Ws}from"./Button-f155035a.js";import{B as Ys}from"./BlockLabel-66866176.js";import{n as ci}from"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";import"./IconButton-d42f3661.js";function js(v){let i,s,o;return{c(){i=_n("svg"),s=_n("path"),o=_n("path"),de(s,"fill","currentColor"),de(s,"d","M17.74 30L16 29l4-7h6a2 2 0 0 0 2-2V8a2 2 0 0 0-2-2H6a2 2 0 0 0-2 2v12a2 2 0 0 0 2 2h9v2H6a4 4 0 0 1-4-4V8a4 4 0 0 1 4-4h20a4 4 0 0 1 4 4v12a4 4 0 0 1-4 4h-4.84Z"),de(o,"fill","currentColor"),de(o,"d","M8 10h16v2H8zm0 6h10v2H8z"),de(i,"xmlns","http://www.w3.org/2000/svg"),de(i,"xmlns:xlink","http://www.w3.org/1999/xlink"),de(i,"aria-hidden","true"),de(i,"role","img"),de(i,"class","iconify iconify--carbon"),de(i,"width","100%"),de(i,"height","100%"),de(i,"preserveAspectRatio","xMidYMid meet"),de(i,"viewBox","0 0 32 32")},m(m,p){Xe(m,i,p),Gt(i,s),Gt(i,o)},p:ut,i:ut,o:ut,d(m){m&&$e(i)}}}class Xs extends Or{constructor(i){super(),qr(this,i,null,js,Pr,{})}}function Zi(){return{async:!1,baseUrl:null,breaks:!1,extensions:null,gfm:!0,headerIds:!0,headerPrefix:"",highlight:null,hooks:null,langPrefix:"language-",mangle:!0,pedantic:!1,renderer:null,sanitize:!1,sanitizer:null,silent:!1,smartypants:!1,tokenizer:null,walkTokens:null,xhtml:!1}}let w0=Zi();function $s(v){w0=v}const Ki=/[&<>"']/,Zs=new RegExp(Ki.source,"g"),Qi=/[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/,Ks=new RegExp(Qi.source,"g"),Qs={"&":"&","<":"<",">":">",'"':""","'":"'"},hi=v=>Qs[v];function et(v,i){if(i){if(Ki.test(v))return v.replace(Zs,hi)}else if(Qi.test(v))return v.replace(Ks,hi);return v}const Js=/&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig;function Ji(v){return v.replace(Js,(i,s)=>(s=s.toLowerCase(),s==="colon"?":":s.charAt(0)==="#"?s.charAt(1)==="x"?String.fromCharCode(parseInt(s.substring(2),16)):String.fromCharCode(+s.substring(1)):""))}const eo=/(^|[^\[])\^/g;function De(v,i){v=typeof v=="string"?v:v.source,i=i||"";const s={replace:(o,m)=>(m=m.source||m,m=m.replace(eo,"$1"),v=v.replace(o,m),s),getRegex:()=>new RegExp(v,i)};return s}const to=/[^\w:]/g,ro=/^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;function mi(v,i,s){if(v){let o;try{o=decodeURIComponent(Ji(s)).replace(to,"").toLowerCase()}catch{return null}if(o.indexOf("javascript:")===0||o.indexOf("vbscript:")===0||o.indexOf("data:")===0)return null}i&&!ro.test(s)&&(s=lo(i,s));try{s=encodeURI(s).replace(/%25/g,"%")}catch{return null}return s}const Cr={},no=/^[^:]+:\/*[^/]*$/,ao=/^([^:]+:)[\s\S]*$/,io=/^([^:]+:\/*[^/]*)[\s\S]*$/;function lo(v,i){Cr[" "+v]||(no.test(v)?Cr[" "+v]=v+"/":Cr[" "+v]=Rr(v,"/",!0)),v=Cr[" "+v];const s=v.indexOf(":")===-1;return i.substring(0,2)==="//"?s?i:v.replace(ao,"$1")+i:i.charAt(0)==="/"?s?i:v.replace(io,"$1")+i:v+i}const Lr={exec:function(){}};function di(v,i){const s=v.replace(/\|/g,(p,x,w)=>{let z=!1,L=x;for(;--L>=0&&w[L]==="\\";)z=!z;return z?"|":" |"}),o=s.split(/ \|/);let 
m=0;if(o[0].trim()||o.shift(),o.length>0&&!o[o.length-1].trim()&&o.pop(),o.length>i)o.splice(i);else for(;o.length1;)i&1&&(s+=v),i>>=1,v+=v;return s+v}function pi(v,i,s,o){const m=i.href,p=i.title?et(i.title):null,x=v[1].replace(/\\([\[\]])/g,"$1");if(v[0].charAt(0)!=="!"){o.state.inLink=!0;const w={type:"link",raw:s,href:m,title:p,text:x,tokens:o.inlineTokens(x)};return o.state.inLink=!1,w}return{type:"image",raw:s,href:m,title:p,text:et(x)}}function uo(v,i){const s=v.match(/^(\s+)(?:```)/);if(s===null)return i;const o=s[1];return i.split(` -`).map(m=>{const p=m.match(/^\s+/);if(p===null)return m;const[x]=p;return x.length>=o.length?m.slice(o.length):m}).join(` -`)}class Un{constructor(i){this.options=i||w0}space(i){const s=this.rules.block.newline.exec(i);if(s&&s[0].length>0)return{type:"space",raw:s[0]}}code(i){const s=this.rules.block.code.exec(i);if(s){const o=s[0].replace(/^ {1,4}/gm,"");return{type:"code",raw:s[0],codeBlockStyle:"indented",text:this.options.pedantic?o:Rr(o,` -`)}}}fences(i){const s=this.rules.block.fences.exec(i);if(s){const o=s[0],m=uo(o,s[3]||"");return{type:"code",raw:o,lang:s[2]?s[2].trim().replace(this.rules.inline._escapes,"$1"):s[2],text:m}}}heading(i){const s=this.rules.block.heading.exec(i);if(s){let o=s[2].trim();if(/#$/.test(o)){const m=Rr(o,"#");(this.options.pedantic||!m||/ $/.test(m))&&(o=m.trim())}return{type:"heading",raw:s[0],depth:s[1].length,text:o,tokens:this.lexer.inline(o)}}}hr(i){const s=this.rules.block.hr.exec(i);if(s)return{type:"hr",raw:s[0]}}blockquote(i){const s=this.rules.block.blockquote.exec(i);if(s){const o=s[0].replace(/^ *>[ \t]?/gm,""),m=this.lexer.state.top;this.lexer.state.top=!0;const p=this.lexer.blockTokens(o);return this.lexer.state.top=m,{type:"blockquote",raw:s[0],tokens:p,text:o}}}list(i){let s=this.rules.block.list.exec(i);if(s){let o,m,p,x,w,z,L,G,K,ne,V,xe,ge=s[1].trim();const le=ge.length>1,q={type:"list",raw:"",ordered:le,start:le?+ge.slice(0,-1):"",loose:!1,items:[]};ge=le?`\\d{1,9}\\${ge.slice(-1)}`:`\\${ge}`,this.options.pedantic&&(ge=le?ge:"[*+-]");const C=new RegExp(`^( {0,3}${ge})((?:[ ][^\\n]*)?(?:\\n|$))`);for(;i&&(xe=!1,!(!(s=C.exec(i))||this.rules.block.hr.test(i)));){if(o=s[0],i=i.substring(o.length),G=s[2].split(` -`,1)[0].replace(/^\t+/,N=>" ".repeat(3*N.length)),K=i.split(` -`,1)[0],this.options.pedantic?(x=2,V=G.trimLeft()):(x=s[2].search(/[^ ]/),x=x>4?1:x,V=G.slice(x),x+=s[1].length),z=!1,!G&&/^ *$/.test(K)&&(o+=K+` -`,i=i.substring(K.length+1),xe=!0),!xe){const N=new RegExp(`^ {0,${Math.min(3,x-1)}}(?:[*+-]|\\d{1,9}[.)])((?:[ ][^\\n]*)?(?:\\n|$))`),D=new RegExp(`^ {0,${Math.min(3,x-1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`),I=new RegExp(`^ {0,${Math.min(3,x-1)}}(?:\`\`\`|~~~)`),j=new RegExp(`^ {0,${Math.min(3,x-1)}}#`);for(;i&&(ne=i.split(` -`,1)[0],K=ne,this.options.pedantic&&(K=K.replace(/^ {1,4}(?=( {4})*[^ ])/g," ")),!(I.test(K)||j.test(K)||N.test(K)||D.test(i)));){if(K.search(/[^ ]/)>=x||!K.trim())V+=` -`+K.slice(x);else{if(z||G.search(/[^ ]/)>=4||I.test(G)||j.test(G)||D.test(G))break;V+=` -`+K}!z&&!K.trim()&&(z=!0),o+=ne+` -`,i=i.substring(ne.length+1),G=K.slice(x)}}q.loose||(L?q.loose=!0:/\n *\n *$/.test(o)&&(L=!0)),this.options.gfm&&(m=/^\[[ xX]\] /.exec(V),m&&(p=m[0]!=="[ ] ",V=V.replace(/^\[[ xX]\] +/,""))),q.items.push({type:"list_item",raw:o,task:!!m,checked:p,loose:!1,text:V}),q.raw+=o}q.items[q.items.length-1].raw=o.trimRight(),q.items[q.items.length-1].text=V.trimRight(),q.raw=q.raw.trimRight();const 
_=q.items.length;for(w=0;w<_;w++)if(this.lexer.state.top=!1,q.items[w].tokens=this.lexer.blockTokens(q.items[w].text,[]),!q.loose){const N=q.items[w].tokens.filter(I=>I.type==="space"),D=N.length>0&&N.some(I=>/\n.*\n/.test(I.raw));q.loose=D}if(q.loose)for(w=0;w<_;w++)q.items[w].loose=!0;return q}}html(i){const s=this.rules.block.html.exec(i);if(s){const o={type:"html",block:!0,raw:s[0],pre:!this.options.sanitizer&&(s[1]==="pre"||s[1]==="script"||s[1]==="style"),text:s[0]};if(this.options.sanitize){const m=this.options.sanitizer?this.options.sanitizer(s[0]):et(s[0]);o.type="paragraph",o.text=m,o.tokens=this.lexer.inline(m)}return o}}def(i){const s=this.rules.block.def.exec(i);if(s){const o=s[1].toLowerCase().replace(/\s+/g," "),m=s[2]?s[2].replace(/^<(.*)>$/,"$1").replace(this.rules.inline._escapes,"$1"):"",p=s[3]?s[3].substring(1,s[3].length-1).replace(this.rules.inline._escapes,"$1"):s[3];return{type:"def",tag:o,raw:s[0],href:m,title:p}}}table(i){const s=this.rules.block.table.exec(i);if(s){const o={type:"table",header:di(s[1]).map(m=>({text:m})),align:s[2].replace(/^ *|\| *$/g,"").split(/ *\| */),rows:s[3]&&s[3].trim()?s[3].replace(/\n[ \t]*$/,"").split(` -`):[]};if(o.header.length===o.align.length){o.raw=s[0];let m=o.align.length,p,x,w,z;for(p=0;p({text:L}));for(m=o.header.length,x=0;x/i.test(s[0])&&(this.lexer.state.inLink=!1),!this.lexer.state.inRawBlock&&/^<(pre|code|kbd|script)(\s|>)/i.test(s[0])?this.lexer.state.inRawBlock=!0:this.lexer.state.inRawBlock&&/^<\/(pre|code|kbd|script)(\s|>)/i.test(s[0])&&(this.lexer.state.inRawBlock=!1),{type:this.options.sanitize?"text":"html",raw:s[0],inLink:this.lexer.state.inLink,inRawBlock:this.lexer.state.inRawBlock,block:!1,text:this.options.sanitize?this.options.sanitizer?this.options.sanitizer(s[0]):et(s[0]):s[0]}}link(i){const s=this.rules.inline.link.exec(i);if(s){const o=s[2].trim();if(!this.options.pedantic&&/^$/.test(o))return;const x=Rr(o.slice(0,-1),"\\");if((o.length-x.length)%2===0)return}else{const x=so(s[2],"()");if(x>-1){const z=(s[0].indexOf("!")===0?5:4)+s[1].length+x;s[2]=s[2].substring(0,x),s[0]=s[0].substring(0,z).trim(),s[3]=""}}let m=s[2],p="";if(this.options.pedantic){const x=/^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(m);x&&(m=x[1],p=x[3])}else p=s[3]?s[3].slice(1,-1):"";return m=m.trim(),/^$/.test(o)?m=m.slice(1):m=m.slice(1,-1)),pi(s,{href:m&&m.replace(this.rules.inline._escapes,"$1"),title:p&&p.replace(this.rules.inline._escapes,"$1")},s[0],this.lexer)}}reflink(i,s){let o;if((o=this.rules.inline.reflink.exec(i))||(o=this.rules.inline.nolink.exec(i))){let m=(o[2]||o[1]).replace(/\s+/g," ");if(m=s[m.toLowerCase()],!m){const p=o[0].charAt(0);return{type:"text",raw:p,text:p}}return pi(o,m,o[0],this.lexer)}}emStrong(i,s,o=""){let m=this.rules.inline.emStrong.lDelim.exec(i);if(!m||m[3]&&o.match(/[\p{L}\p{N}]/u))return;const p=m[1]||m[2]||"";if(!p||p&&(o===""||this.rules.inline.punctuation.exec(o))){const x=m[0].length-1;let w,z,L=x,G=0;const K=m[0][0]==="*"?this.rules.inline.emStrong.rDelimAst:this.rules.inline.emStrong.rDelimUnd;for(K.lastIndex=0,s=s.slice(-1*i.length+x);(m=K.exec(s))!=null;){if(w=m[1]||m[2]||m[3]||m[4]||m[5]||m[6],!w)continue;if(z=w.length,m[3]||m[4]){L+=z;continue}else if((m[5]||m[6])&&x%3&&!((x+z)%3)){G+=z;continue}if(L-=z,L>0)continue;z=Math.min(z,z+L+G);const ne=i.slice(0,x+m.index+(m[0].length-w.length)+z);if(Math.min(x,z)%2){const xe=ne.slice(1,-1);return{type:"em",raw:ne,text:xe,tokens:this.lexer.inlineTokens(xe)}}const 
V=ne.slice(2,-2);return{type:"strong",raw:ne,text:V,tokens:this.lexer.inlineTokens(V)}}}}codespan(i){const s=this.rules.inline.code.exec(i);if(s){let o=s[2].replace(/\n/g," ");const m=/[^ ]/.test(o),p=/^ /.test(o)&&/ $/.test(o);return m&&p&&(o=o.substring(1,o.length-1)),o=et(o,!0),{type:"codespan",raw:s[0],text:o}}}br(i){const s=this.rules.inline.br.exec(i);if(s)return{type:"br",raw:s[0]}}del(i){const s=this.rules.inline.del.exec(i);if(s)return{type:"del",raw:s[0],text:s[2],tokens:this.lexer.inlineTokens(s[2])}}autolink(i,s){const o=this.rules.inline.autolink.exec(i);if(o){let m,p;return o[2]==="@"?(m=et(this.options.mangle?s(o[1]):o[1]),p="mailto:"+m):(m=et(o[1]),p=m),{type:"link",raw:o[0],text:m,href:p,tokens:[{type:"text",raw:m,text:m}]}}}url(i,s){let o;if(o=this.rules.inline.url.exec(i)){let m,p;if(o[2]==="@")m=et(this.options.mangle?s(o[0]):o[0]),p="mailto:"+m;else{let x;do x=o[0],o[0]=this.rules.inline._backpedal.exec(o[0])[0];while(x!==o[0]);m=et(o[0]),o[1]==="www."?p="http://"+o[0]:p=o[0]}return{type:"link",raw:o[0],text:m,href:p,tokens:[{type:"text",raw:m,text:m}]}}}inlineText(i,s){const o=this.rules.inline.text.exec(i);if(o){let m;return this.lexer.state.inRawBlock?m=this.options.sanitize?this.options.sanitizer?this.options.sanitizer(o[0]):et(o[0]):o[0]:m=et(this.options.smartypants?s(o[0]):o[0]),{type:"text",raw:o[0],text:m}}}}const me={newline:/^(?: *(?:\n|$))+/,code:/^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,fences:/^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/,hr:/^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,heading:/^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,blockquote:/^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/,list:/^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/,html:"^ {0,3}(?:<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:[^\\n]*\\n+|$)|comment[^\\n]*(\\n+|$)|<\\?[\\s\\S]*?(?:\\?>\\n*|$)|\\n*|$)|\\n*|$)|)[\\s\\S]*?(?:(?:\\n *)+\\n|$)|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)|(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$))",def:/^ {0,3}\[(label)\]: *(?:\n *)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n *)?| *\n *)(title))? 
*(?:\n+|$)/,table:Lr,lheading:/^((?:.|\n(?!\n))+?)\n {0,3}(=+|-+) *(?:\n+|$)/,_paragraph:/^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/,text:/^[^\n]+/};me._label=/(?!\s*\])(?:\\.|[^\[\]\\])+/;me._title=/(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/;me.def=De(me.def).replace("label",me._label).replace("title",me._title).getRegex();me.bullet=/(?:[*+-]|\d{1,9}[.)])/;me.listItemStart=De(/^( *)(bull) */).replace("bull",me.bullet).getRegex();me.list=De(me.list).replace(/bull/g,me.bullet).replace("hr","\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))").replace("def","\\n+(?="+me.def.source+")").getRegex();me._tag="address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul";me._comment=/|$)/;me.html=De(me.html,"i").replace("comment",me._comment).replace("tag",me._tag).replace("attribute",/ +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex();me.paragraph=De(me._paragraph).replace("hr",me.hr).replace("heading"," {0,3}#{1,6} ").replace("|lheading","").replace("|table","").replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",me._tag).getRegex();me.blockquote=De(me.blockquote).replace("paragraph",me.paragraph).getRegex();me.normal={...me};me.gfm={...me.normal,table:"^ *([^\\n ].*\\|.*)\\n {0,3}(?:\\| *)?(:?-+:? *(?:\\| *:?-+:? *)*)(?:\\| *)?(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)"};me.gfm.table=De(me.gfm.table).replace("hr",me.hr).replace("heading"," {0,3}#{1,6} ").replace("blockquote"," {0,3}>").replace("code"," {4}[^\\n]").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",me._tag).getRegex();me.gfm.paragraph=De(me._paragraph).replace("hr",me.hr).replace("heading"," {0,3}#{1,6} ").replace("|lheading","").replace("table",me.gfm.table).replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",me._tag).getRegex();me.pedantic={...me.normal,html:De(`^ *(?:comment *(?:\\n|\\s*$)|<(tag)[\\s\\S]+? *(?:\\n{2,}|\\s*$)|\\s]*)*?/?> *(?:\\n{2,}|\\s*$))`).replace("comment",me._comment).replace(/tag/g,"(?!(?:a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)\\b)\\w+(?!:|[^\\w\\s@]*@)\\b").getRegex(),def:/^ *\[([^\]]+)\]: *]+)>?(?: +(["(][^\n]+[")]))? 
*(?:\n+|$)/,heading:/^(#{1,6})(.*)(?:\n+|$)/,fences:Lr,lheading:/^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/,paragraph:De(me.normal._paragraph).replace("hr",me.hr).replace("heading",` *#{1,6} *[^ -]`).replace("lheading",me.lheading).replace("blockquote"," {0,3}>").replace("|fences","").replace("|list","").replace("|html","").getRegex()};const ie={escape:/^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,autolink:/^<(scheme:[^\s\x00-\x1f<>]*|email)>/,url:Lr,tag:"^comment|^|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>|^<\\?[\\s\\S]*?\\?>|^|^",link:/^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/,reflink:/^!?\[(label)\]\[(ref)\]/,nolink:/^!?\[(ref)\](?:\[\])?/,reflinkSearch:"reflink|nolink(?!\\()",emStrong:{lDelim:/^(?:\*+(?:([punct_])|[^\s*]))|^_+(?:([punct*])|([^\s_]))/,rDelimAst:/^(?:[^_*\\]|\\.)*?\_\_(?:[^_*\\]|\\.)*?\*(?:[^_*\\]|\\.)*?(?=\_\_)|(?:[^*\\]|\\.)+(?=[^*])|[punct_](\*+)(?=[\s]|$)|(?:[^punct*_\s\\]|\\.)(\*+)(?=[punct_\s]|$)|[punct_\s](\*+)(?=[^punct*_\s])|[\s](\*+)(?=[punct_])|[punct_](\*+)(?=[punct_])|(?:[^punct*_\s\\]|\\.)(\*+)(?=[^punct*_\s])/,rDelimUnd:/^(?:[^_*\\]|\\.)*?\*\*(?:[^_*\\]|\\.)*?\_(?:[^_*\\]|\\.)*?(?=\*\*)|(?:[^_\\]|\\.)+(?=[^_])|[punct*](\_+)(?=[\s]|$)|(?:[^punct*_\s\\]|\\.)(\_+)(?=[punct*\s]|$)|[punct*\s](\_+)(?=[^punct*_\s])|[\s](\_+)(?=[punct*])|[punct*](\_+)(?=[punct*])/},code:/^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,br:/^( {2,}|\\)\n(?!\s*$)/,del:Lr,text:/^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\?@\\[\\]`^{|}~";ie.punctuation=De(ie.punctuation).replace(/punctuation/g,ie._punctuation).getRegex();ie.blockSkip=/\[[^\]]*?\]\([^\)]*?\)|`[^`]*?`|<[^>]*?>/g;ie.escapedEmSt=/(?:^|[^\\])(?:\\\\)*\\[*_]/g;ie._comment=De(me._comment).replace("(?:-->|$)","-->").getRegex();ie.emStrong.lDelim=De(ie.emStrong.lDelim).replace(/punct/g,ie._punctuation).getRegex();ie.emStrong.rDelimAst=De(ie.emStrong.rDelimAst,"g").replace(/punct/g,ie._punctuation).getRegex();ie.emStrong.rDelimUnd=De(ie.emStrong.rDelimUnd,"g").replace(/punct/g,ie._punctuation).getRegex();ie._escapes=/\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/g;ie._scheme=/[a-zA-Z][a-zA-Z0-9+.-]{1,31}/;ie._email=/[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/;ie.autolink=De(ie.autolink).replace("scheme",ie._scheme).replace("email",ie._email).getRegex();ie._attribute=/\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/;ie.tag=De(ie.tag).replace("comment",ie._comment).replace("attribute",ie._attribute).getRegex();ie._label=/(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/;ie._href=/<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/;ie._title=/"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/;ie.link=De(ie.link).replace("label",ie._label).replace("href",ie._href).replace("title",ie._title).getRegex();ie.reflink=De(ie.reflink).replace("label",ie._label).replace("ref",me._label).getRegex();ie.nolink=De(ie.nolink).replace("ref",me._label).getRegex();ie.reflinkSearch=De(ie.reflinkSearch,"g").replace("reflink",ie.reflink).replace("nolink",ie.nolink).getRegex();ie.normal={...ie};ie.pedantic={...ie.normal,strong:{start:/^__|\*\*/,middle:/^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/,endAst:/\*\*(?!\*)/g,endUnd:/__(?!_)/g},em:{start:/^_|\*/,middle:/^()\*(?=\S)([\s\S]*?\S)\*(?!\*)|^_(?=\S)([\s\S]*?\S)_(?!_)/,endAst:/\*(?!\*)/g,endUnd:/_(?!_)/g},link:De(/^!?\[(label)\]\((.*?)\)/).replace("label",ie._label).getRegex(),reflink:De(/^!?\[(label)\]\s*\[([^\]]*)\]/).replace("label",ie._label).getRegex()};ie.gfm={...ie.normal,esca
pe:De(ie.escape).replace("])","~|])").getRegex(),_extended_email:/[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/,url:/^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/,_backpedal:/(?:[^?!.,:;*_'"~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_'"~)]+(?!$))+/,del:/^(~~?)(?=[^\s~])([\s\S]*?[^\s~])\1(?=[^~]|$)/,text:/^([`~]+|[^`~])(?:(?= {2,}\n)|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)|[\s\S]*?(?:(?=[\\.5&&(o="x"+o.toString(16)),i+="&#"+o+";";return i}class l0{constructor(i){this.tokens=[],this.tokens.links=Object.create(null),this.options=i||w0,this.options.tokenizer=this.options.tokenizer||new Un,this.tokenizer=this.options.tokenizer,this.tokenizer.options=this.options,this.tokenizer.lexer=this,this.inlineQueue=[],this.state={inLink:!1,inRawBlock:!1,top:!0};const s={block:me.normal,inline:ie.normal};this.options.pedantic?(s.block=me.pedantic,s.inline=ie.pedantic):this.options.gfm&&(s.block=me.gfm,this.options.breaks?s.inline=ie.breaks:s.inline=ie.gfm),this.tokenizer.rules=s}static get rules(){return{block:me,inline:ie}}static lex(i,s){return new l0(s).lex(i)}static lexInline(i,s){return new l0(s).inlineTokens(i)}lex(i){i=i.replace(/\r\n|\r/g,` -`),this.blockTokens(i,this.tokens);let s;for(;s=this.inlineQueue.shift();)this.inlineTokens(s.src,s.tokens);return this.tokens}blockTokens(i,s=[]){this.options.pedantic?i=i.replace(/\t/g," ").replace(/^ +$/gm,""):i=i.replace(/^( *)(\t+)/gm,(w,z,L)=>z+" ".repeat(L.length));let o,m,p,x;for(;i;)if(!(this.options.extensions&&this.options.extensions.block&&this.options.extensions.block.some(w=>(o=w.call({lexer:this},i,s))?(i=i.substring(o.raw.length),s.push(o),!0):!1))){if(o=this.tokenizer.space(i)){i=i.substring(o.raw.length),o.raw.length===1&&s.length>0?s[s.length-1].raw+=` -`:s.push(o);continue}if(o=this.tokenizer.code(i)){i=i.substring(o.raw.length),m=s[s.length-1],m&&(m.type==="paragraph"||m.type==="text")?(m.raw+=` -`+o.raw,m.text+=` -`+o.text,this.inlineQueue[this.inlineQueue.length-1].src=m.text):s.push(o);continue}if(o=this.tokenizer.fences(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.heading(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.hr(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.blockquote(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.list(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.html(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.def(i)){i=i.substring(o.raw.length),m=s[s.length-1],m&&(m.type==="paragraph"||m.type==="text")?(m.raw+=` -`+o.raw,m.text+=` -`+o.raw,this.inlineQueue[this.inlineQueue.length-1].src=m.text):this.tokens.links[o.tag]||(this.tokens.links[o.tag]={href:o.href,title:o.title});continue}if(o=this.tokenizer.table(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.lheading(i)){i=i.substring(o.raw.length),s.push(o);continue}if(p=i,this.options.extensions&&this.options.extensions.startBlock){let w=1/0;const z=i.slice(1);let L;this.options.extensions.startBlock.forEach(function(G){L=G.call({lexer:this},z),typeof L=="number"&&L>=0&&(w=Math.min(w,L))}),w<1/0&&w>=0&&(p=i.substring(0,w+1))}if(this.state.top&&(o=this.tokenizer.paragraph(p))){m=s[s.length-1],x&&m.type==="paragraph"?(m.raw+=` -`+o.raw,m.text+=` 
-`+o.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=m.text):s.push(o),x=p.length!==i.length,i=i.substring(o.raw.length);continue}if(o=this.tokenizer.text(i)){i=i.substring(o.raw.length),m=s[s.length-1],m&&m.type==="text"?(m.raw+=` -`+o.raw,m.text+=` -`+o.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=m.text):s.push(o);continue}if(i){const w="Infinite loop on byte: "+i.charCodeAt(0);if(this.options.silent){console.error(w);break}else throw new Error(w)}}return this.state.top=!0,s}inline(i,s=[]){return this.inlineQueue.push({src:i,tokens:s}),s}inlineTokens(i,s=[]){let o,m,p,x=i,w,z,L;if(this.tokens.links){const G=Object.keys(this.tokens.links);if(G.length>0)for(;(w=this.tokenizer.rules.inline.reflinkSearch.exec(x))!=null;)G.includes(w[0].slice(w[0].lastIndexOf("[")+1,-1))&&(x=x.slice(0,w.index)+"["+fi("a",w[0].length-2)+"]"+x.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex))}for(;(w=this.tokenizer.rules.inline.blockSkip.exec(x))!=null;)x=x.slice(0,w.index)+"["+fi("a",w[0].length-2)+"]"+x.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);for(;(w=this.tokenizer.rules.inline.escapedEmSt.exec(x))!=null;)x=x.slice(0,w.index+w[0].length-2)+"++"+x.slice(this.tokenizer.rules.inline.escapedEmSt.lastIndex),this.tokenizer.rules.inline.escapedEmSt.lastIndex--;for(;i;)if(z||(L=""),z=!1,!(this.options.extensions&&this.options.extensions.inline&&this.options.extensions.inline.some(G=>(o=G.call({lexer:this},i,s))?(i=i.substring(o.raw.length),s.push(o),!0):!1))){if(o=this.tokenizer.escape(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.tag(i)){i=i.substring(o.raw.length),m=s[s.length-1],m&&o.type==="text"&&m.type==="text"?(m.raw+=o.raw,m.text+=o.text):s.push(o);continue}if(o=this.tokenizer.link(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.reflink(i,this.tokens.links)){i=i.substring(o.raw.length),m=s[s.length-1],m&&o.type==="text"&&m.type==="text"?(m.raw+=o.raw,m.text+=o.text):s.push(o);continue}if(o=this.tokenizer.emStrong(i,x,L)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.codespan(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.br(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.del(i)){i=i.substring(o.raw.length),s.push(o);continue}if(o=this.tokenizer.autolink(i,gi)){i=i.substring(o.raw.length),s.push(o);continue}if(!this.state.inLink&&(o=this.tokenizer.url(i,gi))){i=i.substring(o.raw.length),s.push(o);continue}if(p=i,this.options.extensions&&this.options.extensions.startInline){let G=1/0;const K=i.slice(1);let ne;this.options.extensions.startInline.forEach(function(V){ne=V.call({lexer:this},K),typeof ne=="number"&&ne>=0&&(G=Math.min(G,ne))}),G<1/0&&G>=0&&(p=i.substring(0,G+1))}if(o=this.tokenizer.inlineText(p,co)){i=i.substring(o.raw.length),o.raw.slice(-1)!=="_"&&(L=o.raw.slice(-1)),z=!0,m=s[s.length-1],m&&m.type==="text"?(m.raw+=o.raw,m.text+=o.text):s.push(o);continue}if(i){const G="Infinite loop on byte: "+i.charCodeAt(0);if(this.options.silent){console.error(G);break}else throw new Error(G)}}return s}}class Gn{constructor(i){this.options=i||w0}code(i,s,o){const m=(s||"").match(/\S*/)[0];if(this.options.highlight){const p=this.options.highlight(i,m);p!=null&&p!==i&&(o=!0,i=p)}return i=i.replace(/\n$/,"")+` -`,m?'
    '+(o?i:et(i,!0))+`
    -`:"
    "+(o?i:et(i,!0))+`
    -`}blockquote(i){return`
    -${i}
    -`}html(i,s){return i}heading(i,s,o,m){if(this.options.headerIds){const p=this.options.headerPrefix+m.slug(o);return`${i} -`}return`${i} -`}hr(){return this.options.xhtml?`
    -`:`
    -`}list(i,s,o){const m=s?"ol":"ul",p=s&&o!==1?' start="'+o+'"':"";return"<"+m+p+`> -`+i+" -`}listitem(i){return`
  • ${i}
  • -`}checkbox(i){return" "}paragraph(i){return`

    ${i}

    -`}table(i,s){return s&&(s=`${s}`),` - -`+i+` -`+s+`
    -`}tablerow(i){return` -${i} -`}tablecell(i,s){const o=s.header?"th":"td";return(s.align?`<${o} align="${s.align}">`:`<${o}>`)+i+` -`}strong(i){return`${i}`}em(i){return`${i}`}codespan(i){return`${i}`}br(){return this.options.xhtml?"
    ":"
    "}del(i){return`${i}`}link(i,s,o){if(i=mi(this.options.sanitize,this.options.baseUrl,i),i===null)return o;let m='",m}image(i,s,o){if(i=mi(this.options.sanitize,this.options.baseUrl,i),i===null)return o;let m=`${o}":">",m}text(i){return i}}class el{strong(i){return i}em(i){return i}codespan(i){return i}del(i){return i}html(i){return i}text(i){return i}link(i,s,o){return""+o}image(i,s,o){return""+o}br(){return""}}class tl{constructor(){this.seen={}}serialize(i){return i.toLowerCase().trim().replace(/<[!\/a-z].*?>/ig,"").replace(/[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&()*+,./:;<=>?@[\]^`{|}~]/g,"").replace(/\s/g,"-")}getNextSafeSlug(i,s){let o=i,m=0;if(this.seen.hasOwnProperty(o)){m=this.seen[i];do m++,o=i+"-"+m;while(this.seen.hasOwnProperty(o))}return s||(this.seen[i]=m,this.seen[o]=0),o}slug(i,s={}){const o=this.serialize(i);return this.getNextSafeSlug(o,s.dryrun)}}class s0{constructor(i){this.options=i||w0,this.options.renderer=this.options.renderer||new Gn,this.renderer=this.options.renderer,this.renderer.options=this.options,this.textRenderer=new el,this.slugger=new tl}static parse(i,s){return new s0(s).parse(i)}static parseInline(i,s){return new s0(s).parseInline(i)}parse(i,s=!0){let o="",m,p,x,w,z,L,G,K,ne,V,xe,ge,le,q,C,_,N,D,I;const j=i.length;for(m=0;m0&&C.tokens[0].type==="paragraph"?(C.tokens[0].text=D+" "+C.tokens[0].text,C.tokens[0].tokens&&C.tokens[0].tokens.length>0&&C.tokens[0].tokens[0].type==="text"&&(C.tokens[0].tokens[0].text=D+" "+C.tokens[0].tokens[0].text)):C.tokens.unshift({type:"text",text:D}):q+=D),q+=this.parse(C.tokens,le),ne+=this.renderer.listitem(q,N,_);o+=this.renderer.list(ne,xe,ge);continue}case"html":{o+=this.renderer.html(V.text,V.block);continue}case"paragraph":{o+=this.renderer.paragraph(this.parseInline(V.tokens));continue}case"text":{for(ne=V.tokens?this.parseInline(V.tokens):V.text;m+1{if(o.message+=` -Please report this to https://github.com/markedjs/marked.`,v){const m="

    An error occurred:

    "+et(o.message+"",!0)+"
    ";if(i)return Promise.resolve(m);if(s){s(null,m);return}return m}if(i)return Promise.reject(o);if(s){s(o);return}throw o}}function rl(v,i){return(s,o,m)=>{typeof o=="function"&&(m=o,o=null);const p={...o};o={...he.defaults,...p};const x=ho(o.silent,o.async,m);if(typeof s>"u"||s===null)return x(new Error("marked(): input parameter is undefined or null"));if(typeof s!="string")return x(new Error("marked(): input parameter is of type "+Object.prototype.toString.call(s)+", string expected"));if(oo(o,m),o.hooks&&(o.hooks.options=o),m){const w=o.highlight;let z;try{o.hooks&&(s=o.hooks.preprocess(s)),z=v(s,o)}catch(K){return x(K)}const L=function(K){let ne;if(!K)try{o.walkTokens&&he.walkTokens(z,o.walkTokens),ne=i(z,o),o.hooks&&(ne=o.hooks.postprocess(ne))}catch(V){K=V}return o.highlight=w,K?x(K):m(null,ne)};if(!w||w.length<3||(delete o.highlight,!z.length))return L();let G=0;he.walkTokens(z,function(K){K.type==="code"&&(G++,setTimeout(()=>{w(K.text,K.lang,function(ne,V){if(ne)return L(ne);V!=null&&V!==K.text&&(K.text=V,K.escaped=!0),G--,G===0&&L()})},0))}),G===0&&L();return}if(o.async)return Promise.resolve(o.hooks?o.hooks.preprocess(s):s).then(w=>v(w,o)).then(w=>o.walkTokens?Promise.all(he.walkTokens(w,o.walkTokens)).then(()=>w):w).then(w=>i(w,o)).then(w=>o.hooks?o.hooks.postprocess(w):w).catch(x);try{o.hooks&&(s=o.hooks.preprocess(s));const w=v(s,o);o.walkTokens&&he.walkTokens(w,o.walkTokens);let z=i(w,o);return o.hooks&&(z=o.hooks.postprocess(z)),z}catch(w){return x(w)}}}function he(v,i,s){return rl(l0.lex,s0.parse)(v,i,s)}he.options=he.setOptions=function(v){return he.defaults={...he.defaults,...v},$s(he.defaults),he};he.getDefaults=Zi;he.defaults=w0;he.use=function(...v){const i=he.defaults.extensions||{renderers:{},childTokens:{}};v.forEach(s=>{const o={...s};if(o.async=he.defaults.async||o.async||!1,s.extensions&&(s.extensions.forEach(m=>{if(!m.name)throw new Error("extension name required");if(m.renderer){const p=i.renderers[m.name];p?i.renderers[m.name]=function(...x){let w=m.renderer.apply(this,x);return w===!1&&(w=p.apply(this,x)),w}:i.renderers[m.name]=m.renderer}if(m.tokenizer){if(!m.level||m.level!=="block"&&m.level!=="inline")throw new Error("extension level must be 'block' or 'inline'");i[m.level]?i[m.level].unshift(m.tokenizer):i[m.level]=[m.tokenizer],m.start&&(m.level==="block"?i.startBlock?i.startBlock.push(m.start):i.startBlock=[m.start]:m.level==="inline"&&(i.startInline?i.startInline.push(m.start):i.startInline=[m.start]))}m.childTokens&&(i.childTokens[m.name]=m.childTokens)}),o.extensions=i),s.renderer){const m=he.defaults.renderer||new Gn;for(const p in s.renderer){const x=m[p];m[p]=(...w)=>{let z=s.renderer[p].apply(m,w);return z===!1&&(z=x.apply(m,w)),z}}o.renderer=m}if(s.tokenizer){const m=he.defaults.tokenizer||new Un;for(const p in s.tokenizer){const x=m[p];m[p]=(...w)=>{let z=s.tokenizer[p].apply(m,w);return z===!1&&(z=x.apply(m,w)),z}}o.tokenizer=m}if(s.hooks){const m=he.defaults.hooks||new Ln;for(const p in s.hooks){const x=m[p];Ln.passThroughHooks.has(p)?m[p]=w=>{if(he.defaults.async)return Promise.resolve(s.hooks[p].call(m,w)).then(L=>x.call(m,L));const z=s.hooks[p].call(m,w);return x.call(m,z)}:m[p]=(...w)=>{let z=s.hooks[p].apply(m,w);return z===!1&&(z=x.apply(m,w)),z}}o.hooks=m}if(s.walkTokens){const m=he.defaults.walkTokens;o.walkTokens=function(p){let x=[];return x.push(s.walkTokens.call(this,p)),m&&(x=x.concat(m.call(this,p))),x}}he.setOptions(o)})};he.walkTokens=function(v,i){let s=[];for(const o of 
v)switch(s=s.concat(i.call(he,o)),o.type){case"table":{for(const m of o.header)s=s.concat(he.walkTokens(m.tokens,i));for(const m of o.rows)for(const p of m)s=s.concat(he.walkTokens(p.tokens,i));break}case"list":{s=s.concat(he.walkTokens(o.items,i));break}default:he.defaults.extensions&&he.defaults.extensions.childTokens&&he.defaults.extensions.childTokens[o.type]?he.defaults.extensions.childTokens[o.type].forEach(function(m){s=s.concat(he.walkTokens(o[m],i))}):o.tokens&&(s=s.concat(he.walkTokens(o.tokens,i)))}return s};he.parseInline=rl(l0.lexInline,s0.parseInline);he.Parser=s0;he.parser=s0.parse;he.Renderer=Gn;he.TextRenderer=el;he.Lexer=l0;he.lexer=l0.lex;he.Tokenizer=Un;he.Slugger=tl;he.Hooks=Ln;he.parse=he;he.options;he.setOptions;he.use;he.walkTokens;he.parseInline;s0.parse;l0.lex;function mo(v){if(typeof v=="function"&&(v={highlight:v}),!v||typeof v.highlight!="function")throw new Error("Must provide highlight function");return typeof v.langPrefix!="string"&&(v.langPrefix="language-"),{async:!!v.async,walkTokens(i){if(i.type!=="code")return;const s=fo(i);if(v.async)return Promise.resolve(v.highlight(i.text,s)).then(vi(i));const o=v.highlight(i.text,s);vi(i)(o)},renderer:{code(i,s,o){const m=(s||"").match(/\S*/)[0],p=m?` class="${v.langPrefix}${yi(m)}"`:"";return i=i.replace(/\n$/,""),`
    ${o?i:yi(i,!0)}
    -
    `}}}}function fo(v){return(v.lang||"").match(/\S*/)[0]}function vi(v){return i=>{typeof i=="string"&&i!==v.text&&(v.escaped=!0,v.text=i)}}const nl=/[&<>"']/,po=new RegExp(nl.source,"g"),al=/[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/,go=new RegExp(al.source,"g"),vo={"&":"&","<":"<",">":">",'"':""","'":"'"},bi=v=>vo[v];function yi(v,i){if(i){if(nl.test(v))return v.replace(po,bi)}else if(al.test(v))return v.replace(go,bi);return v}var il={exports:{}};(function(v){var i=typeof window<"u"?window:typeof WorkerGlobalScope<"u"&&self instanceof WorkerGlobalScope?self:{};/** - * Prism: Lightweight, robust, elegant syntax highlighting - * - * @license MIT - * @author Lea Verou - * @namespace - * @public - */var s=function(o){var m=/(?:^|\s)lang(?:uage)?-([\w-]+)(?=\s|$)/i,p=0,x={},w={manual:o.Prism&&o.Prism.manual,disableWorkerMessageHandler:o.Prism&&o.Prism.disableWorkerMessageHandler,util:{encode:function C(_){return _ instanceof z?new z(_.type,C(_.content),_.alias):Array.isArray(_)?_.map(C):_.replace(/&/g,"&").replace(/"u")return null;if("currentScript"in document&&1<2)return document.currentScript;try{throw new Error}catch(D){var C=(/at [^(\r\n]*\((.*):[^:]+:[^:]+\)$/i.exec(D.stack)||[])[1];if(C){var _=document.getElementsByTagName("script");for(var N in _)if(_[N].src==C)return _[N]}return null}},isActive:function(C,_,N){for(var D="no-"+_;C;){var I=C.classList;if(I.contains(_))return!0;if(I.contains(D))return!1;C=C.parentElement}return!!N}},languages:{plain:x,plaintext:x,text:x,txt:x,extend:function(C,_){var N=w.util.clone(w.languages[C]);for(var D in _)N[D]=_[D];return N},insertBefore:function(C,_,N,D){D=D||w.languages;var I=D[C],j={};for(var re in I)if(I.hasOwnProperty(re)){if(re==_)for(var Z in N)N.hasOwnProperty(Z)&&(j[Z]=N[Z]);N.hasOwnProperty(re)||(j[re]=I[re])}var U=D[C];return D[C]=j,w.languages.DFS(w.languages,function(fe,se){se===U&&fe!=C&&(this[fe]=j)}),j},DFS:function C(_,N,D,I){I=I||{};var j=w.util.objId;for(var re in _)if(_.hasOwnProperty(re)){N.call(_,re,_[re],D||re);var Z=_[re],U=w.util.type(Z);U==="Object"&&!I[j(Z)]?(I[j(Z)]=!0,C(Z,N,null,I)):U==="Array"&&!I[j(Z)]&&(I[j(Z)]=!0,C(Z,N,re,I))}}},plugins:{},highlightAll:function(C,_){w.highlightAllUnder(document,C,_)},highlightAllUnder:function(C,_,N){var D={callback:N,container:C,selector:'code[class*="language-"], [class*="language-"] code, code[class*="lang-"], [class*="lang-"] code'};w.hooks.run("before-highlightall",D),D.elements=Array.prototype.slice.apply(D.container.querySelectorAll(D.selector)),w.hooks.run("before-all-elements-highlight",D);for(var I=0,j;j=D.elements[I++];)w.highlightElement(j,_===!0,D.callback)},highlightElement:function(C,_,N){var D=w.util.getLanguage(C),I=w.languages[D];w.util.setLanguage(C,D);var j=C.parentElement;j&&j.nodeName.toLowerCase()==="pre"&&w.util.setLanguage(j,D);var re=C.textContent,Z={element:C,language:D,grammar:I,code:re};function U(se){Z.highlightedCode=se,w.hooks.run("before-insert",Z),Z.element.innerHTML=Z.highlightedCode,w.hooks.run("after-highlight",Z),w.hooks.run("complete",Z),N&&N.call(Z.element)}if(w.hooks.run("before-sanity-check",Z),j=Z.element.parentElement,j&&j.nodeName.toLowerCase()==="pre"&&!j.hasAttribute("tabindex")&&j.setAttribute("tabindex","0"),!Z.code){w.hooks.run("complete",Z),N&&N.call(Z.element);return}if(w.hooks.run("before-highlight",Z),!Z.grammar){U(w.util.encode(Z.code));return}if(_&&o.Worker){var fe=new Worker(w.filename);fe.onmessage=function(se){U(se.data)},fe.postMessage(JSON.stringify({language:Z.language,code:Z.code,immediateClose:!0}))}else 
U(w.highlight(Z.code,Z.grammar,Z.language))},highlight:function(C,_,N){var D={code:C,grammar:_,language:N};if(w.hooks.run("before-tokenize",D),!D.grammar)throw new Error('The language "'+D.language+'" has no grammar.');return D.tokens=w.tokenize(D.code,D.grammar),w.hooks.run("after-tokenize",D),z.stringify(w.util.encode(D.tokens),D.language)},tokenize:function(C,_){var N=_.rest;if(N){for(var D in N)_[D]=N[D];delete _.rest}var I=new K;return ne(I,I.head,C),G(C,I,_,I.head,0),xe(I)},hooks:{all:{},add:function(C,_){var N=w.hooks.all;N[C]=N[C]||[],N[C].push(_)},run:function(C,_){var N=w.hooks.all[C];if(!(!N||!N.length))for(var D=0,I;I=N[D++];)I(_)}},Token:z};o.Prism=w;function z(C,_,N,D){this.type=C,this.content=_,this.alias=N,this.length=(D||"").length|0}z.stringify=function C(_,N){if(typeof _=="string")return _;if(Array.isArray(_)){var D="";return _.forEach(function(U){D+=C(U,N)}),D}var I={type:_.type,content:C(_.content,N),tag:"span",classes:["token",_.type],attributes:{},language:N},j=_.alias;j&&(Array.isArray(j)?Array.prototype.push.apply(I.classes,j):I.classes.push(j)),w.hooks.run("wrap",I);var re="";for(var Z in I.attributes)re+=" "+Z+'="'+(I.attributes[Z]||"").replace(/"/g,""")+'"';return"<"+I.tag+' class="'+I.classes.join(" ")+'"'+re+">"+I.content+""};function L(C,_,N,D){C.lastIndex=_;var I=C.exec(N);if(I&&D&&I[1]){var j=I[1].length;I.index+=j,I[0]=I[0].slice(j)}return I}function G(C,_,N,D,I,j){for(var re in N)if(!(!N.hasOwnProperty(re)||!N[re])){var Z=N[re];Z=Array.isArray(Z)?Z:[Z];for(var U=0;U=j.reach);Te+=He.value.length,He=He.next){var Mt=He.value;if(_.length>C.length)return;if(!(Mt instanceof z)){var J=1,Ze;if(Pe){if(Ze=L(o0,Te,C,ze),!Ze||Ze.index>=C.length)break;var at=Ze.index,Fe=Ze.index+Ze[0].length,Ve=Te;for(Ve+=He.value.length;at>=Ve;)He=He.next,Ve+=He.value.length;if(Ve-=He.value.length,Te=Ve,He.value instanceof z)continue;for(var pt=He;pt!==_.tail&&(Vej.reach&&(j.reach=gt);var it=He.prev;Vt&&(it=ne(_,it,Vt),Te+=Vt.length),V(_,it,J);var c0=new z(re,se?w.tokenize(Ct,se):Ct,V0,Ct);if(He=ne(_,it,c0),u0&&ne(_,He,u0),J>1){var Lt={cause:re+","+U,reach:gt};G(C,_,N,He.prev,Te,Lt),j&&Lt.reach>j.reach&&(j.reach=Lt.reach)}}}}}}function K(){var C={value:null,prev:null,next:null},_={value:null,prev:C,next:null};C.next=_,this.head=C,this.tail=_,this.length=0}function ne(C,_,N){var D=_.next,I={value:N,prev:_,next:D};return _.next=I,D.prev=I,C.length++,I}function V(C,_,N){for(var 
D=_.next,I=0;I/,greedy:!0},prolog:{pattern:/<\?[\s\S]+?\?>/,greedy:!0},doctype:{pattern:/"'[\]]|"[^"]*"|'[^']*')+(?:\[(?:[^<"'\]]|"[^"]*"|'[^']*'|<(?!!--)|)*\]\s*)?>/i,greedy:!0,inside:{"internal-subset":{pattern:/(^[^\[]*\[)[\s\S]+(?=\]>$)/,lookbehind:!0,greedy:!0,inside:null},string:{pattern:/"[^"]*"|'[^']*'/,greedy:!0},punctuation:/^$|[[\]]/,"doctype-tag":/^DOCTYPE/i,name:/[^\s<>'"]+/}},cdata:{pattern://i,greedy:!0},tag:{pattern:/<\/?(?!\d)[^\s>\/=$<%]+(?:\s(?:\s*[^\s>\/=]+(?:\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+(?=[\s>]))|(?=[\s/>])))+)?\s*\/?>/,greedy:!0,inside:{tag:{pattern:/^<\/?[^\s>\/]+/,inside:{punctuation:/^<\/?/,namespace:/^[^\s>\/:]+:/}},"special-attr":[],"attr-value":{pattern:/=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+)/,inside:{punctuation:[{pattern:/^=/,alias:"attr-equals"},{pattern:/^(\s*)["']|["']$/,lookbehind:!0}]}},punctuation:/\/?>/,"attr-name":{pattern:/[^\s>\/]+/,inside:{namespace:/^[^\s>\/:]+:/}}}},entity:[{pattern:/&[\da-z]{1,8};/i,alias:"named-entity"},/&#x?[\da-f]{1,8};/i]},s.languages.markup.tag.inside["attr-value"].inside.entity=s.languages.markup.entity,s.languages.markup.doctype.inside["internal-subset"].inside=s.languages.markup,s.hooks.add("wrap",function(o){o.type==="entity"&&(o.attributes.title=o.content.replace(/&/,"&"))}),Object.defineProperty(s.languages.markup.tag,"addInlined",{value:function(m,p){var x={};x["language-"+p]={pattern:/(^$)/i,lookbehind:!0,inside:s.languages[p]},x.cdata=/^$/i;var w={"included-cdata":{pattern://i,inside:x}};w["language-"+p]={pattern:/[\s\S]+/,inside:s.languages[p]};var z={};z[m]={pattern:RegExp(/(<__[^>]*>)(?:))*\]\]>|(?!)/.source.replace(/__/g,function(){return m}),"i"),lookbehind:!0,greedy:!0,inside:w},s.languages.insertBefore("markup","cdata",z)}}),Object.defineProperty(s.languages.markup.tag,"addAttribute",{value:function(o,m){s.languages.markup.tag.inside["special-attr"].push({pattern:RegExp(/(^|["'\s])/.source+"(?:"+o+")"+/\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+(?=[\s>]))/.source,"i"),lookbehind:!0,inside:{"attr-name":/^[^\s=]+/,"attr-value":{pattern:/=[\s\S]+/,inside:{value:{pattern:/(^=\s*(["']|(?!["'])))\S[\s\S]*(?=\2$)/,lookbehind:!0,alias:[m,"language-"+m],inside:s.languages[m]},punctuation:[{pattern:/^=/,alias:"attr-equals"},/"|'/]}}}})}}),s.languages.html=s.languages.markup,s.languages.mathml=s.languages.markup,s.languages.svg=s.languages.markup,s.languages.xml=s.languages.extend("markup",{}),s.languages.ssml=s.languages.xml,s.languages.atom=s.languages.xml,s.languages.rss=s.languages.xml,function(o){var 
m=/(?:"(?:\\(?:\r\n|[\s\S])|[^"\\\r\n])*"|'(?:\\(?:\r\n|[\s\S])|[^'\\\r\n])*')/;o.languages.css={comment:/\/\*[\s\S]*?\*\//,atrule:{pattern:RegExp("@[\\w-](?:"+/[^;{\s"']|\s+(?!\s)/.source+"|"+m.source+")*?"+/(?:;|(?=\s*\{))/.source),inside:{rule:/^@[\w-]+/,"selector-function-argument":{pattern:/(\bselector\s*\(\s*(?![\s)]))(?:[^()\s]|\s+(?![\s)])|\((?:[^()]|\([^()]*\))*\))+(?=\s*\))/,lookbehind:!0,alias:"selector"},keyword:{pattern:/(^|[^\w-])(?:and|not|only|or)(?![\w-])/,lookbehind:!0}}},url:{pattern:RegExp("\\burl\\((?:"+m.source+"|"+/(?:[^\\\r\n()"']|\\[\s\S])*/.source+")\\)","i"),greedy:!0,inside:{function:/^url/i,punctuation:/^\(|\)$/,string:{pattern:RegExp("^"+m.source+"$"),alias:"url"}}},selector:{pattern:RegExp(`(^|[{}\\s])[^{}\\s](?:[^{};"'\\s]|\\s+(?![\\s{])|`+m.source+")*(?=\\s*\\{)"),lookbehind:!0},string:{pattern:m,greedy:!0},property:{pattern:/(^|[^-\w\xA0-\uFFFF])(?!\s)[-_a-z\xA0-\uFFFF](?:(?!\s)[-\w\xA0-\uFFFF])*(?=\s*:)/i,lookbehind:!0},important:/!important\b/i,function:{pattern:/(^|[^-a-z0-9])[-a-z0-9]+(?=\()/i,lookbehind:!0},punctuation:/[(){};:,]/},o.languages.css.atrule.inside.rest=o.languages.css;var p=o.languages.markup;p&&(p.tag.addInlined("style","css"),p.tag.addAttribute("style","css"))}(s),s.languages.clike={comment:[{pattern:/(^|[^\\])\/\*[\s\S]*?(?:\*\/|$)/,lookbehind:!0,greedy:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0,greedy:!0}],string:{pattern:/(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},"class-name":{pattern:/(\b(?:class|extends|implements|instanceof|interface|new|trait)\s+|\bcatch\s+\()[\w.\\]+/i,lookbehind:!0,inside:{punctuation:/[.\\]/}},keyword:/\b(?:break|catch|continue|do|else|finally|for|function|if|in|instanceof|new|null|return|throw|try|while)\b/,boolean:/\b(?:false|true)\b/,function:/\b\w+(?=\()/,number:/\b0x[\da-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?/i,operator:/[<>]=?|[!=]=?=?|--?|\+\+?|&&?|\|\|?|[?*/~^%]/,punctuation:/[{}[\];(),.:]/},s.languages.javascript=s.languages.extend("clike",{"class-name":[s.languages.clike["class-name"],{pattern:/(^|[^$\w\xA0-\uFFFF])(?!\s)[_$A-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\.(?:constructor|prototype))/,lookbehind:!0}],keyword:[{pattern:/((?:^|\})\s*)catch\b/,lookbehind:!0},{pattern:/(^|[^.]|\.\.\.\s*)\b(?:as|assert(?=\s*\{)|async(?=\s*(?:function\b|\(|[$\w\xA0-\uFFFF]|$))|await|break|case|class|const|continue|debugger|default|delete|do|else|enum|export|extends|finally(?=\s*(?:\{|$))|for|from(?=\s*(?:['"]|$))|function|(?:get|set)(?=\s*(?:[#\[$\w\xA0-\uFFFF]|$))|if|implements|import|in|instanceof|interface|let|new|null|of|package|private|protected|public|return|static|super|switch|this|throw|try|typeof|undefined|var|void|while|with|yield)\b/,lookbehind:!0}],function:/#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*(?:\.\s*(?:apply|bind|call)\s*)?\()/,number:{pattern:RegExp(/(^|[^\w$])/.source+"(?:"+(/NaN|Infinity/.source+"|"+/0[bB][01]+(?:_[01]+)*n?/.source+"|"+/0[oO][0-7]+(?:_[0-7]+)*n?/.source+"|"+/0[xX][\dA-Fa-f]+(?:_[\dA-Fa-f]+)*n?/.source+"|"+/\d+(?:_\d+)*n/.source+"|"+/(?:\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\.\d+(?:_\d+)*)(?:[Ee][+-]?\d+(?:_\d+)*)?/.source)+")"+/(?![\w$])/.source),lookbehind:!0},operator:/--|\+\+|\*\*=?|=>|&&=?|\|\|=?|[!=]==|<<=?|>>>?=?|[-+*/%&|^!=<>]=?|\.{3}|\?\?=?|\?\.?|[~:]/}),s.languages.javascript["class-name"][0].pattern=/(\b(?:class|extends|implements|instanceof|interface|new)\s+)[\w.\\]+/,s.languages.insertBefore("javascript","keyword",{regex:{pattern:RegExp(/((?:^|[^$\w\xA0-\uFFFF."'\])\s]|\b(?:return|yield))\s*)/.source+/\//.source
+"(?:"+/(?:\[(?:[^\]\\\r\n]|\\.)*\]|\\.|[^/\\\[\r\n])+\/[dgimyus]{0,7}/.source+"|"+/(?:\[(?:[^[\]\\\r\n]|\\.|\[(?:[^[\]\\\r\n]|\\.|\[(?:[^[\]\\\r\n]|\\.)*\])*\])*\]|\\.|[^/\\\[\r\n])+\/[dgimyus]{0,7}v[dgimyus]{0,7}/.source+")"+/(?=(?:\s|\/\*(?:[^*]|\*(?!\/))*\*\/)*(?:$|[\r\n,.;:})\]]|\/\/))/.source),lookbehind:!0,greedy:!0,inside:{"regex-source":{pattern:/^(\/)[\s\S]+(?=\/[a-z]*$)/,lookbehind:!0,alias:"language-regex",inside:s.languages.regex},"regex-delimiter":/^\/|\/$/,"regex-flags":/^[a-z]+$/}},"function-variable":{pattern:/#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*[=:]\s*(?:async\s*)?(?:\bfunction\b|(?:\((?:[^()]|\([^()]*\))*\)|(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*)\s*=>))/,alias:"function"},parameter:[{pattern:/(function(?:\s+(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*)?\s*\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\))/,lookbehind:!0,inside:s.languages.javascript},{pattern:/(^|[^$\w\xA0-\uFFFF])(?!\s)[_$a-z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*=>)/i,lookbehind:!0,inside:s.languages.javascript},{pattern:/(\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\)\s*=>)/,lookbehind:!0,inside:s.languages.javascript},{pattern:/((?:\b|\s|^)(?!(?:as|async|await|break|case|catch|class|const|continue|debugger|default|delete|do|else|enum|export|extends|finally|for|from|function|get|if|implements|import|in|instanceof|interface|let|new|null|of|package|private|protected|public|return|set|static|super|switch|this|throw|try|typeof|undefined|var|void|while|with|yield)(?![$\w\xA0-\uFFFF]))(?:(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*\s*)\(\s*|\]\s*\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\)\s*\{)/,lookbehind:!0,inside:s.languages.javascript}],constant:/\b[A-Z](?:[A-Z_]|\dx?)*\b/}),s.languages.insertBefore("javascript","string",{hashbang:{pattern:/^#!.*/,greedy:!0,alias:"comment"},"template-string":{pattern:/`(?:\\[\s\S]|\$\{(?:[^{}]|\{(?:[^{}]|\{[^}]*\})*\})+\}|(?!\$\{)[^\\`])*`/,greedy:!0,inside:{"template-punctuation":{pattern:/^`|`$/,alias:"string"},interpolation:{pattern:/((?:^|[^\\])(?:\\{2})*)\$\{(?:[^{}]|\{(?:[^{}]|\{[^}]*\})*\})+\}/,lookbehind:!0,inside:{"interpolation-punctuation":{pattern:/^\$\{|\}$/,alias:"punctuation"},rest:s.languages.javascript}},string:/[\s\S]+/}},"string-property":{pattern:/((?:^|[,{])[ \t]*)(["'])(?:\\(?:\r\n|[\s\S])|(?!\2)[^\\\r\n])*\2(?=\s*:)/m,lookbehind:!0,greedy:!0,alias:"property"}}),s.languages.insertBefore("javascript","operator",{"literal-property":{pattern:/((?:^|[,{])[ \t]*)(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*:)/m,lookbehind:!0,alias:"property"}}),s.languages.markup&&(s.languages.markup.tag.addInlined("script","javascript"),s.languages.markup.tag.addAttribute(/on(?:abort|blur|change|click|composition(?:end|start|update)|dblclick|error|focus(?:in|out)?|key(?:down|up)|load|mouse(?:down|enter|leave|move|out|over|up)|reset|resize|scroll|select|slotchange|submit|unload|wheel)/.source,"javascript")),s.languages.js=s.languages.javascript,function(){if(typeof s>"u"||typeof document>"u")return;Element.prototype.matches||(Element.prototype.matches=Element.prototype.msMatchesSelector||Element.prototype.webkitMatchesSelector);var o="Loading…",m=function(ge,le){return"✖ Error "+ge+" while fetching file: "+le},p="✖ Error: File does not exist or is 
empty",x={js:"javascript",py:"python",rb:"ruby",ps1:"powershell",psm1:"powershell",sh:"bash",bat:"batch",h:"c",tex:"latex"},w="data-src-status",z="loading",L="loaded",G="failed",K="pre[data-src]:not(["+w+'="'+L+'"]):not(['+w+'="'+z+'"])';function ne(ge,le,q){var C=new XMLHttpRequest;C.open("GET",ge,!0),C.onreadystatechange=function(){C.readyState==4&&(C.status<400&&C.responseText?le(C.responseText):C.status>=400?q(m(C.status,C.statusText)):q(p))},C.send(null)}function V(ge){var le=/^\s*(\d+)\s*(?:(,)\s*(?:(\d+)\s*)?)?$/.exec(ge||"");if(le){var q=Number(le[1]),C=le[2],_=le[3];return C?_?[q,Number(_)]:[q,void 0]:[q,q]}}s.hooks.add("before-highlightall",function(ge){ge.selector+=", "+K}),s.hooks.add("before-sanity-check",function(ge){var le=ge.element;if(le.matches(K)){ge.code="",le.setAttribute(w,z);var q=le.appendChild(document.createElement("CODE"));q.textContent=o;var C=le.getAttribute("data-src"),_=ge.language;if(_==="none"){var N=(/\.(\w+)$/.exec(C)||[,"none"])[1];_=x[N]||N}s.util.setLanguage(q,_),s.util.setLanguage(le,_);var D=s.plugins.autoloader;D&&D.loadLanguages(_),ne(C,function(I){le.setAttribute(w,L);var j=V(le.getAttribute("data-range"));if(j){var re=I.split(/\r\n?|\n/g),Z=j[0],U=j[1]==null?re.length:j[1];Z<0&&(Z+=re.length),Z=Math.max(0,Math.min(Z-1,re.length)),U<0&&(U+=re.length),U=Math.max(0,Math.min(U,re.length)),I=re.slice(Z,U).join(` -`),le.hasAttribute("data-start")||le.setAttribute("data-start",String(Z+1))}q.textContent=I,s.highlightElement(q)},function(I){le.setAttribute(w,G),q.textContent=I})}}),s.plugins.fileHighlight={highlight:function(le){for(var q=(le||document).querySelectorAll(K),C=0,_;_=q[C++];)s.highlightElement(_)}};var xe=!1;s.fileHighlight=function(){xe||(console.warn("Prism.fileHighlight is deprecated. Use `Prism.plugins.fileHighlight.highlight` instead."),xe=!0),s.plugins.fileHighlight.highlight.apply(this,arguments)}}()})(il);var bo=il.exports;const En=Gi(bo);Prism.languages.python={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0,greedy:!0},"string-interpolation":{pattern:/(?:f|fr|rf)(?:("""|''')[\s\S]*?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2)/i,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^{])(?:\{\{)*)\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}])+\})+\})+\}/,lookbehind:!0,inside:{"format-spec":{pattern:/(:)[^:(){}]+(?=\}$)/,lookbehind:!0},"conversion-option":{pattern:/![sra](?=[:}]$)/,alias:"punctuation"},rest:null}},string:/[\s\S]+/}},"triple-quoted-string":{pattern:/(?:[rub]|br|rb)?("""|''')[\s\S]*?\1/i,greedy:!0,alias:"string"},string:{pattern:/(?:[rub]|br|rb)?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/i,greedy:!0},function:{pattern:/((?:^|\s)def[ \t]+)[a-zA-Z_]\w*(?=\s*\()/g,lookbehind:!0},"class-name":{pattern:/(\bclass\s+)\w+/i,lookbehind:!0},decorator:{pattern:/(^[\t 
]*)@\w+(?:\.\w+)*/m,lookbehind:!0,alias:["annotation","punctuation"],inside:{punctuation:/\./}},keyword:/\b(?:_(?=\s*:)|and|as|assert|async|await|break|case|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|match|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)\b/,builtin:/\b(?:__import__|abs|all|any|apply|ascii|basestring|bin|bool|buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|format|frozenset|getattr|globals|hasattr|hash|help|hex|id|input|int|intern|isinstance|issubclass|iter|len|list|locals|long|map|max|memoryview|min|next|object|oct|open|ord|pow|property|range|raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)\b/,boolean:/\b(?:False|None|True)\b/,number:/\b0(?:b(?:_?[01])+|o(?:_?[0-7])+|x(?:_?[a-f0-9])+)\b|(?:\b\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\B\.\d+(?:_\d+)*)(?:e[+-]?\d+(?:_\d+)*)?j?(?!\w)/i,operator:/[-+%=]=?|!=|:=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punctuation:/[{}[\];(),.:]/};Prism.languages.python["string-interpolation"].inside.interpolation.inside.rest=Prism.languages.python;Prism.languages.py=Prism.languages.python;(function(v){var i=/\\(?:[^a-z()[\]]|[a-z*]+)/i,s={"equation-command":{pattern:i,alias:"regex"}};v.languages.latex={comment:/%.*/,cdata:{pattern:/(\\begin\{((?:lstlisting|verbatim)\*?)\})[\s\S]*?(?=\\end\{\2\})/,lookbehind:!0},equation:[{pattern:/\$\$(?:\\[\s\S]|[^\\$])+\$\$|\$(?:\\[\s\S]|[^\\$])+\$|\\\([\s\S]*?\\\)|\\\[[\s\S]*?\\\]/,inside:s,alias:"string"},{pattern:/(\\begin\{((?:align|eqnarray|equation|gather|math|multline)\*?)\})[\s\S]*?(?=\\end\{\2\})/,lookbehind:!0,inside:s,alias:"string"}],keyword:{pattern:/(\\(?:begin|cite|documentclass|end|label|ref|usepackage)(?:\[[^\]]+\])?\{)[^}]+(?=\})/,lookbehind:!0},url:{pattern:/(\\url\{)[^}]+(?=\})/,lookbehind:!0},headline:{pattern:/(\\(?:chapter|frametitle|paragraph|part|section|subparagraph|subsection|subsubparagraph|subsubsection|subsubsubparagraph)\*?(?:\[[^\]]+\])?\{)[^}]+(?=\})/,lookbehind:!0,alias:"class-name"},function:{pattern:i,alias:"selector"},punctuation:/[[\]{}&]/},v.languages.tex=v.languages.latex,v.languages.context=v.languages.latex})(Prism);const yo=``,xo=``,xi=``,ll=/[&<>"']/,wo=new RegExp(ll.source,"g"),sl=/[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/,ko=new RegExp(sl.source,"g"),So={"&":"&","<":"<",">":">",'"':""","'":"'"},wi=v=>So[v]||"";function Bn(v,i){if(i){if(ll.test(v))return v.replace(wo,wi)}else if(sl.test(v))return v.replace(ko,wi);return v}const Ao={code(v,i,s){const o=(i??"").match(/\S*/)?.[0]??"";if(this.options.highlight){const m=this.options.highlight(v,o);m!=null&&m!==v&&(s=!0,v=m)}return v=v.replace(/\n$/,"")+` -`,o?'
    '+xi+(s?v:Bn(v,!0))+`
    -`:"
    "+xi+(s?v:Bn(v,!0))+`
    -`}};he.use({gfm:!0,breaks:!0,pedantic:!1,headerIds:!1,mangle:!1},mo({highlight:(v,i)=>En.languages[i]?En.highlight(v,En.languages[i],i):v}),{renderer:Ao});function To(v){v.addEventListener("click",i);async function i(s){const o=s.composedPath(),[m]=o.filter(p=>p?.tagName==="BUTTON"&&p.classList.contains("copy_code_button"));if(m){let p=function(L){L.style.opacity="1",setTimeout(()=>{L.style.opacity="0"},2e3)};s.stopImmediatePropagation();const x=m.parentElement.innerText.trim(),w=Array.from(m.children)[1];await Mo(x)&&p(w)}}return{destroy(){v.removeEventListener("click",i)}}}async function Mo(v){let i=!1;if("clipboard"in navigator)await navigator.clipboard.writeText(v),i=!0;else{const s=document.createElement("textarea");s.value=v,s.style.position="absolute",s.style.left="-999999px",document.body.prepend(s),s.select();try{document.execCommand("copy"),i=!0}catch(o){console.error(o),i=!1}finally{s.remove()}}return i}const zo=async v=>(await Promise.all(v.map(async s=>await Promise.all(s.map(async(o,m)=>{if(o===null)return"";let p=m===0?"😃":"🤖",x="";if(typeof o=="string")x=o;else{const w=await Gs(o.data,"url");o.mime_type?.includes("audio")?x=``:o.mime_type?.includes("video")?x=w:o.mime_type?.includes("image")&&(x=``)}return`${p}: ${x}`}))))).map(s=>s.join(s[0]!==""&&s[1]!==""?` -`:"")).join(` -`);/*! @license DOMPurify 3.0.3 | (c) Cure53 and other contributors | Released under the Apache license 2.0 and Mozilla Public License 2.0 | github.com/cure53/DOMPurify/blob/3.0.3/LICENSE */const{entries:ol,setPrototypeOf:ki,isFrozen:_o,getPrototypeOf:Eo,getOwnPropertyDescriptor:Bo}=Object;let{freeze:nt,seal:Bt,create:Co}=Object,{apply:On,construct:qn}=typeof Reflect<"u"&&Reflect;On||(On=function(i,s,o){return i.apply(s,o)});nt||(nt=function(i){return i});Bt||(Bt=function(i){return i});qn||(qn=function(i,s){return new i(...s)});const Do=Tt(Array.prototype.forEach),Si=Tt(Array.prototype.pop),rr=Tt(Array.prototype.push),Fr=Tt(String.prototype.toLowerCase),Cn=Tt(String.prototype.toString),No=Tt(String.prototype.match),Et=Tt(String.prototype.replace),Ro=Tt(String.prototype.indexOf),Fo=Tt(String.prototype.trim),ft=Tt(RegExp.prototype.test),nr=Io(TypeError);function Tt(v){return function(i){for(var s=arguments.length,o=new Array(s>1?s-1:0),m=1;m/gm),Ho=Bt(/\${[\w\W]*}/gm),Uo=Bt(/^data-[\-\w.\u00B7-\uFFFF]/),Go=Bt(/^aria-[\-\w]+$/),ul=Bt(/^(?:(?:(?:f|ht)tps?|mailto|tel|callto|sms|cid|xmpp):|[^a-z]|[a-z+.\-]+(?:[^a-z+.\-:]|$))/i),Vo=Bt(/^(?:\w+script|data):/i),Wo=Bt(/[\u0000-\u0020\u00A0\u1680\u180E\u2000-\u2029\u205F\u3000]/g),cl=Bt(/^html$/i);var _i=Object.freeze({__proto__:null,MUSTACHE_EXPR:qo,ERB_EXPR:Po,TMPLIT_EXPR:Ho,DATA_ATTR:Uo,ARIA_ATTR:Go,IS_ALLOWED_URI:ul,IS_SCRIPT_OR_DATA:Vo,ATTR_WHITESPACE:Wo,DOCTYPE_NAME:cl});const Yo=()=>typeof window>"u"?null:window,jo=function(i,s){if(typeof i!="object"||typeof i.createPolicy!="function")return null;let o=null;const m="data-tt-policy-suffix";s&&s.hasAttribute(m)&&(o=s.getAttribute(m));const p="dompurify"+(o?"#"+o:"");try{return i.createPolicy(p,{createHTML(x){return x},createScriptURL(x){return x}})}catch{return console.warn("TrustedTypes policy "+p+" could not be created."),null}};function hl(){let v=arguments.length>0&&arguments[0]!==void 0?arguments[0]:Yo();const i=oe=>hl(oe);if(i.version="3.0.3",i.removed=[],!v||!v.document||v.document.nodeType!==9)return i.isSupported=!1,i;const 
s=v.document,o=s.currentScript;let{document:m}=v;const{DocumentFragment:p,HTMLTemplateElement:x,Node:w,Element:z,NodeFilter:L,NamedNodeMap:G=v.NamedNodeMap||v.MozNamedAttrMap,HTMLFormElement:K,DOMParser:ne,trustedTypes:V}=v,xe=z.prototype,ge=Dr(xe,"cloneNode"),le=Dr(xe,"nextSibling"),q=Dr(xe,"childNodes"),C=Dr(xe,"parentNode");if(typeof x=="function"){const oe=m.createElement("template");oe.content&&oe.content.ownerDocument&&(m=oe.content.ownerDocument)}let _,N="";const{implementation:D,createNodeIterator:I,createDocumentFragment:j,getElementsByTagName:re}=m,{importNode:Z}=s;let U={};i.isSupported=typeof ol=="function"&&typeof C=="function"&&D&&D.createHTMLDocument!==void 0;const{MUSTACHE_EXPR:fe,ERB_EXPR:se,TMPLIT_EXPR:ze,DATA_ATTR:Pe,ARIA_ATTR:V0,IS_SCRIPT_OR_DATA:W0,ATTR_WHITESPACE:o0}=_i;let{IS_ALLOWED_URI:He}=_i,Te=null;const Mt=ye({},[...Ai,...Dn,...Nn,...Rn,...Ti]);let J=null;const Ze=ye({},[...Mi,...Fn,...zi,...Nr]);let Fe=Object.seal(Object.create(null,{tagNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},attributeNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},allowCustomizedBuiltInElements:{writable:!0,configurable:!1,enumerable:!0,value:!1}})),Ve=null,pt=null,at=!0,Ct=!0,Vt=!1,u0=!0,gt=!1,it=!1,c0=!1,Lt=!1,Wt=!1,k0=!1,h0=!1,ar=!0,Yt=!1;const vt="user-content-";let jt=!0,Xt=!1,$t={},Dt=null;const S0=ye({},["annotation-xml","audio","colgroup","desc","foreignobject","head","iframe","math","mi","mn","mo","ms","mtext","noembed","noframes","noscript","plaintext","script","style","svg","template","thead","title","video","xmp"]);let ir=null;const lr=ye({},["audio","video","img","source","image","track"]);let A0=null;const Y0=ye({},["alt","class","for","id","label","name","pattern","placeholder","role","summary","title","value","style","xmlns"]),m0="http://www.w3.org/1998/Math/MathML",T0="http://www.w3.org/2000/svg",ht="http://www.w3.org/1999/xhtml";let Zt=ht,M0=!1,Ee=null;const X=ye({},[m0,T0,ht],Cn);let Je;const sr=["application/xhtml+xml","text/html"],or="text/html";let Ue,bt=null;const j0=m.createElement("form"),ur=function(A){return A instanceof RegExp||A instanceof Function},X0=function(A){if(!(bt&&bt===A)){if((!A||typeof A!="object")&&(A={}),A=L0(A),Je=sr.indexOf(A.PARSER_MEDIA_TYPE)===-1?Je=or:Je=A.PARSER_MEDIA_TYPE,Ue=Je==="application/xhtml+xml"?Cn:Fr,Te="ALLOWED_TAGS"in A?ye({},A.ALLOWED_TAGS,Ue):Mt,J="ALLOWED_ATTR"in A?ye({},A.ALLOWED_ATTR,Ue):Ze,Ee="ALLOWED_NAMESPACES"in A?ye({},A.ALLOWED_NAMESPACES,Cn):X,A0="ADD_URI_SAFE_ATTR"in A?ye(L0(Y0),A.ADD_URI_SAFE_ATTR,Ue):Y0,ir="ADD_DATA_URI_TAGS"in A?ye(L0(lr),A.ADD_DATA_URI_TAGS,Ue):lr,Dt="FORBID_CONTENTS"in A?ye({},A.FORBID_CONTENTS,Ue):S0,Ve="FORBID_TAGS"in A?ye({},A.FORBID_TAGS,Ue):{},pt="FORBID_ATTR"in A?ye({},A.FORBID_ATTR,Ue):{},$t="USE_PROFILES"in 
A?A.USE_PROFILES:!1,at=A.ALLOW_ARIA_ATTR!==!1,Ct=A.ALLOW_DATA_ATTR!==!1,Vt=A.ALLOW_UNKNOWN_PROTOCOLS||!1,u0=A.ALLOW_SELF_CLOSE_IN_ATTR!==!1,gt=A.SAFE_FOR_TEMPLATES||!1,it=A.WHOLE_DOCUMENT||!1,Wt=A.RETURN_DOM||!1,k0=A.RETURN_DOM_FRAGMENT||!1,h0=A.RETURN_TRUSTED_TYPE||!1,Lt=A.FORCE_BODY||!1,ar=A.SANITIZE_DOM!==!1,Yt=A.SANITIZE_NAMED_PROPS||!1,jt=A.KEEP_CONTENT!==!1,Xt=A.IN_PLACE||!1,He=A.ALLOWED_URI_REGEXP||ul,Zt=A.NAMESPACE||ht,Fe=A.CUSTOM_ELEMENT_HANDLING||{},A.CUSTOM_ELEMENT_HANDLING&&ur(A.CUSTOM_ELEMENT_HANDLING.tagNameCheck)&&(Fe.tagNameCheck=A.CUSTOM_ELEMENT_HANDLING.tagNameCheck),A.CUSTOM_ELEMENT_HANDLING&&ur(A.CUSTOM_ELEMENT_HANDLING.attributeNameCheck)&&(Fe.attributeNameCheck=A.CUSTOM_ELEMENT_HANDLING.attributeNameCheck),A.CUSTOM_ELEMENT_HANDLING&&typeof A.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements=="boolean"&&(Fe.allowCustomizedBuiltInElements=A.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements),gt&&(Ct=!1),k0&&(Wt=!0),$t&&(Te=ye({},[...Ti]),J=[],$t.html===!0&&(ye(Te,Ai),ye(J,Mi)),$t.svg===!0&&(ye(Te,Dn),ye(J,Fn),ye(J,Nr)),$t.svgFilters===!0&&(ye(Te,Nn),ye(J,Fn),ye(J,Nr)),$t.mathMl===!0&&(ye(Te,Rn),ye(J,zi),ye(J,Nr))),A.ADD_TAGS&&(Te===Mt&&(Te=L0(Te)),ye(Te,A.ADD_TAGS,Ue)),A.ADD_ATTR&&(J===Ze&&(J=L0(J)),ye(J,A.ADD_ATTR,Ue)),A.ADD_URI_SAFE_ATTR&&ye(A0,A.ADD_URI_SAFE_ATTR,Ue),A.FORBID_CONTENTS&&(Dt===S0&&(Dt=L0(Dt)),ye(Dt,A.FORBID_CONTENTS,Ue)),jt&&(Te["#text"]=!0),it&&ye(Te,["html","head","body"]),Te.table&&(ye(Te,["tbody"]),delete Ve.tbody),A.TRUSTED_TYPES_POLICY){if(typeof A.TRUSTED_TYPES_POLICY.createHTML!="function")throw nr('TRUSTED_TYPES_POLICY configuration option must provide a "createHTML" hook.');if(typeof A.TRUSTED_TYPES_POLICY.createScriptURL!="function")throw nr('TRUSTED_TYPES_POLICY configuration option must provide a "createScriptURL" hook.');_=A.TRUSTED_TYPES_POLICY,N=_.createHTML("")}else _===void 0&&(_=jo(V,o)),_!==null&&typeof N=="string"&&(N=_.createHTML(""));nt&&nt(A),bt=A}},tt=ye({},["mi","mo","mn","ms","mtext"]),yt=ye({},["foreignobject","desc","title","annotation-xml"]),Nt=ye({},["title","style","font","a","script"]),Kt=ye({},Dn);ye(Kt,Nn),ye(Kt,Lo);const z0=ye({},Rn);ye(z0,Oo);const Hr=function(A){let P=C(A);(!P||!P.tagName)&&(P={namespaceURI:Zt,tagName:"template"});const $=Fr(A.tagName),we=Fr(P.tagName);return Ee[A.namespaceURI]?A.namespaceURI===T0?P.namespaceURI===ht?$==="svg":P.namespaceURI===m0?$==="svg"&&(we==="annotation-xml"||tt[we]):!!Kt[$]:A.namespaceURI===m0?P.namespaceURI===ht?$==="math":P.namespaceURI===T0?$==="math"&&yt[we]:!!z0[$]:A.namespaceURI===ht?P.namespaceURI===T0&&!yt[we]||P.namespaceURI===m0&&!tt[we]?!1:!z0[$]&&(Nt[$]||!Kt[$]):!!(Je==="application/xhtml+xml"&&Ee[A.namespaceURI]):!1},Ot=function(A){rr(i.removed,{element:A});try{A.parentNode.removeChild(A)}catch{A.remove()}},$0=function(A,P){try{rr(i.removed,{attribute:P.getAttributeNode(A),from:P})}catch{rr(i.removed,{attribute:null,from:P})}if(P.removeAttribute(A),A==="is"&&!J[A])if(Wt||k0)try{Ot(P)}catch{}else try{P.setAttribute(A,"")}catch{}},d0=function(A){let P,$;if(Lt)A=""+A;else{const qe=No(A,/^[\r\n\t ]+/);$=qe&&qe[0]}Je==="application/xhtml+xml"&&Zt===ht&&(A=''+A+"");const we=_?_.createHTML(A):A;if(Zt===ht)try{P=new ne().parseFromString(we,Je)}catch{}if(!P||!P.documentElement){P=D.createDocument(Zt,"template",null);try{P.documentElement.innerHTML=M0?N:we}catch{}}const k=P.body||P.documentElement;return A&&$&&k.insertBefore(m.createTextNode($),k.childNodes[0]||null),Zt===ht?re.call(P,it?"html":"body")[0]:it?P.documentElement:k},Ne=function(A){return 
I.call(A.ownerDocument||A,A,L.SHOW_ELEMENT|L.SHOW_COMMENT|L.SHOW_TEXT,null,!1)},l=function(A){return A instanceof K&&(typeof A.nodeName!="string"||typeof A.textContent!="string"||typeof A.removeChild!="function"||!(A.attributes instanceof G)||typeof A.removeAttribute!="function"||typeof A.setAttribute!="function"||typeof A.namespaceURI!="string"||typeof A.insertBefore!="function"||typeof A.hasChildNodes!="function")},h=function(A){return typeof w=="object"?A instanceof w:A&&typeof A=="object"&&typeof A.nodeType=="number"&&typeof A.nodeName=="string"},H=function(A,P,$){U[A]&&Do(U[A],we=>{we.call(i,P,$,bt)})},f=function(A){let P;if(H("beforeSanitizeElements",A,null),l(A))return Ot(A),!0;const $=Ue(A.nodeName);if(H("uponSanitizeElement",A,{tagName:$,allowedTags:Te}),A.hasChildNodes()&&!h(A.firstElementChild)&&(!h(A.content)||!h(A.content.firstElementChild))&&ft(/<[/\w]/g,A.innerHTML)&&ft(/<[/\w]/g,A.textContent))return Ot(A),!0;if(!Te[$]||Ve[$]){if(!Ve[$]&&Be($)&&(Fe.tagNameCheck instanceof RegExp&&ft(Fe.tagNameCheck,$)||Fe.tagNameCheck instanceof Function&&Fe.tagNameCheck($)))return!1;if(jt&&!Dt[$]){const we=C(A)||A.parentNode,k=q(A)||A.childNodes;if(k&&we){const qe=k.length;for(let M=qe-1;M>=0;--M)we.insertBefore(ge(k[M],!0),le(A))}}return Ot(A),!0}return A instanceof z&&!Hr(A)||($==="noscript"||$==="noembed")&&ft(/<\/no(script|embed)/i,A.innerHTML)?(Ot(A),!0):(gt&&A.nodeType===3&&(P=A.textContent,P=Et(P,fe," "),P=Et(P,se," "),P=Et(P,ze," "),A.textContent!==P&&(rr(i.removed,{element:A.cloneNode()}),A.textContent=P)),H("afterSanitizeElements",A,null),!1)},S=function(A,P,$){if(ar&&(P==="id"||P==="name")&&($ in m||$ in j0))return!1;if(!(Ct&&!pt[P]&&ft(Pe,P))){if(!(at&&ft(V0,P))){if(!J[P]||pt[P]){if(!(Be(A)&&(Fe.tagNameCheck instanceof RegExp&&ft(Fe.tagNameCheck,A)||Fe.tagNameCheck instanceof Function&&Fe.tagNameCheck(A))&&(Fe.attributeNameCheck instanceof RegExp&&ft(Fe.attributeNameCheck,P)||Fe.attributeNameCheck instanceof Function&&Fe.attributeNameCheck(P))||P==="is"&&Fe.allowCustomizedBuiltInElements&&(Fe.tagNameCheck instanceof RegExp&&ft(Fe.tagNameCheck,$)||Fe.tagNameCheck instanceof Function&&Fe.tagNameCheck($))))return!1}else if(!A0[P]){if(!ft(He,Et($,o0,""))){if(!((P==="src"||P==="xlink:href"||P==="href")&&A!=="script"&&Ro($,"data:")===0&&ir[A])){if(!(Vt&&!ft(W0,Et($,o0,"")))){if($)return!1}}}}}}return!0},Be=function(A){return A.indexOf("-")>0},te=function(A){let P,$,we,k;H("beforeSanitizeAttributes",A,null);const{attributes:qe}=A;if(!qe)return;const M={attrName:"",attrValue:"",keepAttr:!0,allowedAttributes:J};for(k=qe.length;k--;){P=qe[k];const{name:mt,namespaceURI:_0}=P;if($=mt==="value"?P.value:Fo(P.value),we=Ue(mt),M.attrName=we,M.attrValue=$,M.keepAttr=!0,M.forceKeepAttr=void 0,H("uponSanitizeAttribute",A,M),$=M.attrValue,M.forceKeepAttr||($0(mt,A),!M.keepAttr))continue;if(!u0&&ft(/\/>/i,$)){$0(mt,A);continue}gt&&($=Et($,fe," "),$=Et($,se," "),$=Et($,ze," "));const E0=Ue(A.nodeName);if(S(E0,we,$)){if(Yt&&(we==="id"||we==="name")&&($0(mt,A),$=vt+$),_&&typeof V=="object"&&typeof V.getAttributeType=="function"&&!_0)switch(V.getAttributeType(E0,we)){case"TrustedHTML":{$=_.createHTML($);break}case"TrustedScriptURL":{$=_.createScriptURL($);break}}try{_0?A.setAttributeNS(_0,mt,$):A.setAttribute(mt,$),Si(i.removed)}catch{}}}H("afterSanitizeAttributes",A,null)},Ke=function oe(A){let P;const $=Ne(A);for(H("beforeSanitizeShadowDOM",A,null);P=$.nextNode();)H("uponSanitizeShadowNode",P,null),!f(P)&&(P.content instanceof p&&oe(P.content),te(P));H("afterSanitizeShadowDOM",A,null)};return 
i.sanitize=function(oe){let A=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{},P,$,we,k;if(M0=!oe,M0&&(oe=""),typeof oe!="string"&&!h(oe))if(typeof oe.toString=="function"){if(oe=oe.toString(),typeof oe!="string")throw nr("dirty is not a string, aborting")}else throw nr("toString is not a function");if(!i.isSupported)return oe;if(c0||X0(A),i.removed=[],typeof oe=="string"&&(Xt=!1),Xt){if(oe.nodeName){const mt=Ue(oe.nodeName);if(!Te[mt]||Ve[mt])throw nr("root node is forbidden and cannot be sanitized in-place")}}else if(oe instanceof w)P=d0(""),$=P.ownerDocument.importNode(oe,!0),$.nodeType===1&&$.nodeName==="BODY"||$.nodeName==="HTML"?P=$:P.appendChild($);else{if(!Wt&&!gt&&!it&&oe.indexOf("<")===-1)return _&&h0?_.createHTML(oe):oe;if(P=d0(oe),!P)return Wt?null:h0?N:""}P&&Lt&&Ot(P.firstChild);const qe=Ne(Xt?oe:P);for(;we=qe.nextNode();)f(we)||(we.content instanceof p&&Ke(we.content),te(we));if(Xt)return oe;if(Wt){if(k0)for(k=j.call(P.ownerDocument);P.firstChild;)k.appendChild(P.firstChild);else k=P;return(J.shadowroot||J.shadowrootmod)&&(k=Z.call(s,k,!0)),k}let M=it?P.outerHTML:P.innerHTML;return it&&Te["!doctype"]&&P.ownerDocument&&P.ownerDocument.doctype&&P.ownerDocument.doctype.name&&ft(cl,P.ownerDocument.doctype.name)&&(M=" -`+M),gt&&(M=Et(M,fe," "),M=Et(M,se," "),M=Et(M,ze," ")),_&&h0?_.createHTML(M):M},i.setConfig=function(oe){X0(oe),c0=!0},i.clearConfig=function(){bt=null,c0=!1},i.isValidAttribute=function(oe,A,P){bt||X0({});const $=Ue(oe),we=Ue(A);return S($,we,P)},i.addHook=function(oe,A){typeof A=="function"&&(U[oe]=U[oe]||[],rr(U[oe],A))},i.removeHook=function(oe){if(U[oe])return Si(U[oe])},i.removeHooks=function(oe){U[oe]&&(U[oe]=[])},i.removeAllHooks=function(){U={}},i}var Ei=hl(),ml={exports:{}},In={exports:{}},Bi;function Xo(){return Bi||(Bi=1,function(v,i){(function(o,m){v.exports=m()})(typeof self<"u"?self:Ir,function(){return function(){var s={};(function(){s.d=function(u,e){for(var t in e)s.o(e,t)&&!s.o(u,t)&&Object.defineProperty(u,t,{enumerable:!0,get:e[t]})}})(),function(){s.o=function(u,e){return Object.prototype.hasOwnProperty.call(u,e)}}();var o={};s.d(o,{default:function(){return Ds}});var m=function u(e,t){this.position=void 0;var r="KaTeX parse error: "+e,n,a=t&&t.loc;if(a&&a.start<=a.end){var c=a.lexer.input;n=a.start;var d=a.end;n===c.length?r+=" at end of input: ":r+=" at position "+(n+1)+": ";var g=c.slice(n,d).replace(/[^]/g,"$&̲"),y;n>15?y="…"+c.slice(n-15,n):y=c.slice(0,n);var T;d+15":">","<":"<",'"':""","'":"'"},K=/[&><"']/g;function ne(u){return String(u).replace(K,function(e){return G[e]})}var V=function u(e){return e.type==="ordgroup"||e.type==="color"?e.body.length===1?u(e.body[0]):e:e.type==="font"?u(e.body):e},xe=function(e){var t=V(e);return t.type==="mathord"||t.type==="textord"||t.type==="atom"},ge=function(e){if(!e)throw new Error("Expected non-null, but got "+String(e));return e},le=function(e){var t=/^\s*([^\\/#]*?)(?::|�*58|�*3a)/i.exec(e);return t!=null?t[1]:"_relative"},q={contains:x,deflt:w,escape:ne,hyphenate:L,getBaseElem:V,isCharacterBox:xe,protocolFromUrl:le},C={displayMode:{type:"boolean",description:"Render math in display mode, which puts the math in display style (so \\int and \\sum are large, for example), and centers the math on the page on its own line.",cli:"-d, --display-mode"},output:{type:{enum:["htmlAndMathml","html","mathml"]},description:"Determines the markup language of the output.",cli:"-F, --format "},leqno:{type:"boolean",description:"Render display math in leqno style (left-justified 
tags)."},fleqn:{type:"boolean",description:"Render display math flush left."},throwOnError:{type:"boolean",default:!0,cli:"-t, --no-throw-on-error",cliDescription:"Render errors (in the color given by --error-color) instead of throwing a ParseError exception when encountering an error."},errorColor:{type:"string",default:"#cc0000",cli:"-c, --error-color ",cliDescription:"A color string given in the format 'rgb' or 'rrggbb' (no #). This option determines the color of errors rendered by the -t option.",cliProcessor:function(e){return"#"+e}},macros:{type:"object",cli:"-m, --macro ",cliDescription:"Define custom macro of the form '\\foo:expansion' (use multiple -m arguments for multiple macros).",cliDefault:[],cliProcessor:function(e,t){return t.push(e),t}},minRuleThickness:{type:"number",description:"Specifies a minimum thickness, in ems, for fraction lines, `\\sqrt` top lines, `{array}` vertical lines, `\\hline`, `\\hdashline`, `\\underline`, `\\overline`, and the borders of `\\fbox`, `\\boxed`, and `\\fcolorbox`.",processor:function(e){return Math.max(0,e)},cli:"--min-rule-thickness ",cliProcessor:parseFloat},colorIsTextColor:{type:"boolean",description:"Makes \\color behave like LaTeX's 2-argument \\textcolor, instead of LaTeX's one-argument \\color mode change.",cli:"-b, --color-is-text-color"},strict:{type:[{enum:["warn","ignore","error"]},"boolean","function"],description:"Turn on strict / LaTeX faithfulness mode, which throws an error if the input uses features that are not supported by LaTeX.",cli:"-S, --strict",cliDefault:!1},trust:{type:["boolean","function"],description:"Trust the input, enabling all HTML features such as \\url.",cli:"-T, --trust"},maxSize:{type:"number",default:1/0,description:"If non-zero, all user-specified sizes, e.g. in \\rule{500em}{500em}, will be capped to maxSize ems. Otherwise, elements and spaces can be arbitrarily large",processor:function(e){return Math.max(0,e)},cli:"-s, --max-size ",cliProcessor:parseInt},maxExpand:{type:"number",default:1e3,description:"Limit the number of macro expansions to the specified number, to prevent e.g. infinite macro loops. 
If set to Infinity, the macro expander will try to fully expand as in LaTeX.",processor:function(e){return Math.max(0,e)},cli:"-e, --max-expand ",cliProcessor:function(e){return e==="Infinity"?1/0:parseInt(e)}},globalGroup:{type:"boolean",cli:!1}};function _(u){if(u.default)return u.default;var e=u.type,t=Array.isArray(e)?e[0]:e;if(typeof t!="string")return t.enum[0];switch(t){case"boolean":return!1;case"string":return"";case"number":return 0;case"object":return{}}}var N=function(){function u(t){this.displayMode=void 0,this.output=void 0,this.leqno=void 0,this.fleqn=void 0,this.throwOnError=void 0,this.errorColor=void 0,this.macros=void 0,this.minRuleThickness=void 0,this.colorIsTextColor=void 0,this.strict=void 0,this.trust=void 0,this.maxSize=void 0,this.maxExpand=void 0,this.globalGroup=void 0,t=t||{};for(var r in C)if(C.hasOwnProperty(r)){var n=C[r];this[r]=t[r]!==void 0?n.processor?n.processor(t[r]):t[r]:_(n)}}var e=u.prototype;return e.reportNonstrict=function(r,n,a){var c=this.strict;if(typeof c=="function"&&(c=c(r,n,a)),!(!c||c==="ignore")){if(c===!0||c==="error")throw new p("LaTeX-incompatible input and strict mode is set to 'error': "+(n+" ["+r+"]"),a);c==="warn"?typeof console<"u"&&console.warn("LaTeX-incompatible input and strict mode is set to 'warn': "+(n+" ["+r+"]")):typeof console<"u"&&console.warn("LaTeX-incompatible input and strict mode is set to "+("unrecognized '"+c+"': "+n+" ["+r+"]"))}},e.useStrictBehavior=function(r,n,a){var c=this.strict;if(typeof c=="function")try{c=c(r,n,a)}catch{c="error"}return!c||c==="ignore"?!1:c===!0||c==="error"?!0:c==="warn"?(typeof console<"u"&&console.warn("LaTeX-incompatible input and strict mode is set to 'warn': "+(n+" ["+r+"]")),!1):(typeof console<"u"&&console.warn("LaTeX-incompatible input and strict mode is set to "+("unrecognized '"+c+"': "+n+" ["+r+"]")),!1)},e.isTrusted=function(r){r.url&&!r.protocol&&(r.protocol=q.protocolFromUrl(r.url));var n=typeof this.trust=="function"?this.trust(r):this.trust;return!!n},u}(),D=function(){function u(t,r,n){this.id=void 0,this.size=void 0,this.cramped=void 0,this.id=t,this.size=r,this.cramped=n}var e=u.prototype;return e.sup=function(){return Pe[V0[this.id]]},e.sub=function(){return Pe[W0[this.id]]},e.fracNum=function(){return Pe[o0[this.id]]},e.fracDen=function(){return Pe[He[this.id]]},e.cramp=function(){return Pe[Te[this.id]]},e.text=function(){return Pe[Mt[this.id]]},e.isTight=function(){return this.size>=2},u}(),I=0,j=1,re=2,Z=3,U=4,fe=5,se=6,ze=7,Pe=[new D(I,0,!1),new D(j,0,!0),new D(re,1,!1),new D(Z,1,!0),new D(U,2,!1),new D(fe,2,!0),new D(se,3,!1),new D(ze,3,!0)],V0=[U,fe,U,fe,se,ze,se,ze],W0=[fe,fe,fe,fe,ze,ze,ze,ze],o0=[re,Z,U,fe,se,ze,se,ze],He=[Z,Z,fe,fe,ze,ze,ze,ze],Te=[j,j,Z,Z,fe,fe,ze,ze],Mt=[I,j,re,Z,re,Z,re,Z],J={DISPLAY:Pe[I],TEXT:Pe[re],SCRIPT:Pe[U],SCRIPTSCRIPT:Pe[se]},Ze=[{name:"latin",blocks:[[256,591],[768,879]]},{name:"cyrillic",blocks:[[1024,1279]]},{name:"armenian",blocks:[[1328,1423]]},{name:"brahmic",blocks:[[2304,4255]]},{name:"georgian",blocks:[[4256,4351]]},{name:"cjk",blocks:[[12288,12543],[19968,40879],[65280,65376]]},{name:"hangul",blocks:[[44032,55215]]}];function Fe(u){for(var e=0;e=n[0]&&u<=n[1])return t.name}return null}var Ve=[];Ze.forEach(function(u){return u.blocks.forEach(function(e){return Ve.push.apply(Ve,e)})});function pt(u){for(var e=0;e=Ve[e]&&u<=Ve[e+1])return!0;return!1}var at=80,Ct=function(e,t){return"M95,"+(622+e+t)+` -c-2.7,0,-7.17,-2.7,-13.5,-8c-5.8,-5.3,-9.5,-10,-9.5,-14 -c0,-2,0.3,-3.3,1,-4c1.3,-2.7,23.83,-20.7,67.5,-54 
-c44.2,-33.3,65.8,-50.3,66.5,-51c1.3,-1.3,3,-2,5,-2c4.7,0,8.7,3.3,12,10 -s173,378,173,378c0.7,0,35.3,-71,104,-213c68.7,-142,137.5,-285,206.5,-429 -c69,-144,104.5,-217.7,106.5,-221 -l`+e/2.075+" -"+e+` -c5.3,-9.3,12,-14,20,-14 -H400000v`+(40+e)+`H845.2724 -s-225.272,467,-225.272,467s-235,486,-235,486c-2.7,4.7,-9,7,-19,7 -c-6,0,-10,-1,-12,-3s-194,-422,-194,-422s-65,47,-65,47z -M`+(834+e)+" "+t+"h400000v"+(40+e)+"h-400000z"},Vt=function(e,t){return"M263,"+(601+e+t)+`c0.7,0,18,39.7,52,119 -c34,79.3,68.167,158.7,102.5,238c34.3,79.3,51.8,119.3,52.5,120 -c340,-704.7,510.7,-1060.3,512,-1067 -l`+e/2.084+" -"+e+` -c4.7,-7.3,11,-11,19,-11 -H40000v`+(40+e)+`H1012.3 -s-271.3,567,-271.3,567c-38.7,80.7,-84,175,-136,283c-52,108,-89.167,185.3,-111.5,232 -c-22.3,46.7,-33.8,70.3,-34.5,71c-4.7,4.7,-12.3,7,-23,7s-12,-1,-12,-1 -s-109,-253,-109,-253c-72.7,-168,-109.3,-252,-110,-252c-10.7,8,-22,16.7,-34,26 -c-22,17.3,-33.3,26,-34,26s-26,-26,-26,-26s76,-59,76,-59s76,-60,76,-60z -M`+(1001+e)+" "+t+"h400000v"+(40+e)+"h-400000z"},u0=function(e,t){return"M983 "+(10+e+t)+` -l`+e/3.13+" -"+e+` -c4,-6.7,10,-10,18,-10 H400000v`+(40+e)+` -H1013.1s-83.4,268,-264.1,840c-180.7,572,-277,876.3,-289,913c-4.7,4.7,-12.7,7,-24,7 -s-12,0,-12,0c-1.3,-3.3,-3.7,-11.7,-7,-25c-35.3,-125.3,-106.7,-373.3,-214,-744 -c-10,12,-21,25,-33,39s-32,39,-32,39c-6,-5.3,-15,-14,-27,-26s25,-30,25,-30 -c26.7,-32.7,52,-63,76,-91s52,-60,52,-60s208,722,208,722 -c56,-175.3,126.3,-397.3,211,-666c84.7,-268.7,153.8,-488.2,207.5,-658.5 -c53.7,-170.3,84.5,-266.8,92.5,-289.5z -M`+(1001+e)+" "+t+"h400000v"+(40+e)+"h-400000z"},gt=function(e,t){return"M424,"+(2398+e+t)+` -c-1.3,-0.7,-38.5,-172,-111.5,-514c-73,-342,-109.8,-513.3,-110.5,-514 -c0,-2,-10.7,14.3,-32,49c-4.7,7.3,-9.8,15.7,-15.5,25c-5.7,9.3,-9.8,16,-12.5,20 -s-5,7,-5,7c-4,-3.3,-8.3,-7.7,-13,-13s-13,-13,-13,-13s76,-122,76,-122s77,-121,77,-121 -s209,968,209,968c0,-2,84.7,-361.7,254,-1079c169.3,-717.3,254.7,-1077.7,256,-1081 -l`+e/4.223+" -"+e+`c4,-6.7,10,-10,18,-10 H400000 -v`+(40+e)+`H1014.6 -s-87.3,378.7,-272.6,1166c-185.3,787.3,-279.3,1182.3,-282,1185 -c-2,6,-10,9,-24,9 -c-8,0,-12,-0.7,-12,-2z M`+(1001+e)+" "+t+` -h400000v`+(40+e)+"h-400000z"},it=function(e,t){return"M473,"+(2713+e+t)+` -c339.3,-1799.3,509.3,-2700,510,-2702 l`+e/5.298+" -"+e+` -c3.3,-7.3,9.3,-11,18,-11 H400000v`+(40+e)+`H1017.7 -s-90.5,478,-276.2,1466c-185.7,988,-279.5,1483,-281.5,1485c-2,6,-10,9,-24,9 -c-8,0,-12,-0.7,-12,-2c0,-1.3,-5.3,-32,-16,-92c-50.7,-293.3,-119.7,-693.3,-207,-1200 -c0,-1.3,-5.3,8.7,-16,30c-10.7,21.3,-21.3,42.7,-32,64s-16,33,-16,33s-26,-26,-26,-26 -s76,-153,76,-153s77,-151,77,-151c0.7,0.7,35.7,202,105,604c67.3,400.7,102,602.7,104, -606zM`+(1001+e)+" "+t+"h400000v"+(40+e)+"H1017.7z"},c0=function(e){var t=e/2;return"M400000 "+e+" H0 L"+t+" 0 l65 45 L145 "+(e-80)+" H400000z"},Lt=function(e,t,r){var n=r-54-t-e;return"M702 "+(e+t)+"H400000"+(40+e)+` -H742v`+n+`l-4 4-4 4c-.667.7 -2 1.5-4 2.5s-4.167 1.833-6.5 2.5-5.5 1-9.5 1 -h-12l-28-84c-16.667-52-96.667 -294.333-240-727l-212 -643 -85 170 -c-4-3.333-8.333-7.667-13 -13l-13-13l77-155 77-156c66 199.333 139 419.667 -219 661 l218 661zM702 `+t+"H400000v"+(40+e)+"H742z"},Wt=function(e,t,r){t=1e3*t;var n="";switch(e){case"sqrtMain":n=Ct(t,at);break;case"sqrtSize1":n=Vt(t,at);break;case"sqrtSize2":n=u0(t,at);break;case"sqrtSize3":n=gt(t,at);break;case"sqrtSize4":n=it(t,at);break;case"sqrtTall":n=Lt(t,at,r)}return n},k0=function(e,t){switch(e){case"⎜":return"M291 0 H417 V"+t+" H291z M291 0 H417 V"+t+" H291z";case"∣":return"M145 0 H188 V"+t+" H145z M145 0 H188 V"+t+" 
H145z";case"∥":return"M145 0 H188 V"+t+" H145z M145 0 H188 V"+t+" H145z"+("M367 0 H410 V"+t+" H367z M367 0 H410 V"+t+" H367z");case"⎟":return"M457 0 H583 V"+t+" H457z M457 0 H583 V"+t+" H457z";case"⎢":return"M319 0 H403 V"+t+" H319z M319 0 H403 V"+t+" H319z";case"⎥":return"M263 0 H347 V"+t+" H263z M263 0 H347 V"+t+" H263z";case"⎪":return"M384 0 H504 V"+t+" H384z M384 0 H504 V"+t+" H384z";case"⏐":return"M312 0 H355 V"+t+" H312z M312 0 H355 V"+t+" H312z";case"‖":return"M257 0 H300 V"+t+" H257z M257 0 H300 V"+t+" H257z"+("M478 0 H521 V"+t+" H478z M478 0 H521 V"+t+" H478z");default:return""}},h0={doubleleftarrow:`M262 157 -l10-10c34-36 62.7-77 86-123 3.3-8 5-13.3 5-16 0-5.3-6.7-8-20-8-7.3 - 0-12.2.5-14.5 1.5-2.3 1-4.8 4.5-7.5 10.5-49.3 97.3-121.7 169.3-217 216-28 - 14-57.3 25-88 33-6.7 2-11 3.8-13 5.5-2 1.7-3 4.2-3 7.5s1 5.8 3 7.5 -c2 1.7 6.3 3.5 13 5.5 68 17.3 128.2 47.8 180.5 91.5 52.3 43.7 93.8 96.2 124.5 - 157.5 9.3 8 15.3 12.3 18 13h6c12-.7 18-4 18-10 0-2-1.7-7-5-15-23.3-46-52-87 --86-123l-10-10h399738v-40H218c328 0 0 0 0 0l-10-8c-26.7-20-65.7-43-117-69 2.7 --2 6-3.7 10-5 36.7-16 72.3-37.3 107-64l10-8h399782v-40z -m8 0v40h399730v-40zm0 194v40h399730v-40z`,doublerightarrow:`M399738 392l --10 10c-34 36-62.7 77-86 123-3.3 8-5 13.3-5 16 0 5.3 6.7 8 20 8 7.3 0 12.2-.5 - 14.5-1.5 2.3-1 4.8-4.5 7.5-10.5 49.3-97.3 121.7-169.3 217-216 28-14 57.3-25 88 --33 6.7-2 11-3.8 13-5.5 2-1.7 3-4.2 3-7.5s-1-5.8-3-7.5c-2-1.7-6.3-3.5-13-5.5-68 --17.3-128.2-47.8-180.5-91.5-52.3-43.7-93.8-96.2-124.5-157.5-9.3-8-15.3-12.3-18 --13h-6c-12 .7-18 4-18 10 0 2 1.7 7 5 15 23.3 46 52 87 86 123l10 10H0v40h399782 -c-328 0 0 0 0 0l10 8c26.7 20 65.7 43 117 69-2.7 2-6 3.7-10 5-36.7 16-72.3 37.3 --107 64l-10 8H0v40zM0 157v40h399730v-40zm0 194v40h399730v-40z`,leftarrow:`M400000 241H110l3-3c68.7-52.7 113.7-120 - 135-202 4-14.7 6-23 6-25 0-7.3-7-11-21-11-8 0-13.2.8-15.5 2.5-2.3 1.7-4.2 5.8 --5.5 12.5-1.3 4.7-2.7 10.3-4 17-12 48.7-34.8 92-68.5 130S65.3 228.3 18 247 -c-10 4-16 7.7-18 11 0 8.7 6 14.3 18 17 47.3 18.7 87.8 47 121.5 85S196 441.3 208 - 490c.7 2 1.3 5 2 9s1.2 6.7 1.5 8c.3 1.3 1 3.3 2 6s2.2 4.5 3.5 5.5c1.3 1 3.3 - 1.8 6 2.5s6 1 10 1c14 0 21-3.7 21-11 0-2-2-10.3-6-25-20-79.3-65-146.7-135-202 - l-3-3h399890zM100 241v40h399900v-40z`,leftbrace:`M6 548l-6-6v-35l6-11c56-104 135.3-181.3 238-232 57.3-28.7 117 --45 179-50h399577v120H403c-43.3 7-81 15-113 26-100.7 33-179.7 91-237 174-2.7 - 5-6 9-10 13-.7 1-7.3 1-20 1H6z`,leftbraceunder:`M0 6l6-6h17c12.688 0 19.313.3 20 1 4 4 7.313 8.3 10 13 - 35.313 51.3 80.813 93.8 136.5 127.5 55.688 33.7 117.188 55.8 184.5 66.5.688 - 0 2 .3 4 1 18.688 2.7 76 4.3 172 5h399450v120H429l-6-1c-124.688-8-235-61.7 --331-161C60.687 138.7 32.312 99.3 7 54L0 41V6z`,leftgroup:`M400000 80 -H435C64 80 168.3 229.4 21 260c-5.9 1.2-18 0-18 0-2 0-3-1-3-3v-38C76 61 257 0 - 435 0h399565z`,leftgroupunder:`M400000 262 -H435C64 262 168.3 112.6 21 82c-5.9-1.2-18 0-18 0-2 0-3 1-3 3v38c76 158 257 219 - 435 219h399565z`,leftharpoon:`M0 267c.7 5.3 3 10 7 14h399993v-40H93c3.3 --3.3 10.2-9.5 20.5-18.5s17.8-15.8 22.5-20.5c50.7-52 88-110.3 112-175 4-11.3 5 --18.3 3-21-1.3-4-7.3-6-18-6-8 0-13 .7-15 2s-4.7 6.7-8 16c-42 98.7-107.3 174.7 --196 228-6.7 4.7-10.7 8-12 10-1.3 2-2 5.7-2 11zm100-26v40h399900v-40z`,leftharpoonplus:`M0 267c.7 5.3 3 10 7 14h399993v-40H93c3.3-3.3 10.2-9.5 - 20.5-18.5s17.8-15.8 22.5-20.5c50.7-52 88-110.3 112-175 4-11.3 5-18.3 3-21-1.3 --4-7.3-6-18-6-8 0-13 .7-15 2s-4.7 6.7-8 16c-42 98.7-107.3 174.7-196 228-6.7 4.7 --10.7 8-12 10-1.3 2-2 5.7-2 11zm100-26v40h399900v-40zM0 435v40h400000v-40z -m0 
0v40h400000v-40z`,leftharpoondown:`M7 241c-4 4-6.333 8.667-7 14 0 5.333.667 9 2 11s5.333 - 5.333 12 10c90.667 54 156 130 196 228 3.333 10.667 6.333 16.333 9 17 2 .667 5 - 1 9 1h5c10.667 0 16.667-2 18-6 2-2.667 1-9.667-3-21-32-87.333-82.667-157.667 --152-211l-3-3h399907v-40zM93 281 H400000 v-40L7 241z`,leftharpoondownplus:`M7 435c-4 4-6.3 8.7-7 14 0 5.3.7 9 2 11s5.3 5.3 12 - 10c90.7 54 156 130 196 228 3.3 10.7 6.3 16.3 9 17 2 .7 5 1 9 1h5c10.7 0 16.7 --2 18-6 2-2.7 1-9.7-3-21-32-87.3-82.7-157.7-152-211l-3-3h399907v-40H7zm93 0 -v40h399900v-40zM0 241v40h399900v-40zm0 0v40h399900v-40z`,lefthook:`M400000 281 H103s-33-11.2-61-33.5S0 197.3 0 164s14.2-61.2 42.5 --83.5C70.8 58.2 104 47 142 47 c16.7 0 25 6.7 25 20 0 12-8.7 18.7-26 20-40 3.3 --68.7 15.7-86 37-10 12-15 25.3-15 40 0 22.7 9.8 40.7 29.5 54 19.7 13.3 43.5 21 - 71.5 23h399859zM103 281v-40h399897v40z`,leftlinesegment:`M40 281 V428 H0 V94 H40 V241 H400000 v40z -M40 281 V428 H0 V94 H40 V241 H400000 v40z`,leftmapsto:`M40 281 V448H0V74H40V241H400000v40z -M40 281 V448H0V74H40V241H400000v40z`,leftToFrom:`M0 147h400000v40H0zm0 214c68 40 115.7 95.7 143 167h22c15.3 0 23 --.3 23-1 0-1.3-5.3-13.7-16-37-18-35.3-41.3-69-70-101l-7-8h399905v-40H95l7-8 -c28.7-32 52-65.7 70-101 10.7-23.3 16-35.7 16-37 0-.7-7.7-1-23-1h-22C115.7 265.3 - 68 321 0 361zm0-174v-40h399900v40zm100 154v40h399900v-40z`,longequal:`M0 50 h400000 v40H0z m0 194h40000v40H0z -M0 50 h400000 v40H0z m0 194h40000v40H0z`,midbrace:`M200428 334 -c-100.7-8.3-195.3-44-280-108-55.3-42-101.7-93-139-153l-9-14c-2.7 4-5.7 8.7-9 14 --53.3 86.7-123.7 153-211 199-66.7 36-137.3 56.3-212 62H0V214h199568c178.3-11.7 - 311.7-78.3 403-201 6-8 9.7-12 11-12 .7-.7 6.7-1 18-1s17.3.3 18 1c1.3 0 5 4 11 - 12 44.7 59.3 101.3 106.3 170 141s145.3 54.3 229 60h199572v120z`,midbraceunder:`M199572 214 -c100.7 8.3 195.3 44 280 108 55.3 42 101.7 93 139 153l9 14c2.7-4 5.7-8.7 9-14 - 53.3-86.7 123.7-153 211-199 66.7-36 137.3-56.3 212-62h199568v120H200432c-178.3 - 11.7-311.7 78.3-403 201-6 8-9.7 12-11 12-.7.7-6.7 1-18 1s-17.3-.3-18-1c-1.3 0 --5-4-11-12-44.7-59.3-101.3-106.3-170-141s-145.3-54.3-229-60H0V214z`,oiintSize1:`M512.6 71.6c272.6 0 320.3 106.8 320.3 178.2 0 70.8-47.7 177.6 --320.3 177.6S193.1 320.6 193.1 249.8c0-71.4 46.9-178.2 319.5-178.2z -m368.1 178.2c0-86.4-60.9-215.4-368.1-215.4-306.4 0-367.3 129-367.3 215.4 0 85.8 -60.9 214.8 367.3 214.8 307.2 0 368.1-129 368.1-214.8z`,oiintSize2:`M757.8 100.1c384.7 0 451.1 137.6 451.1 230 0 91.3-66.4 228.8 --451.1 228.8-386.3 0-452.7-137.5-452.7-228.8 0-92.4 66.4-230 452.7-230z -m502.4 230c0-111.2-82.4-277.2-502.4-277.2s-504 166-504 277.2 -c0 110 84 276 504 276s502.4-166 502.4-276z`,oiiintSize1:`M681.4 71.6c408.9 0 480.5 106.8 480.5 178.2 0 70.8-71.6 177.6 --480.5 177.6S202.1 320.6 202.1 249.8c0-71.4 70.5-178.2 479.3-178.2z -m525.8 178.2c0-86.4-86.8-215.4-525.7-215.4-437.9 0-524.7 129-524.7 215.4 0 -85.8 86.8 214.8 524.7 214.8 438.9 0 525.7-129 525.7-214.8z`,oiiintSize2:`M1021.2 53c603.6 0 707.8 165.8 707.8 277.2 0 110-104.2 275.8 --707.8 275.8-606 0-710.2-165.8-710.2-275.8C311 218.8 415.2 53 1021.2 53z -m770.4 277.1c0-131.2-126.4-327.6-770.5-327.6S248.4 198.9 248.4 330.1 -c0 130 128.8 326.4 772.7 326.4s770.5-196.4 770.5-326.4z`,rightarrow:`M0 241v40h399891c-47.3 35.3-84 78-110 128 --16.7 32-27.7 63.7-33 95 0 1.3-.2 2.7-.5 4-.3 1.3-.5 2.3-.5 3 0 7.3 6.7 11 20 - 11 8 0 13.2-.8 15.5-2.5 2.3-1.7 4.2-5.5 5.5-11.5 2-13.3 5.7-27 11-41 14.7-44.7 - 39-84.5 73-119.5s73.7-60.2 119-75.5c6-2 9-5.7 9-11s-3-9-9-11c-45.3-15.3-85 
--40.5-119-75.5s-58.3-74.8-73-119.5c-4.7-14-8.3-27.3-11-40-1.3-6.7-3.2-10.8-5.5 --12.5-2.3-1.7-7.5-2.5-15.5-2.5-14 0-21 3.7-21 11 0 2 2 10.3 6 25 20.7 83.3 67 - 151.7 139 205zm0 0v40h399900v-40z`,rightbrace:`M400000 542l --6 6h-17c-12.7 0-19.3-.3-20-1-4-4-7.3-8.3-10-13-35.3-51.3-80.8-93.8-136.5-127.5 -s-117.2-55.8-184.5-66.5c-.7 0-2-.3-4-1-18.7-2.7-76-4.3-172-5H0V214h399571l6 1 -c124.7 8 235 61.7 331 161 31.3 33.3 59.7 72.7 85 118l7 13v35z`,rightbraceunder:`M399994 0l6 6v35l-6 11c-56 104-135.3 181.3-238 232-57.3 - 28.7-117 45-179 50H-300V214h399897c43.3-7 81-15 113-26 100.7-33 179.7-91 237 --174 2.7-5 6-9 10-13 .7-1 7.3-1 20-1h17z`,rightgroup:`M0 80h399565c371 0 266.7 149.4 414 180 5.9 1.2 18 0 18 0 2 0 - 3-1 3-3v-38c-76-158-257-219-435-219H0z`,rightgroupunder:`M0 262h399565c371 0 266.7-149.4 414-180 5.9-1.2 18 0 18 - 0 2 0 3 1 3 3v38c-76 158-257 219-435 219H0z`,rightharpoon:`M0 241v40h399993c4.7-4.7 7-9.3 7-14 0-9.3 --3.7-15.3-11-18-92.7-56.7-159-133.7-199-231-3.3-9.3-6-14.7-8-16-2-1.3-7-2-15-2 --10.7 0-16.7 2-18 6-2 2.7-1 9.7 3 21 15.3 42 36.7 81.8 64 119.5 27.3 37.7 58 - 69.2 92 94.5zm0 0v40h399900v-40z`,rightharpoonplus:`M0 241v40h399993c4.7-4.7 7-9.3 7-14 0-9.3-3.7-15.3-11 --18-92.7-56.7-159-133.7-199-231-3.3-9.3-6-14.7-8-16-2-1.3-7-2-15-2-10.7 0-16.7 - 2-18 6-2 2.7-1 9.7 3 21 15.3 42 36.7 81.8 64 119.5 27.3 37.7 58 69.2 92 94.5z -m0 0v40h399900v-40z m100 194v40h399900v-40zm0 0v40h399900v-40z`,rightharpoondown:`M399747 511c0 7.3 6.7 11 20 11 8 0 13-.8 15-2.5s4.7-6.8 - 8-15.5c40-94 99.3-166.3 178-217 13.3-8 20.3-12.3 21-13 5.3-3.3 8.5-5.8 9.5 --7.5 1-1.7 1.5-5.2 1.5-10.5s-2.3-10.3-7-15H0v40h399908c-34 25.3-64.7 57-92 95 --27.3 38-48.7 77.7-64 119-3.3 8.7-5 14-5 16zM0 241v40h399900v-40z`,rightharpoondownplus:`M399747 705c0 7.3 6.7 11 20 11 8 0 13-.8 - 15-2.5s4.7-6.8 8-15.5c40-94 99.3-166.3 178-217 13.3-8 20.3-12.3 21-13 5.3-3.3 - 8.5-5.8 9.5-7.5 1-1.7 1.5-5.2 1.5-10.5s-2.3-10.3-7-15H0v40h399908c-34 25.3 --64.7 57-92 95-27.3 38-48.7 77.7-64 119-3.3 8.7-5 14-5 16zM0 435v40h399900v-40z -m0-194v40h400000v-40zm0 0v40h400000v-40z`,righthook:`M399859 241c-764 0 0 0 0 0 40-3.3 68.7-15.7 86-37 10-12 15-25.3 - 15-40 0-22.7-9.8-40.7-29.5-54-19.7-13.3-43.5-21-71.5-23-17.3-1.3-26-8-26-20 0 --13.3 8.7-20 26-20 38 0 71 11.2 99 33.5 0 0 7 5.6 21 16.7 14 11.2 21 33.5 21 - 66.8s-14 61.2-42 83.5c-28 22.3-61 33.5-99 33.5L0 241z M0 281v-40h399859v40z`,rightlinesegment:`M399960 241 V94 h40 V428 h-40 V281 H0 v-40z -M399960 241 V94 h40 V428 h-40 V281 H0 v-40z`,rightToFrom:`M400000 167c-70.7-42-118-97.7-142-167h-23c-15.3 0-23 .3-23 - 1 0 1.3 5.3 13.7 16 37 18 35.3 41.3 69 70 101l7 8H0v40h399905l-7 8c-28.7 32 --52 65.7-70 101-10.7 23.3-16 35.7-16 37 0 .7 7.7 1 23 1h23c24-69.3 71.3-125 142 --167z M100 147v40h399900v-40zM0 341v40h399900v-40z`,twoheadleftarrow:`M0 167c68 40 - 115.7 95.7 143 167h22c15.3 0 23-.3 23-1 0-1.3-5.3-13.7-16-37-18-35.3-41.3-69 --70-101l-7-8h125l9 7c50.7 39.3 85 86 103 140h46c0-4.7-6.3-18.7-19-42-18-35.3 --40-67.3-66-96l-9-9h399716v-40H284l9-9c26-28.7 48-60.7 66-96 12.7-23.333 19 --37.333 19-42h-46c-18 54-52.3 100.7-103 140l-9 7H95l7-8c28.7-32 52-65.7 70-101 - 10.7-23.333 16-35.7 16-37 0-.7-7.7-1-23-1h-22C115.7 71.3 68 127 0 167z`,twoheadrightarrow:`M400000 167 -c-68-40-115.7-95.7-143-167h-22c-15.3 0-23 .3-23 1 0 1.3 5.3 13.7 16 37 18 35.3 - 41.3 69 70 101l7 8h-125l-9-7c-50.7-39.3-85-86-103-140h-46c0 4.7 6.3 18.7 19 42 - 18 35.3 40 67.3 66 96l9 9H0v40h399716l-9 9c-26 28.7-48 60.7-66 96-12.7 23.333 --19 37.333-19 42h46c18-54 52.3-100.7 103-140l9-7h125l-7 8c-28.7 32-52 65.7-70 - 101-10.7 
23.333-16 35.7-16 37 0 .7 7.7 1 23 1h22c27.3-71.3 75-127 143-167z`,tilde1:`M200 55.538c-77 0-168 73.953-177 73.953-3 0-7 --2.175-9-5.437L2 97c-1-2-2-4-2-6 0-4 2-7 5-9l20-12C116 12 171 0 207 0c86 0 - 114 68 191 68 78 0 168-68 177-68 4 0 7 2 9 5l12 19c1 2.175 2 4.35 2 6.525 0 - 4.35-2 7.613-5 9.788l-19 13.05c-92 63.077-116.937 75.308-183 76.128 --68.267.847-113-73.952-191-73.952z`,tilde2:`M344 55.266c-142 0-300.638 81.316-311.5 86.418 --8.01 3.762-22.5 10.91-23.5 5.562L1 120c-1-2-1-3-1-4 0-5 3-9 8-10l18.4-9C160.9 - 31.9 283 0 358 0c148 0 188 122 331 122s314-97 326-97c4 0 8 2 10 7l7 21.114 -c1 2.14 1 3.21 1 4.28 0 5.347-3 9.626-7 10.696l-22.3 12.622C852.6 158.372 751 - 181.476 676 181.476c-149 0-189-126.21-332-126.21z`,tilde3:`M786 59C457 59 32 175.242 13 175.242c-6 0-10-3.457 --11-10.37L.15 138c-1-7 3-12 10-13l19.2-6.4C378.4 40.7 634.3 0 804.3 0c337 0 - 411.8 157 746.8 157 328 0 754-112 773-112 5 0 10 3 11 9l1 14.075c1 8.066-.697 - 16.595-6.697 17.492l-21.052 7.31c-367.9 98.146-609.15 122.696-778.15 122.696 - -338 0-409-156.573-744-156.573z`,tilde4:`M786 58C457 58 32 177.487 13 177.487c-6 0-10-3.345 --11-10.035L.15 143c-1-7 3-12 10-13l22-6.7C381.2 35 637.15 0 807.15 0c337 0 409 - 177 744 177 328 0 754-127 773-127 5 0 10 3 11 9l1 14.794c1 7.805-3 13.38-9 - 14.495l-20.7 5.574c-366.85 99.79-607.3 139.372-776.3 139.372-338 0-409 - -175.236-744-175.236z`,vec:`M377 20c0-5.333 1.833-10 5.5-14S391 0 397 0c4.667 0 8.667 1.667 12 5 -3.333 2.667 6.667 9 10 19 6.667 24.667 20.333 43.667 41 57 7.333 4.667 11 -10.667 11 18 0 6-1 10-3 12s-6.667 5-14 9c-28.667 14.667-53.667 35.667-75 63 --1.333 1.333-3.167 3.5-5.5 6.5s-4 4.833-5 5.5c-1 .667-2.5 1.333-4.5 2s-4.333 1 --7 1c-4.667 0-9.167-1.833-13.5-5.5S337 184 337 178c0-12.667 15.667-32.333 47-59 -H213l-171-1c-8.667-6-13-12.333-13-19 0-4.667 4.333-11.333 13-20h359 -c-16-25.333-24-45-24-59z`,widehat1:`M529 0h5l519 115c5 1 9 5 9 10 0 1-1 2-1 3l-4 22 -c-1 5-5 9-11 9h-2L532 67 19 159h-2c-5 0-9-4-11-9l-5-22c-1-6 2-12 8-13z`,widehat2:`M1181 0h2l1171 176c6 0 10 5 10 11l-2 23c-1 6-5 10 --11 10h-1L1182 67 15 220h-1c-6 0-10-4-11-10l-2-23c-1-6 4-11 10-11z`,widehat3:`M1181 0h2l1171 236c6 0 10 5 10 11l-2 23c-1 6-5 10 --11 10h-1L1182 67 15 280h-1c-6 0-10-4-11-10l-2-23c-1-6 4-11 10-11z`,widehat4:`M1181 0h2l1171 296c6 0 10 5 10 11l-2 23c-1 6-5 10 --11 10h-1L1182 67 15 340h-1c-6 0-10-4-11-10l-2-23c-1-6 4-11 10-11z`,widecheck1:`M529,159h5l519,-115c5,-1,9,-5,9,-10c0,-1,-1,-2,-1,-3l-4,-22c-1, --5,-5,-9,-11,-9h-2l-512,92l-513,-92h-2c-5,0,-9,4,-11,9l-5,22c-1,6,2,12,8,13z`,widecheck2:`M1181,220h2l1171,-176c6,0,10,-5,10,-11l-2,-23c-1,-6,-5,-10, --11,-10h-1l-1168,153l-1167,-153h-1c-6,0,-10,4,-11,10l-2,23c-1,6,4,11,10,11z`,widecheck3:`M1181,280h2l1171,-236c6,0,10,-5,10,-11l-2,-23c-1,-6,-5,-10, --11,-10h-1l-1168,213l-1167,-213h-1c-6,0,-10,4,-11,10l-2,23c-1,6,4,11,10,11z`,widecheck4:`M1181,340h2l1171,-296c6,0,10,-5,10,-11l-2,-23c-1,-6,-5,-10, --11,-10h-1l-1168,273l-1167,-273h-1c-6,0,-10,4,-11,10l-2,23c-1,6,4,11,10,11z`,baraboveleftarrow:`M400000 620h-399890l3 -3c68.7 -52.7 113.7 -120 135 -202 -c4 -14.7 6 -23 6 -25c0 -7.3 -7 -11 -21 -11c-8 0 -13.2 0.8 -15.5 2.5 -c-2.3 1.7 -4.2 5.8 -5.5 12.5c-1.3 4.7 -2.7 10.3 -4 17c-12 48.7 -34.8 92 -68.5 130 -s-74.2 66.3 -121.5 85c-10 4 -16 7.7 -18 11c0 8.7 6 14.3 18 17c47.3 18.7 87.8 47 -121.5 85s56.5 81.3 68.5 130c0.7 2 1.3 5 2 9s1.2 6.7 1.5 8c0.3 1.3 1 3.3 2 6 -s2.2 4.5 3.5 5.5c1.3 1 3.3 1.8 6 2.5s6 1 10 1c14 0 21 -3.7 21 -11 -c0 -2 -2 -10.3 -6 -25c-20 -79.3 -65 -146.7 -135 -202l-3 -3h399890z -M100 620v40h399900v-40z M0 241v40h399900v-40zM0 
241v40h399900v-40z`,rightarrowabovebar:`M0 241v40h399891c-47.3 35.3-84 78-110 128-16.7 32 --27.7 63.7-33 95 0 1.3-.2 2.7-.5 4-.3 1.3-.5 2.3-.5 3 0 7.3 6.7 11 20 11 8 0 -13.2-.8 15.5-2.5 2.3-1.7 4.2-5.5 5.5-11.5 2-13.3 5.7-27 11-41 14.7-44.7 39 --84.5 73-119.5s73.7-60.2 119-75.5c6-2 9-5.7 9-11s-3-9-9-11c-45.3-15.3-85-40.5 --119-75.5s-58.3-74.8-73-119.5c-4.7-14-8.3-27.3-11-40-1.3-6.7-3.2-10.8-5.5 --12.5-2.3-1.7-7.5-2.5-15.5-2.5-14 0-21 3.7-21 11 0 2 2 10.3 6 25 20.7 83.3 67 -151.7 139 205zm96 379h399894v40H0zm0 0h399904v40H0z`,baraboveshortleftharpoon:`M507,435c-4,4,-6.3,8.7,-7,14c0,5.3,0.7,9,2,11 -c1.3,2,5.3,5.3,12,10c90.7,54,156,130,196,228c3.3,10.7,6.3,16.3,9,17 -c2,0.7,5,1,9,1c0,0,5,0,5,0c10.7,0,16.7,-2,18,-6c2,-2.7,1,-9.7,-3,-21 -c-32,-87.3,-82.7,-157.7,-152,-211c0,0,-3,-3,-3,-3l399351,0l0,-40 -c-398570,0,-399437,0,-399437,0z M593 435 v40 H399500 v-40z -M0 281 v-40 H399908 v40z M0 281 v-40 H399908 v40z`,rightharpoonaboveshortbar:`M0,241 l0,40c399126,0,399993,0,399993,0 -c4.7,-4.7,7,-9.3,7,-14c0,-9.3,-3.7,-15.3,-11,-18c-92.7,-56.7,-159,-133.7,-199, --231c-3.3,-9.3,-6,-14.7,-8,-16c-2,-1.3,-7,-2,-15,-2c-10.7,0,-16.7,2,-18,6 -c-2,2.7,-1,9.7,3,21c15.3,42,36.7,81.8,64,119.5c27.3,37.7,58,69.2,92,94.5z -M0 241 v40 H399908 v-40z M0 475 v-40 H399500 v40z M0 475 v-40 H399500 v40z`,shortbaraboveleftharpoon:`M7,435c-4,4,-6.3,8.7,-7,14c0,5.3,0.7,9,2,11 -c1.3,2,5.3,5.3,12,10c90.7,54,156,130,196,228c3.3,10.7,6.3,16.3,9,17c2,0.7,5,1,9, -1c0,0,5,0,5,0c10.7,0,16.7,-2,18,-6c2,-2.7,1,-9.7,-3,-21c-32,-87.3,-82.7,-157.7, --152,-211c0,0,-3,-3,-3,-3l399907,0l0,-40c-399126,0,-399993,0,-399993,0z -M93 435 v40 H400000 v-40z M500 241 v40 H400000 v-40z M500 241 v40 H400000 v-40z`,shortrightharpoonabovebar:`M53,241l0,40c398570,0,399437,0,399437,0 -c4.7,-4.7,7,-9.3,7,-14c0,-9.3,-3.7,-15.3,-11,-18c-92.7,-56.7,-159,-133.7,-199, --231c-3.3,-9.3,-6,-14.7,-8,-16c-2,-1.3,-7,-2,-15,-2c-10.7,0,-16.7,2,-18,6 -c-2,2.7,-1,9.7,3,21c15.3,42,36.7,81.8,64,119.5c27.3,37.7,58,69.2,92,94.5z -M500 241 v40 H399408 v-40z M500 435 v40 H400000 v-40z`},ar=function(e,t){switch(e){case"lbrack":return"M403 1759 V84 H666 V0 H319 V1759 v"+t+` v1759 h347 v-84 -H403z M403 1759 V0 H319 V1759 v`+t+" v1759 h84z";case"rbrack":return"M347 1759 V0 H0 V84 H263 V1759 v"+t+` v1759 H0 v84 H347z -M347 1759 V0 H263 V1759 v`+t+" v1759 h84z";case"vert":return"M145 15 v585 v"+t+` v585 c2.667,10,9.667,15,21,15 -c10,0,16.667,-5,20,-15 v-585 v`+-t+` v-585 c-2.667,-10,-9.667,-15,-21,-15 -c-10,0,-16.667,5,-20,15z M188 15 H145 v585 v`+t+" v585 h43z";case"doublevert":return"M145 15 v585 v"+t+` v585 c2.667,10,9.667,15,21,15 -c10,0,16.667,-5,20,-15 v-585 v`+-t+` v-585 c-2.667,-10,-9.667,-15,-21,-15 -c-10,0,-16.667,5,-20,15z M188 15 H145 v585 v`+t+` v585 h43z -M367 15 v585 v`+t+` v585 c2.667,10,9.667,15,21,15 -c10,0,16.667,-5,20,-15 v-585 v`+-t+` v-585 c-2.667,-10,-9.667,-15,-21,-15 -c-10,0,-16.667,5,-20,15z M410 15 H367 v585 v`+t+" v585 h43z";case"lfloor":return"M319 602 V0 H403 V602 v"+t+` v1715 h263 v84 H319z -MM319 602 V0 H403 V602 v`+t+" v1715 H319z";case"rfloor":return"M319 602 V0 H403 V602 v"+t+` v1799 H0 v-84 H319z -MM319 602 V0 H403 V602 v`+t+" v1715 H319z";case"lceil":return"M403 1759 V84 H666 V0 H319 V1759 v"+t+` v602 h84z -M403 1759 V0 H319 V1759 v`+t+" v602 h84z";case"rceil":return"M347 1759 V0 H0 V84 H263 V1759 v"+t+` v602 h84z -M347 1759 V0 h-84 V1759 v`+t+" v602 h84z";case"lparen":return`M863,9c0,-2,-2,-5,-6,-9c0,0,-17,0,-17,0c-12.7,0,-19.3,0.3,-20,1 -c-5.3,5.3,-10.3,11,-15,17c-242.7,294.7,-395.3,682,-458,1162c-21.3,163.3,-33.3,349, --36,557 
l0,`+(t+84)+`c0.2,6,0,26,0,60c2,159.3,10,310.7,24,454c53.3,528,210, -949.7,470,1265c4.7,6,9.7,11.7,15,17c0.7,0.7,7,1,19,1c0,0,18,0,18,0c4,-4,6,-7,6,-9 -c0,-2.7,-3.3,-8.7,-10,-18c-135.3,-192.7,-235.5,-414.3,-300.5,-665c-65,-250.7,-102.5, --544.7,-112.5,-882c-2,-104,-3,-167,-3,-189 -l0,-`+(t+92)+`c0,-162.7,5.7,-314,17,-454c20.7,-272,63.7,-513,129,-723c65.3, --210,155.3,-396.3,270,-559c6.7,-9.3,10,-15.3,10,-18z`;case"rparen":return`M76,0c-16.7,0,-25,3,-25,9c0,2,2,6.3,6,13c21.3,28.7,42.3,60.3, -63,95c96.7,156.7,172.8,332.5,228.5,527.5c55.7,195,92.8,416.5,111.5,664.5 -c11.3,139.3,17,290.7,17,454c0,28,1.7,43,3.3,45l0,`+(t+9)+` -c-3,4,-3.3,16.7,-3.3,38c0,162,-5.7,313.7,-17,455c-18.7,248,-55.8,469.3,-111.5,664 -c-55.7,194.7,-131.8,370.3,-228.5,527c-20.7,34.7,-41.7,66.3,-63,95c-2,3.3,-4,7,-6,11 -c0,7.3,5.7,11,17,11c0,0,11,0,11,0c9.3,0,14.3,-0.3,15,-1c5.3,-5.3,10.3,-11,15,-17 -c242.7,-294.7,395.3,-681.7,458,-1161c21.3,-164.7,33.3,-350.7,36,-558 -l0,-`+(t+144)+`c-2,-159.3,-10,-310.7,-24,-454c-53.3,-528,-210,-949.7, --470,-1265c-4.7,-6,-9.7,-11.7,-15,-17c-0.7,-0.7,-6.7,-1,-18,-1z`;default:throw new Error("Unknown stretchy delimiter.")}},Yt=function(){function u(t){this.children=void 0,this.classes=void 0,this.height=void 0,this.depth=void 0,this.maxFontSize=void 0,this.style=void 0,this.children=t,this.classes=[],this.height=0,this.depth=0,this.maxFontSize=0,this.style={}}var e=u.prototype;return e.hasClass=function(r){return q.contains(this.classes,r)},e.toNode=function(){for(var r=document.createDocumentFragment(),n=0;n=5?e=0:u>=3?e=1:e=2,!S0[e]){var t=S0[e]={cssEmPerMu:jt.quad[e]/18};for(var r in jt)jt.hasOwnProperty(r)&&(t[r]=jt[r][e])}return S0[e]}var lr=[[1,1,1],[2,1,1],[3,1,1],[4,2,1],[5,2,1],[6,3,1],[7,4,2],[8,6,3],[9,7,6],[10,8,7],[11,10,9]],A0=[.5,.6,.7,.8,.9,1,1.2,1.44,1.728,2.074,2.488],Y0=function(e,t){return t.size<2?e:lr[e-1][t.size-1]},m0=function(){function u(t){this.style=void 0,this.color=void 0,this.size=void 0,this.textSize=void 0,this.phantom=void 0,this.font=void 0,this.fontFamily=void 0,this.fontWeight=void 0,this.fontShape=void 0,this.sizeMultiplier=void 0,this.maxSize=void 0,this.minRuleThickness=void 0,this._fontMetrics=void 0,this.style=t.style,this.color=t.color,this.size=t.size||u.BASESIZE,this.textSize=t.textSize||this.size,this.phantom=!!t.phantom,this.font=t.font||"",this.fontFamily=t.fontFamily||"",this.fontWeight=t.fontWeight||"",this.fontShape=t.fontShape||"",this.sizeMultiplier=A0[this.size-1],this.maxSize=t.maxSize,this.minRuleThickness=t.minRuleThickness,this._fontMetrics=void 0}var e=u.prototype;return e.extend=function(r){var n={style:this.style,size:this.size,textSize:this.textSize,color:this.color,phantom:this.phantom,font:this.font,fontFamily:this.fontFamily,fontWeight:this.fontWeight,fontShape:this.fontShape,maxSize:this.maxSize,minRuleThickness:this.minRuleThickness};for(var a in r)r.hasOwnProperty(a)&&(n[a]=r[a]);return new u(n)},e.havingStyle=function(r){return this.style===r?this:this.extend({style:r,size:Y0(this.textSize,r)})},e.havingCrampedStyle=function(){return this.havingStyle(this.style.cramp())},e.havingSize=function(r){return this.size===r&&this.textSize===r?this:this.extend({style:this.style.text(),size:r,textSize:r,sizeMultiplier:A0[r-1]})},e.havingBaseStyle=function(r){r=r||this.style.text();var n=Y0(u.BASESIZE,r);return this.size===n&&this.textSize===u.BASESIZE&&this.style===r?this:this.extend({style:r,size:n})},e.havingBaseSizing=function(){var r;switch(this.style.id){case 4:case 5:r=3;break;case 6:case 7:r=1;break;default:r=6}return 
this.extend({style:this.style.text(),size:r})},e.withColor=function(r){return this.extend({color:r})},e.withPhantom=function(){return this.extend({phantom:!0})},e.withFont=function(r){return this.extend({font:r})},e.withTextFontFamily=function(r){return this.extend({fontFamily:r,font:""})},e.withTextFontWeight=function(r){return this.extend({fontWeight:r,font:""})},e.withTextFontShape=function(r){return this.extend({fontShape:r,font:""})},e.sizingClasses=function(r){return r.size!==this.size?["sizing","reset-size"+r.size,"size"+this.size]:[]},e.baseSizingClasses=function(){return this.size!==u.BASESIZE?["sizing","reset-size"+this.size,"size"+u.BASESIZE]:[]},e.fontMetrics=function(){return this._fontMetrics||(this._fontMetrics=ir(this.size)),this._fontMetrics},e.getColor=function(){return this.phantom?"transparent":this.color},u}();m0.BASESIZE=6;var T0=m0,ht={pt:1,mm:7227/2540,cm:7227/254,in:72.27,bp:803/800,pc:12,dd:1238/1157,cc:14856/1157,nd:685/642,nc:1370/107,sp:1/65536,px:803/800},Zt={ex:!0,em:!0,mu:!0},M0=function(e){return typeof e!="string"&&(e=e.unit),e in ht||e in Zt||e==="ex"},Ee=function(e,t){var r;if(e.unit in ht)r=ht[e.unit]/t.fontMetrics().ptPerEm/t.sizeMultiplier;else if(e.unit==="mu")r=t.fontMetrics().cssEmPerMu;else{var n;if(t.style.isTight()?n=t.havingStyle(t.style.text()):n=t,e.unit==="ex")r=n.fontMetrics().xHeight;else if(e.unit==="em")r=n.fontMetrics().quad;else throw new p("Invalid unit: '"+e.unit+"'");n!==t&&(r*=n.sizeMultiplier/t.sizeMultiplier)}return Math.min(e.number*r,t.maxSize)},X=function(e){return+e.toFixed(4)+"em"},Je=function(e){return e.filter(function(t){return t}).join(" ")},sr=function(e,t,r){if(this.classes=e||[],this.attributes={},this.height=0,this.depth=0,this.maxFontSize=0,this.style=r||{},t){t.style.isTight()&&this.classes.push("mtight");var n=t.getColor();n&&(this.style.color=n)}},or=function(e){var t=document.createElement(e);t.className=Je(this.classes);for(var r in this.style)this.style.hasOwnProperty(r)&&(t.style[r]=this.style[r]);for(var n in this.attributes)this.attributes.hasOwnProperty(n)&&t.setAttribute(n,this.attributes[n]);for(var a=0;a",t},bt=function(){function u(t,r,n,a){this.children=void 0,this.attributes=void 0,this.classes=void 0,this.height=void 0,this.depth=void 0,this.width=void 0,this.maxFontSize=void 0,this.style=void 0,sr.call(this,t,n,a),this.children=r||[]}var e=u.prototype;return e.setAttribute=function(r,n){this.attributes[r]=n},e.hasClass=function(r){return q.contains(this.classes,r)},e.toNode=function(){return or.call(this,"span")},e.toMarkup=function(){return Ue.call(this,"span")},u}(),j0=function(){function u(t,r,n,a){this.children=void 0,this.attributes=void 0,this.classes=void 0,this.height=void 0,this.depth=void 0,this.maxFontSize=void 0,this.style=void 0,sr.call(this,r,a),this.children=n||[],this.setAttribute("href",t)}var e=u.prototype;return e.setAttribute=function(r,n){this.attributes[r]=n},e.hasClass=function(r){return q.contains(this.classes,r)},e.toNode=function(){return or.call(this,"a")},e.toMarkup=function(){return Ue.call(this,"a")},u}(),ur=function(){function u(t,r,n){this.src=void 0,this.alt=void 0,this.classes=void 0,this.height=void 0,this.depth=void 0,this.maxFontSize=void 0,this.style=void 0,this.alt=r,this.src=t,this.classes=["mord"],this.style=n}var e=u.prototype;return e.hasClass=function(r){return q.contains(this.classes,r)},e.toNode=function(){var r=document.createElement("img");r.src=this.src,r.alt=this.alt,r.className="mord";for(var n in 
this.style)this.style.hasOwnProperty(n)&&(r.style[n]=this.style[n]);return r},e.toMarkup=function(){var r=""+this.alt+"0&&(n=document.createElement("span"),n.style.marginRight=X(this.italic)),this.classes.length>0&&(n=n||document.createElement("span"),n.className=Je(this.classes));for(var a in this.style)this.style.hasOwnProperty(a)&&(n=n||document.createElement("span"),n.style[a]=this.style[a]);return n?(n.appendChild(r),n):r},e.toMarkup=function(){var r=!1,n="0&&(a+="margin-right:"+this.italic+"em;");for(var c in this.style)this.style.hasOwnProperty(c)&&(a+=q.hyphenate(c)+":"+this.style[c]+";");a&&(r=!0,n+=' style="'+q.escape(a)+'"');var d=q.escape(this.text);return r?(n+=">",n+=d,n+="",n):d},u}(),yt=function(){function u(t,r){this.children=void 0,this.attributes=void 0,this.children=t||[],this.attributes=r||{}}var e=u.prototype;return e.toNode=function(){var r="http://www.w3.org/2000/svg",n=document.createElementNS(r,"svg");for(var a in this.attributes)Object.prototype.hasOwnProperty.call(this.attributes,a)&&n.setAttribute(a,this.attributes[a]);for(var c=0;c":""},u}(),Kt=function(){function u(t){this.attributes=void 0,this.attributes=t||{}}var e=u.prototype;return e.toNode=function(){var r="http://www.w3.org/2000/svg",n=document.createElementNS(r,"line");for(var a in this.attributes)Object.prototype.hasOwnProperty.call(this.attributes,a)&&n.setAttribute(a,this.attributes[a]);return n},e.toMarkup=function(){var r=" but got "+String(u)+".")}var Ot={bin:1,close:1,inner:1,open:1,punct:1,rel:1},$0={"accent-token":1,mathord:1,"op-token":1,spacing:1,textord:1},d0={math:{},text:{}},Ne=d0;function l(u,e,t,r,n,a){d0[u][n]={font:e,group:t,replace:r},a&&r&&(d0[u][r]=d0[u][n])}var h="math",H="text",f="main",S="ams",Be="accent-token",te="bin",Ke="close",oe="inner",A="mathord",P="op-token",$="open",we="punct",k="rel",qe="spacing",M="textord";l(h,f,k,"≡","\\equiv",!0),l(h,f,k,"≺","\\prec",!0),l(h,f,k,"≻","\\succ",!0),l(h,f,k,"∼","\\sim",!0),l(h,f,k,"⊥","\\perp"),l(h,f,k,"⪯","\\preceq",!0),l(h,f,k,"⪰","\\succeq",!0),l(h,f,k,"≃","\\simeq",!0),l(h,f,k,"∣","\\mid",!0),l(h,f,k,"≪","\\ll",!0),l(h,f,k,"≫","\\gg",!0),l(h,f,k,"≍","\\asymp",!0),l(h,f,k,"∥","\\parallel"),l(h,f,k,"⋈","\\bowtie",!0),l(h,f,k,"⌣","\\smile",!0),l(h,f,k,"⊑","\\sqsubseteq",!0),l(h,f,k,"⊒","\\sqsupseteq",!0),l(h,f,k,"≐","\\doteq",!0),l(h,f,k,"⌢","\\frown",!0),l(h,f,k,"∋","\\ni",!0),l(h,f,k,"∝","\\propto",!0),l(h,f,k,"⊢","\\vdash",!0),l(h,f,k,"⊣","\\dashv",!0),l(h,f,k,"∋","\\owns"),l(h,f,we,".","\\ldotp"),l(h,f,we,"⋅","\\cdotp"),l(h,f,M,"#","\\#"),l(H,f,M,"#","\\#"),l(h,f,M,"&","\\&"),l(H,f,M,"&","\\&"),l(h,f,M,"ℵ","\\aleph",!0),l(h,f,M,"∀","\\forall",!0),l(h,f,M,"ℏ","\\hbar",!0),l(h,f,M,"∃","\\exists",!0),l(h,f,M,"∇","\\nabla",!0),l(h,f,M,"♭","\\flat",!0),l(h,f,M,"ℓ","\\ell",!0),l(h,f,M,"♮","\\natural",!0),l(h,f,M,"♣","\\clubsuit",!0),l(h,f,M,"℘","\\wp",!0),l(h,f,M,"♯","\\sharp",!0),l(h,f,M,"♢","\\diamondsuit",!0),l(h,f,M,"ℜ","\\Re",!0),l(h,f,M,"♡","\\heartsuit",!0),l(h,f,M,"ℑ","\\Im",!0),l(h,f,M,"♠","\\spadesuit",!0),l(h,f,M,"§","\\S",!0),l(H,f,M,"§","\\S"),l(h,f,M,"¶","\\P",!0),l(H,f,M,"¶","\\P"),l(h,f,M,"†","\\dag"),l(H,f,M,"†","\\dag"),l(H,f,M,"†","\\textdagger"),l(h,f,M,"‡","\\ddag"),l(H,f,M,"‡","\\ddag"),l(H,f,M,"‡","\\textdaggerdbl"),l(h,f,Ke,"⎱","\\rmoustache",!0),l(h,f,$,"⎰","\\lmoustache",!0),l(h,f,Ke,"⟯","\\rgroup",!0),l(h,f,$,"⟮","\\lgroup",!0),l(h,f,te,"∓","\\mp",!0),l(h,f,te,"⊖","\\ominus",!0),l(h,f,te,"⊎","\\uplus",!0),l(h,f,te,"⊓","\\sqcap",!0),l(h,f,te,"∗","\\ast"),l(h,f,te,"⊔","\\sqcup",!0),l(h,f,te,"◯","\\bigcirc",!0),l
(h,f,te,"∙","\\bullet",!0),l(h,f,te,"‡","\\ddagger"),l(h,f,te,"≀","\\wr",!0),l(h,f,te,"⨿","\\amalg"),l(h,f,te,"&","\\And"),l(h,f,k,"⟵","\\longleftarrow",!0),l(h,f,k,"⇐","\\Leftarrow",!0),l(h,f,k,"⟸","\\Longleftarrow",!0),l(h,f,k,"⟶","\\longrightarrow",!0),l(h,f,k,"⇒","\\Rightarrow",!0),l(h,f,k,"⟹","\\Longrightarrow",!0),l(h,f,k,"↔","\\leftrightarrow",!0),l(h,f,k,"⟷","\\longleftrightarrow",!0),l(h,f,k,"⇔","\\Leftrightarrow",!0),l(h,f,k,"⟺","\\Longleftrightarrow",!0),l(h,f,k,"↦","\\mapsto",!0),l(h,f,k,"⟼","\\longmapsto",!0),l(h,f,k,"↗","\\nearrow",!0),l(h,f,k,"↩","\\hookleftarrow",!0),l(h,f,k,"↪","\\hookrightarrow",!0),l(h,f,k,"↘","\\searrow",!0),l(h,f,k,"↼","\\leftharpoonup",!0),l(h,f,k,"⇀","\\rightharpoonup",!0),l(h,f,k,"↙","\\swarrow",!0),l(h,f,k,"↽","\\leftharpoondown",!0),l(h,f,k,"⇁","\\rightharpoondown",!0),l(h,f,k,"↖","\\nwarrow",!0),l(h,f,k,"⇌","\\rightleftharpoons",!0),l(h,S,k,"≮","\\nless",!0),l(h,S,k,"","\\@nleqslant"),l(h,S,k,"","\\@nleqq"),l(h,S,k,"⪇","\\lneq",!0),l(h,S,k,"≨","\\lneqq",!0),l(h,S,k,"","\\@lvertneqq"),l(h,S,k,"⋦","\\lnsim",!0),l(h,S,k,"⪉","\\lnapprox",!0),l(h,S,k,"⊀","\\nprec",!0),l(h,S,k,"⋠","\\npreceq",!0),l(h,S,k,"⋨","\\precnsim",!0),l(h,S,k,"⪹","\\precnapprox",!0),l(h,S,k,"≁","\\nsim",!0),l(h,S,k,"","\\@nshortmid"),l(h,S,k,"∤","\\nmid",!0),l(h,S,k,"⊬","\\nvdash",!0),l(h,S,k,"⊭","\\nvDash",!0),l(h,S,k,"⋪","\\ntriangleleft"),l(h,S,k,"⋬","\\ntrianglelefteq",!0),l(h,S,k,"⊊","\\subsetneq",!0),l(h,S,k,"","\\@varsubsetneq"),l(h,S,k,"⫋","\\subsetneqq",!0),l(h,S,k,"","\\@varsubsetneqq"),l(h,S,k,"≯","\\ngtr",!0),l(h,S,k,"","\\@ngeqslant"),l(h,S,k,"","\\@ngeqq"),l(h,S,k,"⪈","\\gneq",!0),l(h,S,k,"≩","\\gneqq",!0),l(h,S,k,"","\\@gvertneqq"),l(h,S,k,"⋧","\\gnsim",!0),l(h,S,k,"⪊","\\gnapprox",!0),l(h,S,k,"⊁","\\nsucc",!0),l(h,S,k,"⋡","\\nsucceq",!0),l(h,S,k,"⋩","\\succnsim",!0),l(h,S,k,"⪺","\\succnapprox",!0),l(h,S,k,"≆","\\ncong",!0),l(h,S,k,"","\\@nshortparallel"),l(h,S,k,"∦","\\nparallel",!0),l(h,S,k,"⊯","\\nVDash",!0),l(h,S,k,"⋫","\\ntriangleright"),l(h,S,k,"⋭","\\ntrianglerighteq",!0),l(h,S,k,"","\\@nsupseteqq"),l(h,S,k,"⊋","\\supsetneq",!0),l(h,S,k,"","\\@varsupsetneq"),l(h,S,k,"⫌","\\supsetneqq",!0),l(h,S,k,"","\\@varsupsetneqq"),l(h,S,k,"⊮","\\nVdash",!0),l(h,S,k,"⪵","\\precneqq",!0),l(h,S,k,"⪶","\\succneqq",!0),l(h,S,k,"","\\@nsubseteqq"),l(h,S,te,"⊴","\\unlhd"),l(h,S,te,"⊵","\\unrhd"),l(h,S,k,"↚","\\nleftarrow",!0),l(h,S,k,"↛","\\nrightarrow",!0),l(h,S,k,"⇍","\\nLeftarrow",!0),l(h,S,k,"⇏","\\nRightarrow",!0),l(h,S,k,"↮","\\nleftrightarrow",!0),l(h,S,k,"⇎","\\nLeftrightarrow",!0),l(h,S,k,"△","\\vartriangle"),l(h,S,M,"ℏ","\\hslash"),l(h,S,M,"▽","\\triangledown"),l(h,S,M,"◊","\\lozenge"),l(h,S,M,"Ⓢ","\\circledS"),l(h,S,M,"®","\\circledR"),l(H,S,M,"®","\\circledR"),l(h,S,M,"∡","\\measuredangle",!0),l(h,S,M,"∄","\\nexists"),l(h,S,M,"℧","\\mho"),l(h,S,M,"Ⅎ","\\Finv",!0),l(h,S,M,"⅁","\\Game",!0),l(h,S,M,"‵","\\backprime"),l(h,S,M,"▲","\\blacktriangle"),l(h,S,M,"▼","\\blacktriangledown"),l(h,S,M,"■","\\blacksquare"),l(h,S,M,"⧫","\\blacklozenge"),l(h,S,M,"★","\\bigstar"),l(h,S,M,"∢","\\sphericalangle",!0),l(h,S,M,"∁","\\complement",!0),l(h,S,M,"ð","\\eth",!0),l(H,f,M,"ð","ð"),l(h,S,M,"╱","\\diagup"),l(h,S,M,"╲","\\diagdown"),l(h,S,M,"□","\\square"),l(h,S,M,"□","\\Box"),l(h,S,M,"◊","\\Diamond"),l(h,S,M,"¥","\\yen",!0),l(H,S,M,"¥","\\yen",!0),l(h,S,M,"✓","\\checkmark",!0),l(H,S,M,"✓","\\checkmark"),l(h,S,M,"ℶ","\\beth",!0),l(h,S,M,"ℸ","\\daleth",!0),l(h,S,M,"ℷ","\\gimel",!0),l(h,S,M,"ϝ","\\digamma",!0),l(h,S,M,"ϰ","\\varkappa"),l(h,S,$,"┌","\\@ulcorner",!0),l(h,S
,Ke,"┐","\\@urcorner",!0),l(h,S,$,"└","\\@llcorner",!0),l(h,S,Ke,"┘","\\@lrcorner",!0),l(h,S,k,"≦","\\leqq",!0),l(h,S,k,"⩽","\\leqslant",!0),l(h,S,k,"⪕","\\eqslantless",!0),l(h,S,k,"≲","\\lesssim",!0),l(h,S,k,"⪅","\\lessapprox",!0),l(h,S,k,"≊","\\approxeq",!0),l(h,S,te,"⋖","\\lessdot"),l(h,S,k,"⋘","\\lll",!0),l(h,S,k,"≶","\\lessgtr",!0),l(h,S,k,"⋚","\\lesseqgtr",!0),l(h,S,k,"⪋","\\lesseqqgtr",!0),l(h,S,k,"≑","\\doteqdot"),l(h,S,k,"≓","\\risingdotseq",!0),l(h,S,k,"≒","\\fallingdotseq",!0),l(h,S,k,"∽","\\backsim",!0),l(h,S,k,"⋍","\\backsimeq",!0),l(h,S,k,"⫅","\\subseteqq",!0),l(h,S,k,"⋐","\\Subset",!0),l(h,S,k,"⊏","\\sqsubset",!0),l(h,S,k,"≼","\\preccurlyeq",!0),l(h,S,k,"⋞","\\curlyeqprec",!0),l(h,S,k,"≾","\\precsim",!0),l(h,S,k,"⪷","\\precapprox",!0),l(h,S,k,"⊲","\\vartriangleleft"),l(h,S,k,"⊴","\\trianglelefteq"),l(h,S,k,"⊨","\\vDash",!0),l(h,S,k,"⊪","\\Vvdash",!0),l(h,S,k,"⌣","\\smallsmile"),l(h,S,k,"⌢","\\smallfrown"),l(h,S,k,"≏","\\bumpeq",!0),l(h,S,k,"≎","\\Bumpeq",!0),l(h,S,k,"≧","\\geqq",!0),l(h,S,k,"⩾","\\geqslant",!0),l(h,S,k,"⪖","\\eqslantgtr",!0),l(h,S,k,"≳","\\gtrsim",!0),l(h,S,k,"⪆","\\gtrapprox",!0),l(h,S,te,"⋗","\\gtrdot"),l(h,S,k,"⋙","\\ggg",!0),l(h,S,k,"≷","\\gtrless",!0),l(h,S,k,"⋛","\\gtreqless",!0),l(h,S,k,"⪌","\\gtreqqless",!0),l(h,S,k,"≖","\\eqcirc",!0),l(h,S,k,"≗","\\circeq",!0),l(h,S,k,"≜","\\triangleq",!0),l(h,S,k,"∼","\\thicksim"),l(h,S,k,"≈","\\thickapprox"),l(h,S,k,"⫆","\\supseteqq",!0),l(h,S,k,"⋑","\\Supset",!0),l(h,S,k,"⊐","\\sqsupset",!0),l(h,S,k,"≽","\\succcurlyeq",!0),l(h,S,k,"⋟","\\curlyeqsucc",!0),l(h,S,k,"≿","\\succsim",!0),l(h,S,k,"⪸","\\succapprox",!0),l(h,S,k,"⊳","\\vartriangleright"),l(h,S,k,"⊵","\\trianglerighteq"),l(h,S,k,"⊩","\\Vdash",!0),l(h,S,k,"∣","\\shortmid"),l(h,S,k,"∥","\\shortparallel"),l(h,S,k,"≬","\\between",!0),l(h,S,k,"⋔","\\pitchfork",!0),l(h,S,k,"∝","\\varpropto"),l(h,S,k,"◀","\\blacktriangleleft"),l(h,S,k,"∴","\\therefore",!0),l(h,S,k,"∍","\\backepsilon"),l(h,S,k,"▶","\\blacktriangleright"),l(h,S,k,"∵","\\because",!0),l(h,S,k,"⋘","\\llless"),l(h,S,k,"⋙","\\gggtr"),l(h,S,te,"⊲","\\lhd"),l(h,S,te,"⊳","\\rhd"),l(h,S,k,"≂","\\eqsim",!0),l(h,f,k,"⋈","\\Join"),l(h,S,k,"≑","\\Doteq",!0),l(h,S,te,"∔","\\dotplus",!0),l(h,S,te,"∖","\\smallsetminus"),l(h,S,te,"⋒","\\Cap",!0),l(h,S,te,"⋓","\\Cup",!0),l(h,S,te,"⩞","\\doublebarwedge",!0),l(h,S,te,"⊟","\\boxminus",!0),l(h,S,te,"⊞","\\boxplus",!0),l(h,S,te,"⋇","\\divideontimes",!0),l(h,S,te,"⋉","\\ltimes",!0),l(h,S,te,"⋊","\\rtimes",!0),l(h,S,te,"⋋","\\leftthreetimes",!0),l(h,S,te,"⋌","\\rightthreetimes",!0),l(h,S,te,"⋏","\\curlywedge",!0),l(h,S,te,"⋎","\\curlyvee",!0),l(h,S,te,"⊝","\\circleddash",!0),l(h,S,te,"⊛","\\circledast",!0),l(h,S,te,"⋅","\\centerdot"),l(h,S,te,"⊺","\\intercal",!0),l(h,S,te,"⋒","\\doublecap"),l(h,S,te,"⋓","\\doublecup"),l(h,S,te,"⊠","\\boxtimes",!0),l(h,S,k,"⇢","\\dashrightarrow",!0),l(h,S,k,"⇠","\\dashleftarrow",!0),l(h,S,k,"⇇","\\leftleftarrows",!0),l(h,S,k,"⇆","\\leftrightarrows",!0),l(h,S,k,"⇚","\\Lleftarrow",!0),l(h,S,k,"↞","\\twoheadleftarrow",!0),l(h,S,k,"↢","\\leftarrowtail",!0),l(h,S,k,"↫","\\looparrowleft",!0),l(h,S,k,"⇋","\\leftrightharpoons",!0),l(h,S,k,"↶","\\curvearrowleft",!0),l(h,S,k,"↺","\\circlearrowleft",!0),l(h,S,k,"↰","\\Lsh",!0),l(h,S,k,"⇈","\\upuparrows",!0),l(h,S,k,"↿","\\upharpoonleft",!0),l(h,S,k,"⇃","\\downharpoonleft",!0),l(h,f,k,"⊶","\\origof",!0),l(h,f,k,"⊷","\\imageof",!0),l(h,S,k,"⊸","\\multimap",!0),l(h,S,k,"↭","\\leftrightsquigarrow",!0),l(h,S,k,"⇉","\\rightrightarrows",!0),l(h,S,k,"⇄","\\rightleftarrows",!0),l(h,S,k,"↠","\\twoheadrightarrow"
,!0),l(h,S,k,"↣","\\rightarrowtail",!0),l(h,S,k,"↬","\\looparrowright",!0),l(h,S,k,"↷","\\curvearrowright",!0),l(h,S,k,"↻","\\circlearrowright",!0),l(h,S,k,"↱","\\Rsh",!0),l(h,S,k,"⇊","\\downdownarrows",!0),l(h,S,k,"↾","\\upharpoonright",!0),l(h,S,k,"⇂","\\downharpoonright",!0),l(h,S,k,"⇝","\\rightsquigarrow",!0),l(h,S,k,"⇝","\\leadsto"),l(h,S,k,"⇛","\\Rrightarrow",!0),l(h,S,k,"↾","\\restriction"),l(h,f,M,"‘","`"),l(h,f,M,"$","\\$"),l(H,f,M,"$","\\$"),l(H,f,M,"$","\\textdollar"),l(h,f,M,"%","\\%"),l(H,f,M,"%","\\%"),l(h,f,M,"_","\\_"),l(H,f,M,"_","\\_"),l(H,f,M,"_","\\textunderscore"),l(h,f,M,"∠","\\angle",!0),l(h,f,M,"∞","\\infty",!0),l(h,f,M,"′","\\prime"),l(h,f,M,"△","\\triangle"),l(h,f,M,"Γ","\\Gamma",!0),l(h,f,M,"Δ","\\Delta",!0),l(h,f,M,"Θ","\\Theta",!0),l(h,f,M,"Λ","\\Lambda",!0),l(h,f,M,"Ξ","\\Xi",!0),l(h,f,M,"Π","\\Pi",!0),l(h,f,M,"Σ","\\Sigma",!0),l(h,f,M,"Υ","\\Upsilon",!0),l(h,f,M,"Φ","\\Phi",!0),l(h,f,M,"Ψ","\\Psi",!0),l(h,f,M,"Ω","\\Omega",!0),l(h,f,M,"A","Α"),l(h,f,M,"B","Β"),l(h,f,M,"E","Ε"),l(h,f,M,"Z","Ζ"),l(h,f,M,"H","Η"),l(h,f,M,"I","Ι"),l(h,f,M,"K","Κ"),l(h,f,M,"M","Μ"),l(h,f,M,"N","Ν"),l(h,f,M,"O","Ο"),l(h,f,M,"P","Ρ"),l(h,f,M,"T","Τ"),l(h,f,M,"X","Χ"),l(h,f,M,"¬","\\neg",!0),l(h,f,M,"¬","\\lnot"),l(h,f,M,"⊤","\\top"),l(h,f,M,"⊥","\\bot"),l(h,f,M,"∅","\\emptyset"),l(h,S,M,"∅","\\varnothing"),l(h,f,A,"α","\\alpha",!0),l(h,f,A,"β","\\beta",!0),l(h,f,A,"γ","\\gamma",!0),l(h,f,A,"δ","\\delta",!0),l(h,f,A,"ϵ","\\epsilon",!0),l(h,f,A,"ζ","\\zeta",!0),l(h,f,A,"η","\\eta",!0),l(h,f,A,"θ","\\theta",!0),l(h,f,A,"ι","\\iota",!0),l(h,f,A,"κ","\\kappa",!0),l(h,f,A,"λ","\\lambda",!0),l(h,f,A,"μ","\\mu",!0),l(h,f,A,"ν","\\nu",!0),l(h,f,A,"ξ","\\xi",!0),l(h,f,A,"ο","\\omicron",!0),l(h,f,A,"π","\\pi",!0),l(h,f,A,"ρ","\\rho",!0),l(h,f,A,"σ","\\sigma",!0),l(h,f,A,"τ","\\tau",!0),l(h,f,A,"υ","\\upsilon",!0),l(h,f,A,"ϕ","\\phi",!0),l(h,f,A,"χ","\\chi",!0),l(h,f,A,"ψ","\\psi",!0),l(h,f,A,"ω","\\omega",!0),l(h,f,A,"ε","\\varepsilon",!0),l(h,f,A,"ϑ","\\vartheta",!0),l(h,f,A,"ϖ","\\varpi",!0),l(h,f,A,"ϱ","\\varrho",!0),l(h,f,A,"ς","\\varsigma",!0),l(h,f,A,"φ","\\varphi",!0),l(h,f,te,"∗","*",!0),l(h,f,te,"+","+"),l(h,f,te,"−","-",!0),l(h,f,te,"⋅","\\cdot",!0),l(h,f,te,"∘","\\circ",!0),l(h,f,te,"÷","\\div",!0),l(h,f,te,"±","\\pm",!0),l(h,f,te,"×","\\times",!0),l(h,f,te,"∩","\\cap",!0),l(h,f,te,"∪","\\cup",!0),l(h,f,te,"∖","\\setminus",!0),l(h,f,te,"∧","\\land"),l(h,f,te,"∨","\\lor"),l(h,f,te,"∧","\\wedge",!0),l(h,f,te,"∨","\\vee",!0),l(h,f,M,"√","\\surd"),l(h,f,$,"⟨","\\langle",!0),l(h,f,$,"∣","\\lvert"),l(h,f,$,"∥","\\lVert"),l(h,f,Ke,"?","?"),l(h,f,Ke,"!","!"),l(h,f,Ke,"⟩","\\rangle",!0),l(h,f,Ke,"∣","\\rvert"),l(h,f,Ke,"∥","\\rVert"),l(h,f,k,"=","="),l(h,f,k,":",":"),l(h,f,k,"≈","\\approx",!0),l(h,f,k,"≅","\\cong",!0),l(h,f,k,"≥","\\ge"),l(h,f,k,"≥","\\geq",!0),l(h,f,k,"←","\\gets"),l(h,f,k,">","\\gt",!0),l(h,f,k,"∈","\\in",!0),l(h,f,k,"","\\@not"),l(h,f,k,"⊂","\\subset",!0),l(h,f,k,"⊃","\\supset",!0),l(h,f,k,"⊆","\\subseteq",!0),l(h,f,k,"⊇","\\supseteq",!0),l(h,S,k,"⊈","\\nsubseteq",!0),l(h,S,k,"⊉","\\nsupseteq",!0),l(h,f,k,"⊨","\\models"),l(h,f,k,"←","\\leftarrow",!0),l(h,f,k,"≤","\\le"),l(h,f,k,"≤","\\leq",!0),l(h,f,k,"<","\\lt",!0),l(h,f,k,"→","\\rightarrow",!0),l(h,f,k,"→","\\to"),l(h,S,k,"≱","\\ngeq",!0),l(h,S,k,"≰","\\nleq",!0),l(h,f,qe," ","\\ "),l(h,f,qe," ","\\space"),l(h,f,qe," ","\\nobreakspace"),l(H,f,qe," ","\\ "),l(H,f,qe," "," "),l(H,f,qe," ","\\space"),l(H,f,qe," 
","\\nobreakspace"),l(h,f,qe,null,"\\nobreak"),l(h,f,qe,null,"\\allowbreak"),l(h,f,we,",",","),l(h,f,we,";",";"),l(h,S,te,"⊼","\\barwedge",!0),l(h,S,te,"⊻","\\veebar",!0),l(h,f,te,"⊙","\\odot",!0),l(h,f,te,"⊕","\\oplus",!0),l(h,f,te,"⊗","\\otimes",!0),l(h,f,M,"∂","\\partial",!0),l(h,f,te,"⊘","\\oslash",!0),l(h,S,te,"⊚","\\circledcirc",!0),l(h,S,te,"⊡","\\boxdot",!0),l(h,f,te,"△","\\bigtriangleup"),l(h,f,te,"▽","\\bigtriangledown"),l(h,f,te,"†","\\dagger"),l(h,f,te,"⋄","\\diamond"),l(h,f,te,"⋆","\\star"),l(h,f,te,"◃","\\triangleleft"),l(h,f,te,"▹","\\triangleright"),l(h,f,$,"{","\\{"),l(H,f,M,"{","\\{"),l(H,f,M,"{","\\textbraceleft"),l(h,f,Ke,"}","\\}"),l(H,f,M,"}","\\}"),l(H,f,M,"}","\\textbraceright"),l(h,f,$,"{","\\lbrace"),l(h,f,Ke,"}","\\rbrace"),l(h,f,$,"[","\\lbrack",!0),l(H,f,M,"[","\\lbrack",!0),l(h,f,Ke,"]","\\rbrack",!0),l(H,f,M,"]","\\rbrack",!0),l(h,f,$,"(","\\lparen",!0),l(h,f,Ke,")","\\rparen",!0),l(H,f,M,"<","\\textless",!0),l(H,f,M,">","\\textgreater",!0),l(h,f,$,"⌊","\\lfloor",!0),l(h,f,Ke,"⌋","\\rfloor",!0),l(h,f,$,"⌈","\\lceil",!0),l(h,f,Ke,"⌉","\\rceil",!0),l(h,f,M,"\\","\\backslash"),l(h,f,M,"∣","|"),l(h,f,M,"∣","\\vert"),l(H,f,M,"|","\\textbar",!0),l(h,f,M,"∥","\\|"),l(h,f,M,"∥","\\Vert"),l(H,f,M,"∥","\\textbardbl"),l(H,f,M,"~","\\textasciitilde"),l(H,f,M,"\\","\\textbackslash"),l(H,f,M,"^","\\textasciicircum"),l(h,f,k,"↑","\\uparrow",!0),l(h,f,k,"⇑","\\Uparrow",!0),l(h,f,k,"↓","\\downarrow",!0),l(h,f,k,"⇓","\\Downarrow",!0),l(h,f,k,"↕","\\updownarrow",!0),l(h,f,k,"⇕","\\Updownarrow",!0),l(h,f,P,"∐","\\coprod"),l(h,f,P,"⋁","\\bigvee"),l(h,f,P,"⋀","\\bigwedge"),l(h,f,P,"⨄","\\biguplus"),l(h,f,P,"⋂","\\bigcap"),l(h,f,P,"⋃","\\bigcup"),l(h,f,P,"∫","\\int"),l(h,f,P,"∫","\\intop"),l(h,f,P,"∬","\\iint"),l(h,f,P,"∭","\\iiint"),l(h,f,P,"∏","\\prod"),l(h,f,P,"∑","\\sum"),l(h,f,P,"⨂","\\bigotimes"),l(h,f,P,"⨁","\\bigoplus"),l(h,f,P,"⨀","\\bigodot"),l(h,f,P,"∮","\\oint"),l(h,f,P,"∯","\\oiint"),l(h,f,P,"∰","\\oiiint"),l(h,f,P,"⨆","\\bigsqcup"),l(h,f,P,"∫","\\smallint"),l(H,f,oe,"…","\\textellipsis"),l(h,f,oe,"…","\\mathellipsis"),l(H,f,oe,"…","\\ldots",!0),l(h,f,oe,"…","\\ldots",!0),l(h,f,oe,"⋯","\\@cdots",!0),l(h,f,oe,"⋱","\\ddots",!0),l(h,f,M,"⋮","\\varvdots"),l(h,f,Be,"ˊ","\\acute"),l(h,f,Be,"ˋ","\\grave"),l(h,f,Be,"¨","\\ddot"),l(h,f,Be,"~","\\tilde"),l(h,f,Be,"ˉ","\\bar"),l(h,f,Be,"˘","\\breve"),l(h,f,Be,"ˇ","\\check"),l(h,f,Be,"^","\\hat"),l(h,f,Be,"⃗","\\vec"),l(h,f,Be,"˙","\\dot"),l(h,f,Be,"˚","\\mathring"),l(h,f,A,"","\\@imath"),l(h,f,A,"","\\@jmath"),l(h,f,M,"ı","ı"),l(h,f,M,"ȷ","ȷ"),l(H,f,M,"ı","\\i",!0),l(H,f,M,"ȷ","\\j",!0),l(H,f,M,"ß","\\ss",!0),l(H,f,M,"æ","\\ae",!0),l(H,f,M,"œ","\\oe",!0),l(H,f,M,"ø","\\o",!0),l(H,f,M,"Æ","\\AE",!0),l(H,f,M,"Œ","\\OE",!0),l(H,f,M,"Ø","\\O",!0),l(H,f,Be,"ˊ","\\'"),l(H,f,Be,"ˋ","\\`"),l(H,f,Be,"ˆ","\\^"),l(H,f,Be,"˜","\\~"),l(H,f,Be,"ˉ","\\="),l(H,f,Be,"˘","\\u"),l(H,f,Be,"˙","\\."),l(H,f,Be,"¸","\\c"),l(H,f,Be,"˚","\\r"),l(H,f,Be,"ˇ","\\v"),l(H,f,Be,"¨",'\\"'),l(H,f,Be,"˝","\\H"),l(H,f,Be,"◯","\\textcircled");var 
mt={"--":!0,"---":!0,"``":!0,"''":!0};l(H,f,M,"–","--",!0),l(H,f,M,"–","\\textendash"),l(H,f,M,"—","---",!0),l(H,f,M,"—","\\textemdash"),l(H,f,M,"‘","`",!0),l(H,f,M,"‘","\\textquoteleft"),l(H,f,M,"’","'",!0),l(H,f,M,"’","\\textquoteright"),l(H,f,M,"“","``",!0),l(H,f,M,"“","\\textquotedblleft"),l(H,f,M,"”","''",!0),l(H,f,M,"”","\\textquotedblright"),l(h,f,M,"°","\\degree",!0),l(H,f,M,"°","\\degree"),l(H,f,M,"°","\\textdegree",!0),l(h,f,M,"£","\\pounds"),l(h,f,M,"£","\\mathsterling",!0),l(H,f,M,"£","\\pounds"),l(H,f,M,"£","\\textsterling",!0),l(h,S,M,"✠","\\maltese"),l(H,S,M,"✠","\\maltese");for(var _0='0123456789/@."',E0=0;E0<_0.length;E0++){var Ur=_0.charAt(E0);l(h,f,M,Ur,Ur)}for(var Vn='0123456789!@*()-=+";:?/.,',Gr=0;Grt&&(t=c.height),c.depth>r&&(r=c.depth),c.maxFontSize>n&&(n=c.maxFontSize)}e.height=t,e.depth=r,e.maxFontSize=n},st=function(e,t,r,n){var a=new bt(e,t,r,n);return jr(a),a},jn=function(e,t,r,n){return new bt(e,t,r,n)},yl=function(e,t,r){var n=st([e],[],t);return n.height=Math.max(r||t.fontMetrics().defaultRuleThickness,t.minRuleThickness),n.style.borderBottomWidth=X(n.height),n.maxFontSize=1,n},xl=function(e,t,r,n){var a=new j0(e,t,r,n);return jr(a),a},Xn=function(e){var t=new Yt(e);return jr(t),t},wl=function(e,t){return e instanceof Yt?st([],[e],t):e},kl=function(e){if(e.positionType==="individualShift"){for(var t=e.children,r=[t[0]],n=-t[0].shift-t[0].elem.depth,a=n,c=1;c0&&(a.push(yr(c,e)),c=[]),a.push(r[d]));c.length>0&&a.push(yr(c,e));var y;t?(y=yr(je(t,e,!0)),y.classes=["tag"],a.push(y)):n&&a.push(n);var T=Pt(["katex-html"],a);if(T.setAttribute("aria-hidden","true"),y){var B=y.children[0];B.style.height=X(T.height+T.depth),T.depth&&(B.style.verticalAlign=X(-T.depth))}return T}function ea(u){return new Yt(u)}var xt=function(){function u(t,r,n){this.type=void 0,this.attributes=void 0,this.children=void 0,this.classes=void 0,this.type=t,this.attributes={},this.children=r||[],this.classes=n||[]}var e=u.prototype;return e.setAttribute=function(r,n){this.attributes[r]=n},e.getAttribute=function(r){return this.attributes[r]},e.toNode=function(){var r=document.createElementNS("http://www.w3.org/1998/Math/MathML",this.type);for(var n in this.attributes)Object.prototype.hasOwnProperty.call(this.attributes,n)&&r.setAttribute(n,this.attributes[n]);this.classes.length>0&&(r.className=Je(this.classes));for(var a=0;a0&&(r+=' class ="'+q.escape(Je(this.classes))+'"'),r+=">";for(var a=0;a",r},e.toText=function(){return this.children.map(function(r){return r.toText()}).join("")},u}(),K0=function(){function u(t){this.text=void 0,this.text=t}var e=u.prototype;return e.toNode=function(){return document.createTextNode(this.text)},e.toMarkup=function(){return q.escape(this.toText())},e.toText=function(){return this.text},u}(),Nl=function(){function u(t){this.width=void 0,this.character=void 0,this.width=t,t>=.05555&&t<=.05556?this.character=" ":t>=.1666&&t<=.1667?this.character=" ":t>=.2222&&t<=.2223?this.character=" ":t>=.2777&&t<=.2778?this.character="  ":t>=-.05556&&t<=-.05555?this.character=" ⁣":t>=-.1667&&t<=-.1666?this.character=" ⁣":t>=-.2223&&t<=-.2222?this.character=" ⁣":t>=-.2778&&t<=-.2777?this.character=" ⁣":this.character=null}var e=u.prototype;return e.toNode=function(){if(this.character)return document.createTextNode(this.character);var r=document.createElementNS("http://www.w3.org/1998/Math/MathML","mspace");return r.setAttribute("width",X(this.width)),r},e.toMarkup=function(){return this.character?""+this.character+"":''},e.toText=function(){return 
this.character?this.character:" "},u}(),W={MathNode:xt,TextNode:K0,SpaceNode:Nl,newDocumentFragment:ea},wt=function(e,t,r){return Ne[t][e]&&Ne[t][e].replace&&e.charCodeAt(0)!==55349&&!(mt.hasOwnProperty(e)&&r&&(r.fontFamily&&r.fontFamily.slice(4,6)==="tt"||r.font&&r.font.slice(4,6)==="tt"))&&(e=Ne[t][e].replace),new W.TextNode(e)},Zr=function(e){return e.length===1?e[0]:new W.MathNode("mrow",e)},Kr=function(e,t){if(t.fontFamily==="texttt")return"monospace";if(t.fontFamily==="textsf")return t.fontShape==="textit"&&t.fontWeight==="textbf"?"sans-serif-bold-italic":t.fontShape==="textit"?"sans-serif-italic":t.fontWeight==="textbf"?"bold-sans-serif":"sans-serif";if(t.fontShape==="textit"&&t.fontWeight==="textbf")return"bold-italic";if(t.fontShape==="textit")return"italic";if(t.fontWeight==="textbf")return"bold";var r=t.font;if(!r||r==="mathnormal")return null;var n=e.mode;if(r==="mathit")return"italic";if(r==="boldsymbol")return e.type==="textord"?"bold":"bold-italic";if(r==="mathbf")return"bold";if(r==="mathbb")return"double-struck";if(r==="mathfrak")return"fraktur";if(r==="mathscr"||r==="mathcal")return"script";if(r==="mathsf")return"sans-serif";if(r==="mathtt")return"monospace";var a=e.text;if(q.contains(["\\imath","\\jmath"],a))return null;Ne[n][a]&&Ne[n][a].replace&&(a=Ne[n][a].replace);var c=E.fontMap[r].fontName;return Dt(a,c,n)?E.fontMap[r].variant:null},ot=function(e,t,r){if(e.length===1){var n=Ce(e[0],t);return r&&n instanceof xt&&n.type==="mo"&&(n.setAttribute("lspace","0em"),n.setAttribute("rspace","0em")),[n]}for(var a=[],c,d=0;d0&&(O.text=O.text.slice(0,1)+"̸"+O.text.slice(1),a.pop())}}}a.push(g),c=g}return a},Jt=function(e,t,r){return Zr(ot(e,t,r))},Ce=function(e,t){if(!e)return new W.MathNode("mrow");if(vr[e.type]){var r=vr[e.type](e,t);return r}else throw new p("Got group of unknown type: '"+e.type+"'")};function ta(u,e,t,r,n){var a=ot(u,t),c;a.length===1&&a[0]instanceof xt&&q.contains(["mrow","mtable"],a[0].type)?c=a[0]:c=new W.MathNode("mrow",a);var d=new W.MathNode("annotation",[new W.TextNode(e)]);d.setAttribute("encoding","application/x-tex");var g=new W.MathNode("semantics",[c,d]),y=new W.MathNode("math",[g]);y.setAttribute("xmlns","http://www.w3.org/1998/Math/MathML"),r&&y.setAttribute("display","block");var T=n?"katex":"katex-mathml";return E.makeSpan([T],[y])}var ra=function(e){return new T0({style:e.displayMode?J.DISPLAY:J.TEXT,maxSize:e.maxSize,minRuleThickness:e.minRuleThickness})},na=function(e,t){if(t.displayMode){var r=["katex-display"];t.leqno&&r.push("leqno"),t.fleqn&&r.push("fleqn"),e=E.makeSpan(r,[e])}return e},Rl=function(e,t,r){var n=ra(r),a;if(r.output==="mathml")return ta(e,t,n,r.displayMode,!0);if(r.output==="html"){var c=$r(e,n);a=E.makeSpan(["katex"],[c])}else{var d=ta(e,t,n,r.displayMode,!1),g=$r(e,n);a=E.makeSpan(["katex"],[d,g])}return na(a,r)},Fl=function(e,t,r){var n=ra(r),a=$r(e,n),c=E.makeSpan(["katex"],[a]);return 
na(c,r)},Il={widehat:"^",widecheck:"ˇ",widetilde:"~",utilde:"~",overleftarrow:"←",underleftarrow:"←",xleftarrow:"←",overrightarrow:"→",underrightarrow:"→",xrightarrow:"→",underbrace:"⏟",overbrace:"⏞",overgroup:"⏠",undergroup:"⏡",overleftrightarrow:"↔",underleftrightarrow:"↔",xleftrightarrow:"↔",Overrightarrow:"⇒",xRightarrow:"⇒",overleftharpoon:"↼",xleftharpoonup:"↼",overrightharpoon:"⇀",xrightharpoonup:"⇀",xLeftarrow:"⇐",xLeftrightarrow:"⇔",xhookleftarrow:"↩",xhookrightarrow:"↪",xmapsto:"↦",xrightharpoondown:"⇁",xleftharpoondown:"↽",xrightleftharpoons:"⇌",xleftrightharpoons:"⇋",xtwoheadleftarrow:"↞",xtwoheadrightarrow:"↠",xlongequal:"=",xtofrom:"⇄",xrightleftarrows:"⇄",xrightequilibrium:"⇌",xleftequilibrium:"⇋","\\cdrightarrow":"→","\\cdleftarrow":"←","\\cdlongequal":"="},Ll=function(e){var t=new W.MathNode("mo",[new W.TextNode(Il[e.replace(/^\\/,"")])]);return t.setAttribute("stretchy","true"),t},Ol={overrightarrow:[["rightarrow"],.888,522,"xMaxYMin"],overleftarrow:[["leftarrow"],.888,522,"xMinYMin"],underrightarrow:[["rightarrow"],.888,522,"xMaxYMin"],underleftarrow:[["leftarrow"],.888,522,"xMinYMin"],xrightarrow:[["rightarrow"],1.469,522,"xMaxYMin"],"\\cdrightarrow":[["rightarrow"],3,522,"xMaxYMin"],xleftarrow:[["leftarrow"],1.469,522,"xMinYMin"],"\\cdleftarrow":[["leftarrow"],3,522,"xMinYMin"],Overrightarrow:[["doublerightarrow"],.888,560,"xMaxYMin"],xRightarrow:[["doublerightarrow"],1.526,560,"xMaxYMin"],xLeftarrow:[["doubleleftarrow"],1.526,560,"xMinYMin"],overleftharpoon:[["leftharpoon"],.888,522,"xMinYMin"],xleftharpoonup:[["leftharpoon"],.888,522,"xMinYMin"],xleftharpoondown:[["leftharpoondown"],.888,522,"xMinYMin"],overrightharpoon:[["rightharpoon"],.888,522,"xMaxYMin"],xrightharpoonup:[["rightharpoon"],.888,522,"xMaxYMin"],xrightharpoondown:[["rightharpoondown"],.888,522,"xMaxYMin"],xlongequal:[["longequal"],.888,334,"xMinYMin"],"\\cdlongequal":[["longequal"],3,334,"xMinYMin"],xtwoheadleftarrow:[["twoheadleftarrow"],.888,334,"xMinYMin"],xtwoheadrightarrow:[["twoheadrightarrow"],.888,334,"xMaxYMin"],overleftrightarrow:[["leftarrow","rightarrow"],.888,522],overbrace:[["leftbrace","midbrace","rightbrace"],1.6,548],underbrace:[["leftbraceunder","midbraceunder","rightbraceunder"],1.6,548],underleftrightarrow:[["leftarrow","rightarrow"],.888,522],xleftrightarrow:[["leftarrow","rightarrow"],1.75,522],xLeftrightarrow:[["doubleleftarrow","doublerightarrow"],1.75,560],xrightleftharpoons:[["leftharpoondownplus","rightharpoonplus"],1.75,716],xleftrightharpoons:[["leftharpoonplus","rightharpoondownplus"],1.75,716],xhookleftarrow:[["leftarrow","righthook"],1.08,522],xhookrightarrow:[["lefthook","rightarrow"],1.08,522],overlinesegment:[["leftlinesegment","rightlinesegment"],.888,522],underlinesegment:[["leftlinesegment","rightlinesegment"],.888,522],overgroup:[["leftgroup","rightgroup"],.888,342],undergroup:[["leftgroupunder","rightgroupunder"],.888,342],xmapsto:[["leftmapsto","rightarrow"],1.5,522],xtofrom:[["leftToFrom","rightToFrom"],1.75,528],xrightleftarrows:[["baraboveleftarrow","rightarrowabovebar"],1.75,901],xrightequilibrium:[["baraboveshortleftharpoon","rightharpoonaboveshortbar"],1.75,716],xleftequilibrium:[["shortbaraboveleftharpoon","shortrightharpoonabovebar"],1.75,716]},ql=function(e){return e.type==="ordgroup"?e.body.length:1},Pl=function(e,t){function r(){var g=4e5,y=e.label.slice(1);if(q.contains(["widehat","widecheck","widetilde","utilde"],y)){var 
T=e,B=ql(T.base),F,R,O;if(B>5)y==="widehat"||y==="widecheck"?(F=420,g=2364,O=.42,R=y+"4"):(F=312,g=2340,O=.34,R="tilde4");else{var Y=[1,1,2,2,3,3][B];y==="widehat"||y==="widecheck"?(g=[0,1062,2364,2364,2364][Y],F=[0,239,300,360,420][Y],O=[0,.24,.3,.3,.36,.42][Y],R=y+Y):(g=[0,600,1033,2339,2340][Y],F=[0,260,286,306,312][Y],O=[0,.26,.286,.3,.306,.34][Y],R="tilde"+Y)}var Q=new Nt(R),ae=new yt([Q],{width:"100%",height:X(O),viewBox:"0 0 "+g+" "+F,preserveAspectRatio:"none"});return{span:E.makeSvgSpan([],[ae],t),minWidth:0,height:O}}else{var ue=[],ce=Ol[y],Ae=ce[0],be=ce[1],Me=ce[2],Se=Me/1e3,_e=Ae.length,Ie,Qe;if(_e===1){var dt=ce[3];Ie=["hide-tail"],Qe=[dt]}else if(_e===2)Ie=["halfarrow-left","halfarrow-right"],Qe=["xMinYMin","xMaxYMin"];else if(_e===3)Ie=["brace-left","brace-center","brace-right"],Qe=["xMinYMin","xMidYMin","xMaxYMin"];else throw new Error(`Correct katexImagesData or update code here to support - `+_e+" children.");for(var Oe=0;Oe<_e;Oe++){var v0=new Nt(Ae[Oe]),kt=new yt([v0],{width:"400em",height:X(Se),viewBox:"0 0 "+g+" "+Me,preserveAspectRatio:Qe[Oe]+" slice"}),rt=E.makeSvgSpan([Ie[Oe]],[kt],t);if(_e===1)return{span:rt,minWidth:be,height:Se};rt.style.height=X(Se),ue.push(rt)}return{span:E.makeSpan(["stretchy"],ue,t),minWidth:be,height:Se}}}var n=r(),a=n.span,c=n.minWidth,d=n.height;return a.height=d,a.style.height=X(d),c>0&&(a.style.minWidth=X(c)),a},Hl=function(e,t,r,n,a){var c,d=e.height+e.depth+r+n;if(/fbox|color|angl/.test(t)){if(c=E.makeSpan(["stretchy",t],[],a),t==="fbox"){var g=a.color&&a.getColor();g&&(c.style.borderColor=g)}}else{var y=[];/^[bx]cancel$/.test(t)&&y.push(new Kt({x1:"0",y1:"0",x2:"100%",y2:"100%","stroke-width":"0.046em"})),/^x?cancel$/.test(t)&&y.push(new Kt({x1:"0",y1:"100%",x2:"100%",y2:"0","stroke-width":"0.046em"}));var T=new yt(y,{width:"100%",height:X(d)});c=E.makeSvgSpan([],[T],a)}return c.height=d,c.style.height=X(d),c},Ht={encloseSpan:Hl,mathMLnode:Ll,svgSpan:Pl};function ve(u,e){if(!u||u.type!==e)throw new Error("Expected node of type "+e+", but got "+(u?"node of type "+u.type:String(u)));return u}function Qr(u){var e=xr(u);if(!e)throw new Error("Expected node of symbol group type, but got "+(u?"node of type "+u.type:String(u)));return e}function xr(u){return u&&(u.type==="atom"||$0.hasOwnProperty(u.type))?u:null}var Jr=function(e,t){var r,n,a;e&&e.type==="supsub"?(n=ve(e.base,"accent"),r=n.base,e.base=r,a=Hr(ke(e,t)),e.base=n):(n=ve(e,"accent"),r=n.base);var c=ke(r,t.havingCrampedStyle()),d=n.isShifty&&q.isCharacterBox(r),g=0;if(d){var y=q.getBaseElem(r),T=ke(y,t.havingCrampedStyle());g=z0(T).skew}var B=n.label==="\\c",F=B?c.height+c.depth:Math.min(c.height,t.fontMetrics().xHeight),R;if(n.isStretchy)R=Ht.svgSpan(n,t),R=E.makeVList({positionType:"firstBaseline",children:[{type:"elem",elem:c},{type:"elem",elem:R,wrapperClasses:["svg-align"],wrapperStyle:g>0?{width:"calc(100% - "+X(2*g)+")",marginLeft:X(2*g)}:void 0}]},t);else{var O,Y;n.label==="\\vec"?(O=E.staticSvg("vec",t),Y=E.svgData.vec[1]):(O=E.makeOrd({mode:n.mode,text:n.label},t,"textord"),O=z0(O),O.italic=0,Y=O.width,B&&(F+=O.depth)),R=E.makeSpan(["accent-body"],[O]);var Q=n.label==="\\textcircled";Q&&(R.classes.push("accent-full"),F=c.height);var ae=g;Q||(ae-=Y/2),R.style.left=X(ae),n.label==="\\textcircled"&&(R.style.top=".2em"),R=E.makeVList({positionType:"firstBaseline",children:[{type:"elem",elem:c},{type:"kern",size:-F},{type:"elem",elem:R}]},t)}var ue=E.makeSpan(["mord","accent"],[R],t);return 
a?(a.children[0]=ue,a.height=Math.max(ue.height,a.height),a.classes[0]="mord",a):ue},aa=function(e,t){var r=e.isStretchy?Ht.mathMLnode(e.label):new W.MathNode("mo",[wt(e.label,e.mode)]),n=new W.MathNode("mover",[Ce(e.base,t),r]);return n.setAttribute("accent","true"),n},Ul=new RegExp(["\\acute","\\grave","\\ddot","\\tilde","\\bar","\\breve","\\check","\\hat","\\vec","\\dot","\\mathring"].map(function(u){return"\\"+u}).join("|"));ee({type:"accent",names:["\\acute","\\grave","\\ddot","\\tilde","\\bar","\\breve","\\check","\\hat","\\vec","\\dot","\\mathring","\\widecheck","\\widehat","\\widetilde","\\overrightarrow","\\overleftarrow","\\Overrightarrow","\\overleftrightarrow","\\overgroup","\\overlinesegment","\\overleftharpoon","\\overrightharpoon"],props:{numArgs:1},handler:function(e,t){var r=br(t[0]),n=!Ul.test(e.funcName),a=!n||e.funcName==="\\widehat"||e.funcName==="\\widetilde"||e.funcName==="\\widecheck";return{type:"accent",mode:e.parser.mode,label:e.funcName,isStretchy:n,isShifty:a,base:r}},htmlBuilder:Jr,mathmlBuilder:aa}),ee({type:"accent",names:["\\'","\\`","\\^","\\~","\\=","\\u","\\.",'\\"',"\\c","\\r","\\H","\\v","\\textcircled"],props:{numArgs:1,allowedInText:!0,allowedInMath:!0,argTypes:["primitive"]},handler:function(e,t){var r=t[0],n=e.parser.mode;return n==="math"&&(e.parser.settings.reportNonstrict("mathVsTextAccents","LaTeX's accent "+e.funcName+" works only in text mode"),n="text"),{type:"accent",mode:n,label:e.funcName,isStretchy:!1,isShifty:!0,base:r}},htmlBuilder:Jr,mathmlBuilder:aa}),ee({type:"accentUnder",names:["\\underleftarrow","\\underrightarrow","\\underleftrightarrow","\\undergroup","\\underlinesegment","\\utilde"],props:{numArgs:1},handler:function(e,t){var r=e.parser,n=e.funcName,a=t[0];return{type:"accentUnder",mode:r.mode,label:n,base:a}},htmlBuilder:function(e,t){var r=ke(e.base,t),n=Ht.svgSpan(e,t),a=e.label==="\\utilde"?.12:0,c=E.makeVList({positionType:"top",positionData:r.height,children:[{type:"elem",elem:n,wrapperClasses:["svg-align"]},{type:"kern",size:a},{type:"elem",elem:r}]},t);return E.makeSpan(["mord","accentunder"],[c],t)},mathmlBuilder:function(e,t){var r=Ht.mathMLnode(e.label),n=new W.MathNode("munder",[Ce(e.base,t),r]);return n.setAttribute("accentunder","true"),n}});var wr=function(e){var t=new W.MathNode("mpadded",e?[e]:[]);return t.setAttribute("width","+0.6em"),t.setAttribute("lspace","0.3em"),t};ee({type:"xArrow",names:["\\xleftarrow","\\xrightarrow","\\xLeftarrow","\\xRightarrow","\\xleftrightarrow","\\xLeftrightarrow","\\xhookleftarrow","\\xhookrightarrow","\\xmapsto","\\xrightharpoondown","\\xrightharpoonup","\\xleftharpoondown","\\xleftharpoonup","\\xrightleftharpoons","\\xleftrightharpoons","\\xlongequal","\\xtwoheadrightarrow","\\xtwoheadleftarrow","\\xtofrom","\\xrightleftarrows","\\xrightequilibrium","\\xleftequilibrium","\\\\cdrightarrow","\\\\cdleftarrow","\\\\cdlongequal"],props:{numArgs:1,numOptionalArgs:1},handler:function(e,t,r){var n=e.parser,a=e.funcName;return{type:"xArrow",mode:n.mode,label:a,body:t[0],below:r[0]}},htmlBuilder:function(e,t){var r=t.style,n=t.havingStyle(r.sup()),a=E.wrapFragment(ke(e.body,n,t),t),c=e.label.slice(0,2)==="\\x"?"x":"cd";a.classes.push(c+"-arrow-pad");var d;e.below&&(n=t.havingStyle(r.sub()),d=E.wrapFragment(ke(e.below,n,t),t),d.classes.push(c+"-arrow-pad"));var g=Ht.svgSpan(e,t),y=-t.fontMetrics().axisHeight+.5*g.height,T=-t.fontMetrics().axisHeight-.5*g.height-.111;(a.depth>.25||e.label==="\\xleftequilibrium")&&(T-=a.depth);var B;if(d){var 
F=-t.fontMetrics().axisHeight+d.height+.5*g.height+.111;B=E.makeVList({positionType:"individualShift",children:[{type:"elem",elem:a,shift:T},{type:"elem",elem:g,shift:y},{type:"elem",elem:d,shift:F}]},t)}else B=E.makeVList({positionType:"individualShift",children:[{type:"elem",elem:a,shift:T},{type:"elem",elem:g,shift:y}]},t);return B.children[0].children[0].children[1].classes.push("svg-align"),E.makeSpan(["mrel","x-arrow"],[B],t)},mathmlBuilder:function(e,t){var r=Ht.mathMLnode(e.label);r.setAttribute("minsize",e.label.charAt(0)==="x"?"1.75em":"3.0em");var n;if(e.body){var a=wr(Ce(e.body,t));if(e.below){var c=wr(Ce(e.below,t));n=new W.MathNode("munderover",[r,c,a])}else n=new W.MathNode("mover",[r,a])}else if(e.below){var d=wr(Ce(e.below,t));n=new W.MathNode("munder",[r,d])}else n=wr(),n=new W.MathNode("mover",[r,n]);return n}});var Gl=E.makeSpan;function ia(u,e){var t=je(u.body,e,!0);return Gl([u.mclass],t,e)}function la(u,e){var t,r=ot(u.body,e);return u.mclass==="minner"?t=new W.MathNode("mpadded",r):u.mclass==="mord"?u.isCharacterBox?(t=r[0],t.type="mi"):t=new W.MathNode("mi",r):(u.isCharacterBox?(t=r[0],t.type="mo"):t=new W.MathNode("mo",r),u.mclass==="mbin"?(t.attributes.lspace="0.22em",t.attributes.rspace="0.22em"):u.mclass==="mpunct"?(t.attributes.lspace="0em",t.attributes.rspace="0.17em"):u.mclass==="mopen"||u.mclass==="mclose"?(t.attributes.lspace="0em",t.attributes.rspace="0em"):u.mclass==="minner"&&(t.attributes.lspace="0.0556em",t.attributes.width="+0.1111em")),t}ee({type:"mclass",names:["\\mathord","\\mathbin","\\mathrel","\\mathopen","\\mathclose","\\mathpunct","\\mathinner"],props:{numArgs:1,primitive:!0},handler:function(e,t){var r=e.parser,n=e.funcName,a=t[0];return{type:"mclass",mode:r.mode,mclass:"m"+n.slice(5),body:Ge(a),isCharacterBox:q.isCharacterBox(a)}},htmlBuilder:ia,mathmlBuilder:la});var kr=function(e){var t=e.type==="ordgroup"&&e.body.length?e.body[0]:e;return t.type==="atom"&&(t.family==="bin"||t.family==="rel")?"m"+t.family:"mord"};ee({type:"mclass",names:["\\@binrel"],props:{numArgs:2},handler:function(e,t){var r=e.parser;return{type:"mclass",mode:r.mode,mclass:kr(t[0]),body:Ge(t[1]),isCharacterBox:q.isCharacterBox(t[1])}}}),ee({type:"mclass",names:["\\stackrel","\\overset","\\underset"],props:{numArgs:2},handler:function(e,t){var r=e.parser,n=e.funcName,a=t[1],c=t[0],d;n!=="\\stackrel"?d=kr(a):d="mrel";var g={type:"op",mode:a.mode,limits:!0,alwaysHandleSupSub:!0,parentIsSupSub:!1,symbol:!1,suppressBaseShift:n!=="\\stackrel",body:Ge(a)},y={type:"supsub",mode:c.mode,base:g,sup:n==="\\underset"?null:c,sub:n==="\\underset"?c:null};return{type:"mclass",mode:r.mode,mclass:d,body:[y],isCharacterBox:q.isCharacterBox(y)}},htmlBuilder:ia,mathmlBuilder:la}),ee({type:"pmb",names:["\\pmb"],props:{numArgs:1,allowedInText:!0},handler:function(e,t){var r=e.parser;return{type:"pmb",mode:r.mode,mclass:kr(t[0]),body:Ge(t[0])}},htmlBuilder:function(e,t){var r=je(e.body,t,!0),n=E.makeSpan([e.mclass],r,t);return n.style.textShadow="0.02em 0.01em 0.04px",n},mathmlBuilder:function(e,t){var r=ot(e.body,t),n=new W.MathNode("mstyle",r);return n.setAttribute("style","text-shadow: 0.02em 0.01em 0.04px"),n}});var Vl={">":"\\\\cdrightarrow","<":"\\\\cdleftarrow","=":"\\\\cdlongequal",A:"\\uparrow",V:"\\downarrow","|":"\\Vert",".":"no arrow"},sa=function(){return{type:"styling",body:[],mode:"math",style:"display"}},oa=function(e){return e.type==="textord"&&e.text==="@"},Wl=function(e,t){return(e.type==="mathord"||e.type==="atom")&&e.text===t};function Yl(u,e,t){var 
r=Vl[u];switch(r){case"\\\\cdrightarrow":case"\\\\cdleftarrow":return t.callFunction(r,[e[0]],[e[1]]);case"\\uparrow":case"\\downarrow":{var n=t.callFunction("\\\\cdleft",[e[0]],[]),a={type:"atom",text:r,mode:"math",family:"rel"},c=t.callFunction("\\Big",[a],[]),d=t.callFunction("\\\\cdright",[e[1]],[]),g={type:"ordgroup",mode:"math",body:[n,c,d]};return t.callFunction("\\\\cdparent",[g],[])}case"\\\\cdlongequal":return t.callFunction("\\\\cdlongequal",[],[]);case"\\Vert":{var y={type:"textord",text:"\\Vert",mode:"math"};return t.callFunction("\\Big",[y],[])}default:return{type:"textord",text:" ",mode:"math"}}}function jl(u){var e=[];for(u.gullet.beginGroup(),u.gullet.macros.set("\\cr","\\\\\\relax"),u.gullet.beginGroup();;){e.push(u.parseExpression(!1,"\\\\")),u.gullet.endGroup(),u.gullet.beginGroup();var t=u.fetch().text;if(t==="&"||t==="\\\\")u.consume();else if(t==="\\end"){e[e.length-1].length===0&&e.pop();break}else throw new p("Expected \\\\ or \\cr or \\end",u.nextToken)}for(var r=[],n=[r],a=0;a-1))if("<>AV".indexOf(y)>-1)for(var B=0;B<2;B++){for(var F=!0,R=g+1;RAV=|." after @',c[g]);var O=Yl(y,T,u),Y={type:"styling",body:[O],mode:"math",style:"display"};r.push(Y),d=sa()}a%2===0?r.push(d):r.shift(),r=[],n.push(r)}u.gullet.endGroup(),u.gullet.endGroup();var Q=new Array(n[0].length).fill({type:"align",align:"c",pregap:.25,postgap:.25});return{type:"array",mode:"math",body:n,arraystretch:1,addJot:!0,rowGaps:[null],cols:Q,colSeparationType:"CD",hLinesBeforeRow:new Array(n.length+1).fill([])}}ee({type:"cdlabel",names:["\\\\cdleft","\\\\cdright"],props:{numArgs:1},handler:function(e,t){var r=e.parser,n=e.funcName;return{type:"cdlabel",mode:r.mode,side:n.slice(4),label:t[0]}},htmlBuilder:function(e,t){var r=t.havingStyle(t.style.sup()),n=E.wrapFragment(ke(e.label,r,t),t);return n.classes.push("cd-label-"+e.side),n.style.bottom=X(.8-n.depth),n.height=0,n.depth=0,n},mathmlBuilder:function(e,t){var r=new W.MathNode("mrow",[Ce(e.label,t)]);return r=new W.MathNode("mpadded",[r]),r.setAttribute("width","0"),e.side==="left"&&r.setAttribute("lspace","-1width"),r.setAttribute("voffset","0.7em"),r=new W.MathNode("mstyle",[r]),r.setAttribute("displaystyle","false"),r.setAttribute("scriptlevel","1"),r}}),ee({type:"cdlabelparent",names:["\\\\cdparent"],props:{numArgs:1},handler:function(e,t){var r=e.parser;return{type:"cdlabelparent",mode:r.mode,fragment:t[0]}},htmlBuilder:function(e,t){var r=E.wrapFragment(ke(e.fragment,t),t);return r.classes.push("cd-vert-arrow"),r},mathmlBuilder:function(e,t){return new W.MathNode("mrow",[Ce(e.fragment,t)])}}),ee({type:"textord",names:["\\@char"],props:{numArgs:1,allowedInText:!0},handler:function(e,t){for(var r=e.parser,n=ve(t[0],"ordgroup"),a=n.body,c="",d=0;d=1114111)throw new p("\\@char with invalid code point "+c);return y<=65535?T=String.fromCharCode(y):(y-=65536,T=String.fromCharCode((y>>10)+55296,(y&1023)+56320)),{type:"textord",mode:r.mode,text:T}}});var ua=function(e,t){var r=je(e.body,t.withColor(e.color),!1);return E.makeFragment(r)},ca=function(e,t){var r=ot(e.body,t.withColor(e.color)),n=new W.MathNode("mstyle",r);return n.setAttribute("mathcolor",e.color),n};ee({type:"color",names:["\\textcolor"],props:{numArgs:2,allowedInText:!0,argTypes:["color","original"]},handler:function(e,t){var r=e.parser,n=ve(t[0],"color-token").color,a=t[1];return{type:"color",mode:r.mode,color:n,body:Ge(a)}},htmlBuilder:ua,mathmlBuilder:ca}),ee({type:"color",names:["\\color"],props:{numArgs:1,allowedInText:!0,argTypes:["color"]},handler:function(e,t){var 
r=e.parser,n=e.breakOnTokenText,a=ve(t[0],"color-token").color;r.gullet.macros.set("\\current@color",a);var c=r.parseExpression(!0,n);return{type:"color",mode:r.mode,color:a,body:c}},htmlBuilder:ua,mathmlBuilder:ca}),ee({type:"cr",names:["\\\\"],props:{numArgs:0,numOptionalArgs:0,allowedInText:!0},handler:function(e,t,r){var n=e.parser,a=n.gullet.future().text==="["?n.parseSizeGroup(!0):null,c=!n.settings.displayMode||!n.settings.useStrictBehavior("newLineInDisplayMode","In LaTeX, \\\\ or \\newline does nothing in display mode");return{type:"cr",mode:n.mode,newLine:c,size:a&&ve(a,"size").value}},htmlBuilder:function(e,t){var r=E.makeSpan(["mspace"],[],t);return e.newLine&&(r.classes.push("newline"),e.size&&(r.style.marginTop=X(Ee(e.size,t)))),r},mathmlBuilder:function(e,t){var r=new W.MathNode("mspace");return e.newLine&&(r.setAttribute("linebreak","newline"),e.size&&r.setAttribute("height",X(Ee(e.size,t)))),r}});var en={"\\global":"\\global","\\long":"\\\\globallong","\\\\globallong":"\\\\globallong","\\def":"\\gdef","\\gdef":"\\gdef","\\edef":"\\xdef","\\xdef":"\\xdef","\\let":"\\\\globallet","\\futurelet":"\\\\globalfuture"},ha=function(e){var t=e.text;if(/^(?:[\\{}$&#^_]|EOF)$/.test(t))throw new p("Expected a control sequence",e);return t},Xl=function(e){var t=e.gullet.popToken();return t.text==="="&&(t=e.gullet.popToken(),t.text===" "&&(t=e.gullet.popToken())),t},ma=function(e,t,r,n){var a=e.gullet.macros.get(r.text);a==null&&(r.noexpand=!0,a={tokens:[r],numArgs:0,unexpandable:!e.gullet.isExpandable(r.text)}),e.gullet.macros.set(t,a,n)};ee({type:"internal",names:["\\global","\\long","\\\\globallong"],props:{numArgs:0,allowedInText:!0},handler:function(e){var t=e.parser,r=e.funcName;t.consumeSpaces();var n=t.fetch();if(en[n.text])return(r==="\\global"||r==="\\\\globallong")&&(n.text=en[n.text]),ve(t.parseFunction(),"internal");throw new p("Invalid token after macro prefix",n)}}),ee({type:"internal",names:["\\def","\\gdef","\\edef","\\xdef"],props:{numArgs:0,allowedInText:!0,primitive:!0},handler:function(e){var t=e.parser,r=e.funcName,n=t.gullet.popToken(),a=n.text;if(/^(?:[\\{}$&#^_]|EOF)$/.test(a))throw new p("Expected a control sequence",n);for(var c=0,d,g=[[]];t.gullet.future().text!=="{";)if(n=t.gullet.popToken(),n.text==="#"){if(t.gullet.future().text==="{"){d=t.gullet.future(),g[c].push("{");break}if(n=t.gullet.popToken(),!/^[1-9]$/.test(n.text))throw new p('Invalid argument number "'+n.text+'"');if(parseInt(n.text)!==c+1)throw new p('Argument number "'+n.text+'" out of order');c++,g.push([])}else{if(n.text==="EOF")throw new p("Expected a macro definition");g[c].push(n.text)}var y=t.gullet.consumeArg(),T=y.tokens;return d&&T.unshift(d),(r==="\\edef"||r==="\\xdef")&&(T=t.gullet.expandTokens(T),T.reverse()),t.gullet.macros.set(a,{tokens:T,numArgs:c,delimiters:g},r===en[r]),{type:"internal",mode:t.mode}}}),ee({type:"internal",names:["\\let","\\\\globallet"],props:{numArgs:0,allowedInText:!0,primitive:!0},handler:function(e){var t=e.parser,r=e.funcName,n=ha(t.gullet.popToken());t.gullet.consumeSpaces();var a=Xl(t);return ma(t,n,a,r==="\\\\globallet"),{type:"internal",mode:t.mode}}}),ee({type:"internal",names:["\\futurelet","\\\\globalfuture"],props:{numArgs:0,allowedInText:!0,primitive:!0},handler:function(e){var t=e.parser,r=e.funcName,n=ha(t.gullet.popToken()),a=t.gullet.popToken(),c=t.gullet.popToken();return ma(t,n,c,r==="\\\\globalfuture"),t.gullet.pushToken(c),t.gullet.pushToken(a),{type:"internal",mode:t.mode}}});var Q0=function(e,t,r){var 
n=Ne.math[e]&&Ne.math[e].replace,a=Dt(n||e,t,r);if(!a)throw new Error("Unsupported symbol "+e+" and font size "+t+".");return a},tn=function(e,t,r,n){var a=r.havingBaseStyle(t),c=E.makeSpan(n.concat(a.sizingClasses(r)),[e],r),d=a.sizeMultiplier/r.sizeMultiplier;return c.height*=d,c.depth*=d,c.maxFontSize=a.sizeMultiplier,c},da=function(e,t,r){var n=t.havingBaseStyle(r),a=(1-t.sizeMultiplier/n.sizeMultiplier)*t.fontMetrics().axisHeight;e.classes.push("delimcenter"),e.style.top=X(a),e.height-=a,e.depth+=a},$l=function(e,t,r,n,a,c){var d=E.makeSymbol(e,"Main-Regular",a,n),g=tn(d,t,n,c);return r&&da(g,n,t),g},Zl=function(e,t,r,n){return E.makeSymbol(e,"Size"+t+"-Regular",r,n)},fa=function(e,t,r,n,a,c){var d=Zl(e,t,a,n),g=tn(E.makeSpan(["delimsizing","size"+t],[d],n),J.TEXT,n,c);return r&&da(g,n,J.TEXT),g},rn=function(e,t,r){var n;t==="Size1-Regular"?n="delim-size1":n="delim-size4";var a=E.makeSpan(["delimsizinginner",n],[E.makeSpan([],[E.makeSymbol(e,t,r)])]);return{type:"elem",elem:a}},nn=function(e,t,r){var n=vt["Size4-Regular"][e.charCodeAt(0)]?vt["Size4-Regular"][e.charCodeAt(0)][4]:vt["Size1-Regular"][e.charCodeAt(0)][4],a=new Nt("inner",k0(e,Math.round(1e3*t))),c=new yt([a],{width:X(n),height:X(t),style:"width:"+X(n),viewBox:"0 0 "+1e3*n+" "+Math.round(1e3*t),preserveAspectRatio:"xMinYMin"}),d=E.makeSvgSpan([],[c],r);return d.height=t,d.style.height=X(t),d.style.width=X(n),{type:"elem",elem:d}},an=.008,Sr={type:"kern",size:-1*an},Kl=["|","\\lvert","\\rvert","\\vert"],Ql=["\\|","\\lVert","\\rVert","\\Vert"],pa=function(e,t,r,n,a,c){var d,g,y,T,B="",F=0;d=y=T=e,g=null;var R="Size1-Regular";e==="\\uparrow"?y=T="⏐":e==="\\Uparrow"?y=T="‖":e==="\\downarrow"?d=y="⏐":e==="\\Downarrow"?d=y="‖":e==="\\updownarrow"?(d="\\uparrow",y="⏐",T="\\downarrow"):e==="\\Updownarrow"?(d="\\Uparrow",y="‖",T="\\Downarrow"):q.contains(Kl,e)?(y="∣",B="vert",F=333):q.contains(Ql,e)?(y="∥",B="doublevert",F=556):e==="["||e==="\\lbrack"?(d="⎡",y="⎢",T="⎣",R="Size4-Regular",B="lbrack",F=667):e==="]"||e==="\\rbrack"?(d="⎤",y="⎥",T="⎦",R="Size4-Regular",B="rbrack",F=667):e==="\\lfloor"||e==="⌊"?(y=d="⎢",T="⎣",R="Size4-Regular",B="lfloor",F=667):e==="\\lceil"||e==="⌈"?(d="⎡",y=T="⎢",R="Size4-Regular",B="lceil",F=667):e==="\\rfloor"||e==="⌋"?(y=d="⎥",T="⎦",R="Size4-Regular",B="rfloor",F=667):e==="\\rceil"||e==="⌉"?(d="⎤",y=T="⎥",R="Size4-Regular",B="rceil",F=667):e==="("||e==="\\lparen"?(d="⎛",y="⎜",T="⎝",R="Size4-Regular",B="lparen",F=875):e===")"||e==="\\rparen"?(d="⎞",y="⎟",T="⎠",R="Size4-Regular",B="rparen",F=875):e==="\\{"||e==="\\lbrace"?(d="⎧",g="⎨",T="⎩",y="⎪",R="Size4-Regular"):e==="\\}"||e==="\\rbrace"?(d="⎫",g="⎬",T="⎭",y="⎪",R="Size4-Regular"):e==="\\lgroup"||e==="⟮"?(d="⎧",T="⎩",y="⎪",R="Size4-Regular"):e==="\\rgroup"||e==="⟯"?(d="⎫",T="⎭",y="⎪",R="Size4-Regular"):e==="\\lmoustache"||e==="⎰"?(d="⎧",T="⎭",y="⎪",R="Size4-Regular"):(e==="\\rmoustache"||e==="⎱")&&(d="⎫",T="⎩",y="⎪",R="Size4-Regular");var O=Q0(d,R,a),Y=O.height+O.depth,Q=Q0(y,R,a),ae=Q.height+Q.depth,ue=Q0(T,R,a),ce=ue.height+ue.depth,Ae=0,be=1;if(g!==null){var Me=Q0(g,R,a);Ae=Me.height+Me.depth,be=2}var Se=Y+ce+Ae,_e=Math.max(0,Math.ceil((t-Se)/(be*ae))),Ie=Se+_e*be*ae,Qe=n.fontMetrics().axisHeight;r&&(Qe*=n.sizeMultiplier);var dt=Ie/2-Qe,Oe=[];if(B.length>0){var v0=Ie-Y-ce,kt=Math.round(Ie*1e3),rt=ar(B,Math.round(v0*1e3)),n0=new Nt(B,rt),C0=(F/1e3).toFixed(3)+"em",D0=(kt/1e3).toFixed(3)+"em",An=new yt([n0],{width:C0,height:D0,viewBox:"0 0 "+F+" 
"+kt}),a0=E.makeSvgSpan([],[An],n);a0.height=kt/1e3,a0.style.width=C0,a0.style.height=D0,Oe.push({type:"elem",elem:a0})}else{if(Oe.push(rn(T,R,a)),Oe.push(Sr),g===null){var i0=Ie-Y-ce+2*an;Oe.push(nn(y,i0,n))}else{var St=(Ie-Y-ce-Ae)/2+2*an;Oe.push(nn(y,St,n)),Oe.push(Sr),Oe.push(rn(g,R,a)),Oe.push(Sr),Oe.push(nn(y,St,n))}Oe.push(Sr),Oe.push(rn(d,R,a))}var tr=n.havingBaseStyle(J.TEXT),Tn=E.makeVList({positionType:"bottom",positionData:dt,children:Oe},tr);return tn(E.makeSpan(["delimsizing","mult"],[Tn],tr),J.TEXT,n,c)},ln=80,sn=.08,on=function(e,t,r,n,a){var c=Wt(e,n,r),d=new Nt(e,c),g=new yt([d],{width:"400em",height:X(t),viewBox:"0 0 400000 "+r,preserveAspectRatio:"xMinYMin slice"});return E.makeSvgSpan(["hide-tail"],[g],a)},Jl=function(e,t){var r=t.havingBaseSizing(),n=ya("\\surd",e*r.sizeMultiplier,ba,r),a=r.sizeMultiplier,c=Math.max(0,t.minRuleThickness-t.fontMetrics().sqrtRuleThickness),d,g=0,y=0,T=0,B;return n.type==="small"?(T=1e3+1e3*c+ln,e<1?a=1:e<1.4&&(a=.7),g=(1+c+sn)/a,y=(1+c)/a,d=on("sqrtMain",g,T,c,t),d.style.minWidth="0.853em",B=.833/a):n.type==="large"?(T=(1e3+ln)*J0[n.size],y=(J0[n.size]+c)/a,g=(J0[n.size]+c+sn)/a,d=on("sqrtSize"+n.size,g,T,c,t),d.style.minWidth="1.02em",B=1/a):(g=e+c+sn,y=e+c,T=Math.floor(1e3*e+c)+ln,d=on("sqrtTall",g,T,c,t),d.style.minWidth="0.742em",B=1.056),d.height=y,d.style.height=X(g),{span:d,advanceWidth:B,ruleWidth:(t.fontMetrics().sqrtRuleThickness+c)*a}},ga=["(","\\lparen",")","\\rparen","[","\\lbrack","]","\\rbrack","\\{","\\lbrace","\\}","\\rbrace","\\lfloor","\\rfloor","⌊","⌋","\\lceil","\\rceil","⌈","⌉","\\surd"],es=["\\uparrow","\\downarrow","\\updownarrow","\\Uparrow","\\Downarrow","\\Updownarrow","|","\\|","\\vert","\\Vert","\\lvert","\\rvert","\\lVert","\\rVert","\\lgroup","\\rgroup","⟮","⟯","\\lmoustache","\\rmoustache","⎰","⎱"],va=["<",">","\\langle","\\rangle","/","\\backslash","\\lt","\\gt"],J0=[0,1.2,1.8,2.4,3],ts=function(e,t,r,n,a){if(e==="<"||e==="\\lt"||e==="⟨"?e="\\langle":(e===">"||e==="\\gt"||e==="⟩")&&(e="\\rangle"),q.contains(ga,e)||q.contains(va,e))return fa(e,t,!1,r,n,a);if(q.contains(es,e))return pa(e,J0[t],!1,r,n,a);throw new p("Illegal delimiter: '"+e+"'")},rs=[{type:"small",style:J.SCRIPTSCRIPT},{type:"small",style:J.SCRIPT},{type:"small",style:J.TEXT},{type:"large",size:1},{type:"large",size:2},{type:"large",size:3},{type:"large",size:4}],ns=[{type:"small",style:J.SCRIPTSCRIPT},{type:"small",style:J.SCRIPT},{type:"small",style:J.TEXT},{type:"stack"}],ba=[{type:"small",style:J.SCRIPTSCRIPT},{type:"small",style:J.SCRIPT},{type:"small",style:J.TEXT},{type:"large",size:1},{type:"large",size:2},{type:"large",size:3},{type:"large",size:4},{type:"stack"}],as=function(e){if(e.type==="small")return"Main-Regular";if(e.type==="large")return"Size"+e.size+"-Regular";if(e.type==="stack")return"Size4-Regular";throw new Error("Add support for delim type '"+e.type+"' here.")},ya=function(e,t,r,n){for(var a=Math.min(2,3-n.style.size),c=a;ct)return r[c]}return r[r.length-1]},xa=function(e,t,r,n,a,c){e==="<"||e==="\\lt"||e==="⟨"?e="\\langle":(e===">"||e==="\\gt"||e==="⟩")&&(e="\\rangle");var d;q.contains(va,e)?d=rs:q.contains(ga,e)?d=ba:d=ns;var g=ya(e,t,d,n);return g.type==="small"?$l(e,g.style,r,n,a,c):g.type==="large"?fa(e,g.size,r,n,a,c):pa(e,t,r,n,a,c)},is=function(e,t,r,n,a,c){var d=n.fontMetrics().axisHeight*n.sizeMultiplier,g=901,y=5/n.fontMetrics().ptPerEm,T=Math.max(t-d,r+d),B=Math.max(T/500*g,2*T-y);return 
xa(e,B,!0,n,a,c)},Ut={sqrtImage:Jl,sizedDelim:ts,sizeToMaxHeight:J0,customSizedDelim:xa,leftRightDelim:is},wa={"\\bigl":{mclass:"mopen",size:1},"\\Bigl":{mclass:"mopen",size:2},"\\biggl":{mclass:"mopen",size:3},"\\Biggl":{mclass:"mopen",size:4},"\\bigr":{mclass:"mclose",size:1},"\\Bigr":{mclass:"mclose",size:2},"\\biggr":{mclass:"mclose",size:3},"\\Biggr":{mclass:"mclose",size:4},"\\bigm":{mclass:"mrel",size:1},"\\Bigm":{mclass:"mrel",size:2},"\\biggm":{mclass:"mrel",size:3},"\\Biggm":{mclass:"mrel",size:4},"\\big":{mclass:"mord",size:1},"\\Big":{mclass:"mord",size:2},"\\bigg":{mclass:"mord",size:3},"\\Bigg":{mclass:"mord",size:4}},ls=["(","\\lparen",")","\\rparen","[","\\lbrack","]","\\rbrack","\\{","\\lbrace","\\}","\\rbrace","\\lfloor","\\rfloor","⌊","⌋","\\lceil","\\rceil","⌈","⌉","<",">","\\langle","⟨","\\rangle","⟩","\\lt","\\gt","\\lvert","\\rvert","\\lVert","\\rVert","\\lgroup","\\rgroup","⟮","⟯","\\lmoustache","\\rmoustache","⎰","⎱","/","\\backslash","|","\\vert","\\|","\\Vert","\\uparrow","\\Uparrow","\\downarrow","\\Downarrow","\\updownarrow","\\Updownarrow","."];function Ar(u,e){var t=xr(u);if(t&&q.contains(ls,t.text))return t;throw t?new p("Invalid delimiter '"+t.text+"' after '"+e.funcName+"'",u):new p("Invalid delimiter type '"+u.type+"'",u)}ee({type:"delimsizing",names:["\\bigl","\\Bigl","\\biggl","\\Biggl","\\bigr","\\Bigr","\\biggr","\\Biggr","\\bigm","\\Bigm","\\biggm","\\Biggm","\\big","\\Big","\\bigg","\\Bigg"],props:{numArgs:1,argTypes:["primitive"]},handler:function(e,t){var r=Ar(t[0],e);return{type:"delimsizing",mode:e.parser.mode,size:wa[e.funcName].size,mclass:wa[e.funcName].mclass,delim:r.text}},htmlBuilder:function(e,t){return e.delim==="."?E.makeSpan([e.mclass]):Ut.sizedDelim(e.delim,e.size,t,e.mode,[e.mclass])},mathmlBuilder:function(e){var t=[];e.delim!=="."&&t.push(wt(e.delim,e.mode));var r=new W.MathNode("mo",t);e.mclass==="mopen"||e.mclass==="mclose"?r.setAttribute("fence","true"):r.setAttribute("fence","false"),r.setAttribute("stretchy","true");var n=X(Ut.sizeToMaxHeight[e.size]);return r.setAttribute("minsize",n),r.setAttribute("maxsize",n),r}});function ka(u){if(!u.body)throw new Error("Bug: The leftright ParseNode wasn't fully parsed.")}ee({type:"leftright-right",names:["\\right"],props:{numArgs:1,primitive:!0},handler:function(e,t){var r=e.parser.gullet.macros.get("\\current@color");if(r&&typeof r!="string")throw new p("\\current@color set to non-string in \\right");return{type:"leftright-right",mode:e.parser.mode,delim:Ar(t[0],e).text,color:r}}}),ee({type:"leftright",names:["\\left"],props:{numArgs:1,primitive:!0},handler:function(e,t){var r=Ar(t[0],e),n=e.parser;++n.leftrightDepth;var a=n.parseExpression(!1);--n.leftrightDepth,n.expect("\\right",!1);var c=ve(n.parseFunction(),"leftright-right");return{type:"leftright",mode:n.mode,body:a,left:r.text,right:c.delim,rightColor:c.color}},htmlBuilder:function(e,t){ka(e);for(var 
r=je(e.body,t,!0,["mopen","mclose"]),n=0,a=0,c=!1,d=0;d-1?"mpadded":"menclose",[Ce(e.body,t)]);switch(e.label){case"\\cancel":n.setAttribute("notation","updiagonalstrike");break;case"\\bcancel":n.setAttribute("notation","downdiagonalstrike");break;case"\\phase":n.setAttribute("notation","phasorangle");break;case"\\sout":n.setAttribute("notation","horizontalstrike");break;case"\\fbox":n.setAttribute("notation","box");break;case"\\angl":n.setAttribute("notation","actuarial");break;case"\\fcolorbox":case"\\colorbox":if(r=t.fontMetrics().fboxsep*t.fontMetrics().ptPerEm,n.setAttribute("width","+"+2*r+"pt"),n.setAttribute("height","+"+2*r+"pt"),n.setAttribute("lspace",r+"pt"),n.setAttribute("voffset",r+"pt"),e.label==="\\fcolorbox"){var a=Math.max(t.fontMetrics().fboxrule,t.minRuleThickness);n.setAttribute("style","border: "+a+"em solid "+String(e.borderColor))}break;case"\\xcancel":n.setAttribute("notation","updiagonalstrike downdiagonalstrike");break}return e.backgroundColor&&n.setAttribute("mathbackground",e.backgroundColor),n};ee({type:"enclose",names:["\\colorbox"],props:{numArgs:2,allowedInText:!0,argTypes:["color","text"]},handler:function(e,t,r){var n=e.parser,a=e.funcName,c=ve(t[0],"color-token").color,d=t[1];return{type:"enclose",mode:n.mode,label:a,backgroundColor:c,body:d}},htmlBuilder:un,mathmlBuilder:cn}),ee({type:"enclose",names:["\\fcolorbox"],props:{numArgs:3,allowedInText:!0,argTypes:["color","color","text"]},handler:function(e,t,r){var n=e.parser,a=e.funcName,c=ve(t[0],"color-token").color,d=ve(t[1],"color-token").color,g=t[2];return{type:"enclose",mode:n.mode,label:a,backgroundColor:d,borderColor:c,body:g}},htmlBuilder:un,mathmlBuilder:cn}),ee({type:"enclose",names:["\\fbox"],props:{numArgs:1,argTypes:["hbox"],allowedInText:!0},handler:function(e,t){var r=e.parser;return{type:"enclose",mode:r.mode,label:"\\fbox",body:t[0]}}}),ee({type:"enclose",names:["\\cancel","\\bcancel","\\xcancel","\\sout","\\phase"],props:{numArgs:1},handler:function(e,t){var r=e.parser,n=e.funcName,a=t[0];return{type:"enclose",mode:r.mode,label:n,body:a}},htmlBuilder:un,mathmlBuilder:cn}),ee({type:"enclose",names:["\\angl"],props:{numArgs:1,argTypes:["hbox"],allowedInText:!1},handler:function(e,t){var r=e.parser;return{type:"enclose",mode:r.mode,label:"\\angl",body:t[0]}}});var Sa={};function Rt(u){for(var e=u.type,t=u.names,r=u.props,n=u.handler,a=u.htmlBuilder,c=u.mathmlBuilder,d={type:e,numArgs:r.numArgs||0,allowedInText:!1,numOptionalArgs:0,handler:n},g=0;g1||!T)&&Y.pop(),ae.length0&&(ce+=.25),y.push({pos:ce,isDashed:Er[Br]})}for(Ae(c[0]),r=0;r0&&(dt+=ue,Se=d)){var R0=void 0;(n>0||e.hskipBeforeAndAfter)&&(R0=q.deflt(St.pregap,F),R0!==0&&(rt=E.makeSpan(["arraycolsep"],[]),rt.style.width=X(R0),kt.push(rt)));var F0=[];for(r=0;r0){for(var Rs=E.makeLineSpan("hline",t,T),Fs=E.makeLineSpan("hdashline",t,T),Mn=[{type:"elem",elem:g,shift:0}];y.length>0;){var si=y.pop(),oi=si.pos-Oe;si.isDashed?Mn.push({type:"elem",elem:Fs,shift:oi}):Mn.push({type:"elem",elem:Rs,shift:oi})}g=E.makeVList({positionType:"individualShift",children:Mn},t)}if(C0.length===0)return E.makeSpan(["mord"],[g],t);var zn=E.makeVList({positionType:"individualShift",children:C0},t);return zn=E.makeSpan(["tag"],[zn],t),E.makeFragment([g,zn])},ss={c:"center ",l:"left ",r:"right "},It=function(e,t){for(var r=[],n=new W.MathNode("mtd",[],["mtr-glue"]),a=new W.MathNode("mtd",[],["mml-eqn-num"]),c=0;c0){var O=e.cols,Y="",Q=!1,ae=0,ue=O.length;O[0].type==="separator"&&(F+="top ",ae=1),O[O.length-1].type==="separator"&&(F+="bottom ",ue-=1);for(var 
ce=ae;ce0?"left ":"",F+=_e[_e.length-1].length>0?"right ":"";for(var Ie=1;Ie<_e.length-1;Ie++)Se+=_e[Ie].length===0?"none ":_e[Ie][0]?"dashed ":"solid ";return/[sd]/.test(Se)&&T.setAttribute("rowlines",Se.trim()),F!==""&&(T=new W.MathNode("menclose",[T]),T.setAttribute("notation",F.trim())),e.arraystretch&&e.arraystretch<1&&(T=new W.MathNode("mstyle",[T]),T.setAttribute("scriptlevel","1")),T},Ma=function(e,t){e.envName.indexOf("ed")===-1&&Tr(e);var r=[],n=e.envName.indexOf("at")>-1?"alignat":"align",a=e.envName==="split",c=t0(e.parser,{cols:r,addJot:!0,autoTag:a?void 0:hn(e.envName),emptySingleRow:!0,colSeparationType:n,maxNumCols:a?2:void 0,leqno:e.parser.settings.leqno},"display"),d,g=0,y={type:"ordgroup",mode:e.mode,body:[]};if(t[0]&&t[0].type==="ordgroup"){for(var T="",B=0;B0&&R&&(Q=1),r[O]={type:"align",align:Y,pregap:Q,postgap:0}}return c.colSeparationType=R?"align":"alignat",c};Rt({type:"array",names:["array","darray"],props:{numArgs:1},handler:function(e,t){var r=xr(t[0]),n=r?[t[0]]:ve(t[0],"ordgroup").body,a=n.map(function(d){var g=Qr(d),y=g.text;if("lcr".indexOf(y)!==-1)return{type:"align",align:y};if(y==="|")return{type:"separator",separator:"|"};if(y===":")return{type:"separator",separator:":"};throw new p("Unknown column alignment: "+y,d)}),c={cols:a,hskipBeforeAndAfter:!0,maxNumCols:a.length};return t0(e.parser,c,mn(e.envName))},htmlBuilder:Ft,mathmlBuilder:It}),Rt({type:"array",names:["matrix","pmatrix","bmatrix","Bmatrix","vmatrix","Vmatrix","matrix*","pmatrix*","bmatrix*","Bmatrix*","vmatrix*","Vmatrix*"],props:{numArgs:0},handler:function(e){var t={matrix:null,pmatrix:["(",")"],bmatrix:["[","]"],Bmatrix:["\\{","\\}"],vmatrix:["|","|"],Vmatrix:["\\Vert","\\Vert"]}[e.envName.replace("*","")],r="c",n={hskipBeforeAndAfter:!1,cols:[{type:"align",align:r}]};if(e.envName.charAt(e.envName.length-1)==="*"){var a=e.parser;if(a.consumeSpaces(),a.fetch().text==="["){if(a.consume(),a.consumeSpaces(),r=a.fetch().text,"lcr".indexOf(r)===-1)throw new p("Expected l or c or r",a.nextToken);a.consume(),a.consumeSpaces(),a.expect("]"),a.consume(),n.cols=[{type:"align",align:r}]}}var c=t0(e.parser,n,mn(e.envName)),d=Math.max.apply(Math,[0].concat(c.body.map(function(g){return g.length})));return c.cols=new Array(d).fill({type:"align",align:r}),t?{type:"leftright",mode:e.mode,body:[c],left:t[0],right:t[1],rightColor:void 0}:c},htmlBuilder:Ft,mathmlBuilder:It}),Rt({type:"array",names:["smallmatrix"],props:{numArgs:0},handler:function(e){var t={arraystretch:.5},r=t0(e.parser,t,"script");return r.colSeparationType="small",r},htmlBuilder:Ft,mathmlBuilder:It}),Rt({type:"array",names:["subarray"],props:{numArgs:1},handler:function(e,t){var r=xr(t[0]),n=r?[t[0]]:ve(t[0],"ordgroup").body,a=n.map(function(d){var g=Qr(d),y=g.text;if("lc".indexOf(y)!==-1)return{type:"align",align:y};throw new p("Unknown column alignment: "+y,d)});if(a.length>1)throw new p("{subarray} can contain only one column");var c={cols:a,hskipBeforeAndAfter:!1,arraystretch:.5};if(c=t0(e.parser,c,"script"),c.body.length>0&&c.body[0].length>1)throw new p("{subarray} can contain only one column");return c},htmlBuilder:Ft,mathmlBuilder:It}),Rt({type:"array",names:["cases","dcases","rcases","drcases"],props:{numArgs:0},handler:function(e){var t={arraystretch:1.2,cols:[{type:"align",align:"l",pregap:0,postgap:1},{type:"align",align:"l",pregap:0,postgap:0}]},r=t0(e.parser,t,mn(e.envName));return{type:"leftright",mode:e.mode,body:[r],left:e.envName.indexOf("r")>-1?".":"\\{",right:e.envName.indexOf("r")>-1?"\\}":".",rightColor:void 
0}},htmlBuilder:Ft,mathmlBuilder:It}),Rt({type:"array",names:["align","align*","aligned","split"],props:{numArgs:0},handler:Ma,htmlBuilder:Ft,mathmlBuilder:It}),Rt({type:"array",names:["gathered","gather","gather*"],props:{numArgs:0},handler:function(e){q.contains(["gather","gather*"],e.envName)&&Tr(e);var t={cols:[{type:"align",align:"c"}],addJot:!0,colSeparationType:"gather",autoTag:hn(e.envName),emptySingleRow:!0,leqno:e.parser.settings.leqno};return t0(e.parser,t,"display")},htmlBuilder:Ft,mathmlBuilder:It}),Rt({type:"array",names:["alignat","alignat*","alignedat"],props:{numArgs:1},handler:Ma,htmlBuilder:Ft,mathmlBuilder:It}),Rt({type:"array",names:["equation","equation*"],props:{numArgs:0},handler:function(e){Tr(e);var t={autoTag:hn(e.envName),emptySingleRow:!0,singleRow:!0,maxNumCols:1,leqno:e.parser.settings.leqno};return t0(e.parser,t,"display")},htmlBuilder:Ft,mathmlBuilder:It}),Rt({type:"array",names:["CD"],props:{numArgs:0},handler:function(e){return Tr(e),jl(e.parser)},htmlBuilder:Ft,mathmlBuilder:It}),b("\\nonumber","\\gdef\\@eqnsw{0}"),b("\\notag","\\nonumber"),ee({type:"text",names:["\\hline","\\hdashline"],props:{numArgs:0,allowedInText:!0,allowedInMath:!0},handler:function(e,t){throw new p(e.funcName+" valid only within array environment")}});var os=Sa,za=os;ee({type:"environment",names:["\\begin","\\end"],props:{numArgs:1,argTypes:["text"]},handler:function(e,t){var r=e.parser,n=e.funcName,a=t[0];if(a.type!=="ordgroup")throw new p("Invalid environment name",a);for(var c="",d=0;d=J.SCRIPT.id?r.text():J.DISPLAY:e==="text"&&r.size===J.DISPLAY.size?r=J.TEXT:e==="script"?r=J.SCRIPT:e==="scriptscript"&&(r=J.SCRIPTSCRIPT),r},dn=function(e,t){var r=Ca(e.size,t.style),n=r.fracNum(),a=r.fracDen(),c;c=t.havingStyle(n);var d=ke(e.numer,c,t);if(e.continued){var g=8.5/t.fontMetrics().ptPerEm,y=3.5/t.fontMetrics().ptPerEm;d.height=d.height0?Y=3*R:Y=7*R,Q=t.fontMetrics().denom1):(F>0?(O=t.fontMetrics().num2,Y=R):(O=t.fontMetrics().num3,Y=3*R),Q=t.fontMetrics().denom2);var ae;if(B){var ce=t.fontMetrics().axisHeight;O-d.depth-(ce+.5*F)0&&(t=e,t=t==="."?null:t),t};ee({type:"genfrac",names:["\\genfrac"],props:{numArgs:6,allowedInArgument:!0,argTypes:["math","math","size","text","math","math"]},handler:function(e,t){var r=e.parser,n=t[4],a=t[5],c=br(t[0]),d=c.type==="atom"&&c.family==="open"?Na(c.text):null,g=br(t[1]),y=g.type==="atom"&&g.family==="close"?Na(g.text):null,T=ve(t[2],"size"),B,F=null;T.isBlank?B=!0:(F=T.value,B=F.number>0);var R="auto",O=t[3];if(O.type==="ordgroup"){if(O.body.length>0){var Y=ve(O.body[0],"textord");R=Da[Number(Y.text)]}}else O=ve(O,"textord"),R=Da[Number(O.text)];return{type:"genfrac",mode:r.mode,numer:n,denom:a,continued:!1,hasBarLine:B,barSize:F,leftDelim:d,rightDelim:y,size:R}},htmlBuilder:dn,mathmlBuilder:fn}),ee({type:"infix",names:["\\above"],props:{numArgs:1,argTypes:["size"],infix:!0},handler:function(e,t){var r=e.parser;e.funcName;var n=e.token;return{type:"infix",mode:r.mode,replaceWith:"\\\\abovefrac",size:ve(t[0],"size").value,token:n}}}),ee({type:"genfrac",names:["\\\\abovefrac"],props:{numArgs:3,argTypes:["math","size","math"]},handler:function(e,t){var r=e.parser;e.funcName;var n=t[0],a=ge(ve(t[1],"infix").size),c=t[2],d=a.number>0;return{type:"genfrac",mode:r.mode,numer:n,denom:c,continued:!1,hasBarLine:d,barSize:a,leftDelim:null,rightDelim:null,size:"auto"}},htmlBuilder:dn,mathmlBuilder:fn});var Ra=function(e,t){var 
r=t.style,n,a;e.type==="supsub"?(n=e.sup?ke(e.sup,t.havingStyle(r.sup()),t):ke(e.sub,t.havingStyle(r.sub()),t),a=ve(e.base,"horizBrace")):a=ve(e,"horizBrace");var c=ke(a.base,t.havingBaseStyle(J.DISPLAY)),d=Ht.svgSpan(a,t),g;if(a.isOver?(g=E.makeVList({positionType:"firstBaseline",children:[{type:"elem",elem:c},{type:"kern",size:.1},{type:"elem",elem:d}]},t),g.children[0].children[0].children[1].classes.push("svg-align")):(g=E.makeVList({positionType:"bottom",positionData:c.depth+.1+d.height,children:[{type:"elem",elem:d},{type:"kern",size:.1},{type:"elem",elem:c}]},t),g.children[0].children[0].children[0].classes.push("svg-align")),n){var y=E.makeSpan(["mord",a.isOver?"mover":"munder"],[g],t);a.isOver?g=E.makeVList({positionType:"firstBaseline",children:[{type:"elem",elem:y},{type:"kern",size:.2},{type:"elem",elem:n}]},t):g=E.makeVList({positionType:"bottom",positionData:y.depth+.2+n.height+n.depth,children:[{type:"elem",elem:n},{type:"kern",size:.2},{type:"elem",elem:y}]},t)}return E.makeSpan(["mord",a.isOver?"mover":"munder"],[g],t)},us=function(e,t){var r=Ht.mathMLnode(e.label);return new W.MathNode(e.isOver?"mover":"munder",[Ce(e.base,t),r])};ee({type:"horizBrace",names:["\\overbrace","\\underbrace"],props:{numArgs:1},handler:function(e,t){var r=e.parser,n=e.funcName;return{type:"horizBrace",mode:r.mode,label:n,isOver:/^\\over/.test(n),base:t[0]}},htmlBuilder:Ra,mathmlBuilder:us}),ee({type:"href",names:["\\href"],props:{numArgs:2,argTypes:["url","original"],allowedInText:!0},handler:function(e,t){var r=e.parser,n=t[1],a=ve(t[0],"url").url;return r.settings.isTrusted({command:"\\href",url:a})?{type:"href",mode:r.mode,href:a,body:Ge(n)}:r.formatUnsupportedCmd("\\href")},htmlBuilder:function(e,t){var r=je(e.body,t,!1);return E.makeAnchor(e.href,[],r,t)},mathmlBuilder:function(e,t){var r=Jt(e.body,t);return r instanceof xt||(r=new xt("mrow",[r])),r.setAttribute("href",e.href),r}}),ee({type:"href",names:["\\url"],props:{numArgs:1,argTypes:["url"],allowedInText:!0},handler:function(e,t){var r=e.parser,n=ve(t[0],"url").url;if(!r.settings.isTrusted({command:"\\url",url:n}))return r.formatUnsupportedCmd("\\url");for(var a=[],c=0;c0&&(n=Ee(e.totalheight,t)-r);var a=0;e.width.number>0&&(a=Ee(e.width,t));var c={height:X(r+n)};a>0&&(c.width=X(a)),n>0&&(c.verticalAlign=X(-n));var d=new ur(e.src,e.alt,c);return d.height=r,d.depth=n,d},mathmlBuilder:function(e,t){var r=new W.MathNode("mglyph",[]);r.setAttribute("alt",e.alt);var n=Ee(e.height,t),a=0;if(e.totalheight.number>0&&(a=Ee(e.totalheight,t)-n,r.setAttribute("valign",X(-a))),r.setAttribute("height",X(n+a)),e.width.number>0){var c=Ee(e.width,t);r.setAttribute("width",X(c))}return r.setAttribute("src",e.src),r}}),ee({type:"kern",names:["\\kern","\\mkern","\\hskip","\\mskip"],props:{numArgs:1,argTypes:["size"],primitive:!0,allowedInText:!0},handler:function(e,t){var r=e.parser,n=e.funcName,a=ve(t[0],"size");if(r.settings.strict){var c=n[1]==="m",d=a.value.unit==="mu";c?(d||r.settings.reportNonstrict("mathVsTextUnits","LaTeX's "+n+" supports only mu units, "+("not "+a.value.unit+" units")),r.mode!=="math"&&r.settings.reportNonstrict("mathVsTextUnits","LaTeX's "+n+" works only in math mode")):d&&r.settings.reportNonstrict("mathVsTextUnits","LaTeX's "+n+" doesn't support mu units")}return{type:"kern",mode:r.mode,dimension:a.value}},htmlBuilder:function(e,t){return E.makeGlue(e.dimension,t)},mathmlBuilder:function(e,t){var r=Ee(e.dimension,t);return new 
W.SpaceNode(r)}}),ee({type:"lap",names:["\\mathllap","\\mathrlap","\\mathclap"],props:{numArgs:1,allowedInText:!0},handler:function(e,t){var r=e.parser,n=e.funcName,a=t[0];return{type:"lap",mode:r.mode,alignment:n.slice(5),body:a}},htmlBuilder:function(e,t){var r;e.alignment==="clap"?(r=E.makeSpan([],[ke(e.body,t)]),r=E.makeSpan(["inner"],[r],t)):r=E.makeSpan(["inner"],[ke(e.body,t)]);var n=E.makeSpan(["fix"],[]),a=E.makeSpan([e.alignment],[r,n],t),c=E.makeSpan(["strut"]);return c.style.height=X(a.height+a.depth),a.depth&&(c.style.verticalAlign=X(-a.depth)),a.children.unshift(c),a=E.makeSpan(["thinbox"],[a],t),E.makeSpan(["mord","vbox"],[a],t)},mathmlBuilder:function(e,t){var r=new W.MathNode("mpadded",[Ce(e.body,t)]);if(e.alignment!=="rlap"){var n=e.alignment==="llap"?"-1":"-0.5";r.setAttribute("lspace",n+"width")}return r.setAttribute("width","0px"),r}}),ee({type:"styling",names:["\\(","$"],props:{numArgs:0,allowedInText:!0,allowedInMath:!1},handler:function(e,t){var r=e.funcName,n=e.parser,a=n.mode;n.switchMode("math");var c=r==="\\("?"\\)":"$",d=n.parseExpression(!1,c);return n.expect(c),n.switchMode(a),{type:"styling",mode:n.mode,style:"text",body:d}}}),ee({type:"text",names:["\\)","\\]"],props:{numArgs:0,allowedInText:!0,allowedInMath:!1},handler:function(e,t){throw new p("Mismatched "+e.funcName)}});var Fa=function(e,t){switch(t.style.size){case J.DISPLAY.size:return e.display;case J.TEXT.size:return e.text;case J.SCRIPT.size:return e.script;case J.SCRIPTSCRIPT.size:return e.scriptscript;default:return e.text}};ee({type:"mathchoice",names:["\\mathchoice"],props:{numArgs:4,primitive:!0},handler:function(e,t){var r=e.parser;return{type:"mathchoice",mode:r.mode,display:Ge(t[0]),text:Ge(t[1]),script:Ge(t[2]),scriptscript:Ge(t[3])}},htmlBuilder:function(e,t){var r=Fa(e,t),n=je(r,t,!1);return E.makeFragment(n)},mathmlBuilder:function(e,t){var r=Fa(e,t);return Jt(r,t)}});var Ia=function(e,t,r,n,a,c,d){e=E.makeSpan([],[e]);var g=r&&q.isCharacterBox(r),y,T;if(t){var B=ke(t,n.havingStyle(a.sup()),n);T={elem:B,kern:Math.max(n.fontMetrics().bigOpSpacing1,n.fontMetrics().bigOpSpacing3-B.depth)}}if(r){var F=ke(r,n.havingStyle(a.sub()),n);y={elem:F,kern:Math.max(n.fontMetrics().bigOpSpacing2,n.fontMetrics().bigOpSpacing4-F.height)}}var R;if(T&&y){var O=n.fontMetrics().bigOpSpacing5+y.elem.height+y.elem.depth+y.kern+e.depth+d;R=E.makeVList({positionType:"bottom",positionData:O,children:[{type:"kern",size:n.fontMetrics().bigOpSpacing5},{type:"elem",elem:y.elem,marginLeft:X(-c)},{type:"kern",size:y.kern},{type:"elem",elem:e},{type:"kern",size:T.kern},{type:"elem",elem:T.elem,marginLeft:X(c)},{type:"kern",size:n.fontMetrics().bigOpSpacing5}]},n)}else if(y){var Y=e.height-d;R=E.makeVList({positionType:"top",positionData:Y,children:[{type:"kern",size:n.fontMetrics().bigOpSpacing5},{type:"elem",elem:y.elem,marginLeft:X(-c)},{type:"kern",size:y.kern},{type:"elem",elem:e}]},n)}else if(T){var Q=e.depth+d;R=E.makeVList({positionType:"bottom",positionData:Q,children:[{type:"elem",elem:e},{type:"kern",size:T.kern},{type:"elem",elem:T.elem,marginLeft:X(c)},{type:"kern",size:n.fontMetrics().bigOpSpacing5}]},n)}else return e;var ae=[R];if(y&&c!==0&&!g){var ue=E.makeSpan(["mspace"],[],n);ue.style.marginRight=X(c),ae.unshift(ue)}return E.makeSpan(["mop","op-limits"],ae,n)},La=["\\smallint"],B0=function(e,t){var r,n,a=!1,c;e.type==="supsub"?(r=e.sup,n=e.sub,c=ve(e.base,"op"),a=!0):c=ve(e,"op");var d=t.style,g=!1;d.size===J.DISPLAY.size&&c.symbol&&!q.contains(La,c.name)&&(g=!0);var y;if(c.symbol){var 
T=g?"Size2-Regular":"Size1-Regular",B="";if((c.name==="\\oiint"||c.name==="\\oiiint")&&(B=c.name.slice(1),c.name=B==="oiint"?"\\iint":"\\iiint"),y=E.makeSymbol(c.name,T,"math",t,["mop","op-symbol",g?"large-op":"small-op"]),B.length>0){var F=y.italic,R=E.staticSvg(B+"Size"+(g?"2":"1"),t);y=E.makeVList({positionType:"individualShift",children:[{type:"elem",elem:y,shift:0},{type:"elem",elem:R,shift:g?.08:0}]},t),c.name="\\"+B,y.classes.unshift("mop"),y.italic=F}}else if(c.body){var O=je(c.body,t,!0);O.length===1&&O[0]instanceof tt?(y=O[0],y.classes[0]="mop"):y=E.makeSpan(["mop"],O,t)}else{for(var Y=[],Q=1;Q0){for(var g=c.body.map(function(F){var R=F.text;return typeof R=="string"?{type:"textord",mode:F.mode,text:R}:F}),y=je(g,t.withFont("mathrm"),!0),T=0;T=0?g.setAttribute("height",X(a)):(g.setAttribute("height",X(a)),g.setAttribute("depth",X(-a))),g.setAttribute("voffset",X(a)),g}});function qa(u,e,t){for(var r=je(u,e,!1),n=e.sizeMultiplier/t.sizeMultiplier,a=0;ar.height+r.depth+d&&(d=(d+R-r.height-r.depth)/2);var O=T.height-r.height-d-B;r.style.paddingLeft=X(F);var Y=E.makeVList({positionType:"firstBaseline",children:[{type:"elem",elem:r,wrapperClasses:["svg-align"]},{type:"kern",size:-(r.height+O)},{type:"elem",elem:T},{type:"kern",size:B}]},t);if(e.index){var Q=t.havingStyle(J.SCRIPTSCRIPT),ae=ke(e.index,Q,t),ue=.6*(Y.height-Y.depth),ce=E.makeVList({positionType:"shift",positionData:-ue,children:[{type:"elem",elem:ae}]},t),Ae=E.makeSpan(["root"],[ce]);return E.makeSpan(["mord","sqrt"],[Ae,Y],t)}else return E.makeSpan(["mord","sqrt"],[Y],t)},mathmlBuilder:function(e,t){var r=e.body,n=e.index;return n?new W.MathNode("mroot",[Ce(r,t),Ce(n,t)]):new W.MathNode("msqrt",[Ce(r,t)])}});var Ha={display:J.DISPLAY,text:J.TEXT,script:J.SCRIPT,scriptscript:J.SCRIPTSCRIPT};ee({type:"styling",names:["\\displaystyle","\\textstyle","\\scriptstyle","\\scriptscriptstyle"],props:{numArgs:0,allowedInText:!0,primitive:!0},handler:function(e,t){var r=e.breakOnTokenText,n=e.funcName,a=e.parser,c=a.parseExpression(!0,r),d=n.slice(1,n.length-5);return{type:"styling",mode:a.mode,style:d,body:c}},htmlBuilder:function(e,t){var r=Ha[e.style],n=t.havingStyle(r).withFont("");return qa(e.body,n,t)},mathmlBuilder:function(e,t){var r=Ha[e.style],n=t.havingStyle(r),a=ot(e.body,n),c=new W.MathNode("mstyle",a),d={display:["0","true"],text:["0","false"],script:["1","false"],scriptscript:["2","false"]},g=d[e.style];return c.setAttribute("scriptlevel",g[0]),c.setAttribute("displaystyle",g[1]),c}});var fs=function(e,t){var r=e.base;if(r)if(r.type==="op"){var n=r.limits&&(t.style.size===J.DISPLAY.size||r.alwaysHandleSupSub);return n?B0:null}else if(r.type==="operatorname"){var a=r.alwaysHandleSupSub&&(t.style.size===J.DISPLAY.size||r.limits);return a?Oa:null}else{if(r.type==="accent")return q.isCharacterBox(r.base)?Jr:null;if(r.type==="horizBrace"){var c=!e.sub;return c===r.isOver?Ra:null}else return null}else return null};g0({type:"supsub",htmlBuilder:function(e,t){var r=fs(e,t);if(r)return r(e,t);var n=e.base,a=e.sup,c=e.sub,d=ke(n,t),g,y,T=t.fontMetrics(),B=0,F=0,R=n&&q.isCharacterBox(n);if(a){var O=t.havingStyle(t.style.sup());g=ke(a,O,t),R||(B=d.height-O.fontMetrics().supDrop*O.sizeMultiplier/t.sizeMultiplier)}if(c){var Y=t.havingStyle(t.style.sub());y=ke(c,Y,t),R||(F=d.depth+Y.fontMetrics().subDrop*Y.sizeMultiplier/t.sizeMultiplier)}var Q;t.style===J.DISPLAY?Q=T.sup1:t.style.cramped?Q=T.sup3:Q=T.sup2;var ae=t.sizeMultiplier,ue=X(.5/T.ptPerEm/ae),ce=null;if(y){var 
Ae=e.base&&e.base.type==="op"&&e.base.name&&(e.base.name==="\\oiint"||e.base.name==="\\oiiint");(d instanceof tt||Ae)&&(ce=X(-d.italic))}var be;if(g&&y){B=Math.max(B,Q,g.depth+.25*T.xHeight),F=Math.max(F,T.sub2);var Me=T.defaultRuleThickness,Se=4*Me;if(B-g.depth-(y.height-F)0&&(B+=_e,F-=_e)}var Ie=[{type:"elem",elem:y,shift:F,marginRight:ue,marginLeft:ce},{type:"elem",elem:g,shift:-B,marginRight:ue}];be=E.makeVList({positionType:"individualShift",children:Ie},t)}else if(y){F=Math.max(F,T.sub1,y.height-.8*T.xHeight);var Qe=[{type:"elem",elem:y,marginLeft:ce,marginRight:ue}];be=E.makeVList({positionType:"shift",positionData:F,children:Qe},t)}else if(g)B=Math.max(B,Q,g.depth+.25*T.xHeight),be=E.makeVList({positionType:"shift",positionData:-B,children:[{type:"elem",elem:g,marginRight:ue}]},t);else throw new Error("supsub must have either sup or sub.");var dt=Xr(d,"right")||"mord";return E.makeSpan([dt],[d,E.makeSpan(["msupsub"],[be])],t)},mathmlBuilder:function(e,t){var r=!1,n,a;e.base&&e.base.type==="horizBrace"&&(a=!!e.sup,a===e.base.isOver&&(r=!0,n=e.base.isOver)),e.base&&(e.base.type==="op"||e.base.type==="operatorname")&&(e.base.parentIsSupSub=!0);var c=[Ce(e.base,t)];e.sub&&c.push(Ce(e.sub,t)),e.sup&&c.push(Ce(e.sup,t));var d;if(r)d=n?"mover":"munder";else if(e.sub)if(e.sup){var T=e.base;T&&T.type==="op"&&T.limits&&t.style===J.DISPLAY||T&&T.type==="operatorname"&&T.alwaysHandleSupSub&&(t.style===J.DISPLAY||T.limits)?d="munderover":d="msubsup"}else{var y=e.base;y&&y.type==="op"&&y.limits&&(t.style===J.DISPLAY||y.alwaysHandleSupSub)||y&&y.type==="operatorname"&&y.alwaysHandleSupSub&&(y.limits||t.style===J.DISPLAY)?d="munder":d="msub"}else{var g=e.base;g&&g.type==="op"&&g.limits&&(t.style===J.DISPLAY||g.alwaysHandleSupSub)||g&&g.type==="operatorname"&&g.alwaysHandleSupSub&&(g.limits||t.style===J.DISPLAY)?d="mover":d="msup"}return new W.MathNode(d,c)}}),g0({type:"atom",htmlBuilder:function(e,t){return E.mathsym(e.text,e.mode,t,["m"+e.family])},mathmlBuilder:function(e,t){var r=new W.MathNode("mo",[wt(e.text,e.mode)]);if(e.family==="bin"){var n=Kr(e,t);n==="bold-italic"&&r.setAttribute("mathvariant",n)}else e.family==="punct"?r.setAttribute("separator","true"):(e.family==="open"||e.family==="close")&&r.setAttribute("stretchy","false");return r}});var Ua={mi:"italic",mn:"normal",mtext:"normal"};g0({type:"mathord",htmlBuilder:function(e,t){return E.makeOrd(e,t,"mathord")},mathmlBuilder:function(e,t){var r=new W.MathNode("mi",[wt(e.text,e.mode,t)]),n=Kr(e,t)||"italic";return n!==Ua[r.type]&&r.setAttribute("mathvariant",n),r}}),g0({type:"textord",htmlBuilder:function(e,t){return E.makeOrd(e,t,"textord")},mathmlBuilder:function(e,t){var r=wt(e.text,e.mode,t),n=Kr(e,t)||"normal",a;return e.mode==="text"?a=new W.MathNode("mtext",[r]):/[0-9]/.test(e.text)?a=new W.MathNode("mn",[r]):e.text==="\\prime"?a=new W.MathNode("mo",[r]):a=new W.MathNode("mi",[r]),n!==Ua[a.type]&&a.setAttribute("mathvariant",n),a}});var gn={"\\nobreak":"nobreak","\\allowbreak":"allowbreak"},vn={" ":{},"\\ ":{},"~":{className:"nobreak"},"\\space":{},"\\nobreakspace":{className:"nobreak"}};g0({type:"spacing",htmlBuilder:function(e,t){if(vn.hasOwnProperty(e.text)){var r=vn[e.text].className||"";if(e.mode==="text"){var n=E.makeOrd(e,t,"textord");return n.classes.push(r),n}else return E.makeSpan(["mspace",r],[E.mathsym(e.text,e.mode,t)],t)}else{if(gn.hasOwnProperty(e.text))return E.makeSpan(["mspace",gn[e.text]],[],t);throw new p('Unknown type of space "'+e.text+'"')}},mathmlBuilder:function(e,t){var 
r;if(vn.hasOwnProperty(e.text))r=new W.MathNode("mtext",[new W.TextNode(" ")]);else{if(gn.hasOwnProperty(e.text))return new W.MathNode("mspace");throw new p('Unknown type of space "'+e.text+'"')}return r}});var Ga=function(){var e=new W.MathNode("mtd",[]);return e.setAttribute("width","50%"),e};g0({type:"tag",mathmlBuilder:function(e,t){var r=new W.MathNode("mtable",[new W.MathNode("mtr",[Ga(),new W.MathNode("mtd",[Jt(e.body,t)]),Ga(),new W.MathNode("mtd",[Jt(e.tag,t)])])]);return r.setAttribute("width","100%"),r}});var Va={"\\text":void 0,"\\textrm":"textrm","\\textsf":"textsf","\\texttt":"texttt","\\textnormal":"textrm"},Wa={"\\textbf":"textbf","\\textmd":"textmd"},ps={"\\textit":"textit","\\textup":"textup"},Ya=function(e,t){var r=e.font;return r?Va[r]?t.withTextFontFamily(Va[r]):Wa[r]?t.withTextFontWeight(Wa[r]):t.withTextFontShape(ps[r]):t};ee({type:"text",names:["\\text","\\textrm","\\textsf","\\texttt","\\textnormal","\\textbf","\\textmd","\\textit","\\textup"],props:{numArgs:1,argTypes:["text"],allowedInArgument:!0,allowedInText:!0},handler:function(e,t){var r=e.parser,n=e.funcName,a=t[0];return{type:"text",mode:r.mode,body:Ge(a),font:n}},htmlBuilder:function(e,t){var r=Ya(e,t),n=je(e.body,r,!0);return E.makeSpan(["mord","text"],n,r)},mathmlBuilder:function(e,t){var r=Ya(e,t);return Jt(e.body,r)}}),ee({type:"underline",names:["\\underline"],props:{numArgs:1,allowedInText:!0},handler:function(e,t){var r=e.parser;return{type:"underline",mode:r.mode,body:t[0]}},htmlBuilder:function(e,t){var r=ke(e.body,t),n=E.makeLineSpan("underline-line",t),a=t.fontMetrics().defaultRuleThickness,c=E.makeVList({positionType:"top",positionData:r.height,children:[{type:"kern",size:a},{type:"elem",elem:n},{type:"kern",size:3*a},{type:"elem",elem:r}]},t);return E.makeSpan(["mord","underline"],[c],t)},mathmlBuilder:function(e,t){var r=new W.MathNode("mo",[new W.TextNode("‾")]);r.setAttribute("stretchy","true");var n=new W.MathNode("munder",[Ce(e.body,t),r]);return n.setAttribute("accentunder","true"),n}}),ee({type:"vcenter",names:["\\vcenter"],props:{numArgs:1,argTypes:["original"],allowedInText:!1},handler:function(e,t){var r=e.parser;return{type:"vcenter",mode:r.mode,body:t[0]}},htmlBuilder:function(e,t){var r=ke(e.body,t),n=t.fontMetrics().axisHeight,a=.5*(r.height-n-(r.depth+n));return E.makeVList({positionType:"shift",positionData:a,children:[{type:"elem",elem:r}]},t)},mathmlBuilder:function(e,t){return new W.MathNode("mpadded",[Ce(e.body,t)],["vcenter"])}}),ee({type:"verb",names:["\\verb"],props:{numArgs:0,allowedInText:!0},handler:function(e,t,r){throw new p("\\verb ended by end of line instead of matching delimiter")},htmlBuilder:function(e,t){for(var r=ja(e),n=[],a=t.havingStyle(t.style.text()),c=0;c0;)this.endGroup()},e.has=function(r){return this.current.hasOwnProperty(r)||this.builtins.hasOwnProperty(r)},e.get=function(r){return this.current.hasOwnProperty(r)?this.current[r]:this.builtins[r]},e.set=function(r,n,a){if(a===void 0&&(a=!1),a){for(var c=0;c0&&(this.undefStack[this.undefStack.length-1][r]=n)}else{var d=this.undefStack[this.undefStack.length-1];d&&!d.hasOwnProperty(r)&&(d[r]=this.current[r])}n==null?delete this.current[r]:this.current[r]=n},u}(),As=Aa,Ts=As;b("\\noexpand",function(u){var e=u.popToken();return u.isExpandable(e.text)&&(e.noexpand=!0,e.treatAsRelax=!0),{tokens:[e],numArgs:0}}),b("\\expandafter",function(u){var e=u.popToken();return u.expandOnce(!0),{tokens:[e],numArgs:0}}),b("\\@firstoftwo",function(u){var 
e=u.consumeArgs(2);return{tokens:e[0],numArgs:0}}),b("\\@secondoftwo",function(u){var e=u.consumeArgs(2);return{tokens:e[1],numArgs:0}}),b("\\@ifnextchar",function(u){var e=u.consumeArgs(3);u.consumeSpaces();var t=u.future();return e[0].length===1&&e[0][0].text===t.text?{tokens:e[1],numArgs:0}:{tokens:e[2],numArgs:0}}),b("\\@ifstar","\\@ifnextchar *{\\@firstoftwo{#1}}"),b("\\TextOrMath",function(u){var e=u.consumeArgs(2);return u.mode==="text"?{tokens:e[0],numArgs:0}:{tokens:e[1],numArgs:0}});var Za={0:0,1:1,2:2,3:3,4:4,5:5,6:6,7:7,8:8,9:9,a:10,A:10,b:11,B:11,c:12,C:12,d:13,D:13,e:14,E:14,f:15,F:15};b("\\char",function(u){var e=u.popToken(),t,r="";if(e.text==="'")t=8,e=u.popToken();else if(e.text==='"')t=16,e=u.popToken();else if(e.text==="`")if(e=u.popToken(),e.text[0]==="\\")r=e.text.charCodeAt(1);else{if(e.text==="EOF")throw new p("\\char` missing argument");r=e.text.charCodeAt(0)}else t=10;if(t){if(r=Za[e.text],r==null||r>=t)throw new p("Invalid base-"+t+" digit "+e.text);for(var n;(n=Za[u.future().text])!=null&&n":"\\dotsb","-":"\\dotsb","*":"\\dotsb",":":"\\dotsb","\\DOTSB":"\\dotsb","\\coprod":"\\dotsb","\\bigvee":"\\dotsb","\\bigwedge":"\\dotsb","\\biguplus":"\\dotsb","\\bigcap":"\\dotsb","\\bigcup":"\\dotsb","\\prod":"\\dotsb","\\sum":"\\dotsb","\\bigotimes":"\\dotsb","\\bigoplus":"\\dotsb","\\bigodot":"\\dotsb","\\bigsqcup":"\\dotsb","\\And":"\\dotsb","\\longrightarrow":"\\dotsb","\\Longrightarrow":"\\dotsb","\\longleftarrow":"\\dotsb","\\Longleftarrow":"\\dotsb","\\longleftrightarrow":"\\dotsb","\\Longleftrightarrow":"\\dotsb","\\mapsto":"\\dotsb","\\longmapsto":"\\dotsb","\\hookrightarrow":"\\dotsb","\\doteq":"\\dotsb","\\mathbin":"\\dotsb","\\mathrel":"\\dotsb","\\relbar":"\\dotsb","\\Relbar":"\\dotsb","\\xrightarrow":"\\dotsb","\\xleftarrow":"\\dotsb","\\DOTSI":"\\dotsi","\\int":"\\dotsi","\\oint":"\\dotsi","\\iint":"\\dotsi","\\iiint":"\\dotsi","\\iiiint":"\\dotsi","\\idotsint":"\\dotsi","\\DOTSX":"\\dotsx"};b("\\dots",function(u){var e="\\dotso",t=u.expandAfterFuture().text;return t in Ka?e=Ka[t]:(t.slice(0,4)==="\\not"||t in Ne.math&&q.contains(["bin","rel"],Ne.math[t].group))&&(e="\\dotsb"),e});var xn={")":!0,"]":!0,"\\rbrack":!0,"\\}":!0,"\\rbrace":!0,"\\rangle":!0,"\\rceil":!0,"\\rfloor":!0,"\\rgroup":!0,"\\rmoustache":!0,"\\right":!0,"\\bigr":!0,"\\biggr":!0,"\\Bigr":!0,"\\Biggr":!0,$:!0,";":!0,".":!0,",":!0};b("\\dotso",function(u){var e=u.future().text;return e in xn?"\\ldots\\,":"\\ldots"}),b("\\dotsc",function(u){var e=u.future().text;return e in xn&&e!==","?"\\ldots\\,":"\\ldots"}),b("\\cdots",function(u){var e=u.future().text;return e in xn?"\\@cdots\\,":"\\@cdots"}),b("\\dotsb","\\cdots"),b("\\dotsm","\\cdots"),b("\\dotsi","\\!\\cdots"),b("\\dotsx","\\ldots\\,"),b("\\DOTSI","\\relax"),b("\\DOTSB","\\relax"),b("\\DOTSX","\\relax"),b("\\tmspace","\\TextOrMath{\\kern#1#3}{\\mskip#1#2}\\relax"),b("\\,","\\tmspace+{3mu}{.1667em}"),b("\\thinspace","\\,"),b("\\>","\\mskip{4mu}"),b("\\:","\\tmspace+{4mu}{.2222em}"),b("\\medspace","\\:"),b("\\;","\\tmspace+{5mu}{.2777em}"),b("\\thickspace","\\;"),b("\\!","\\tmspace-{3mu}{.1667em}"),b("\\negthinspace","\\!"),b("\\negmedspace","\\tmspace-{4mu}{.2222em}"),b("\\negthickspace","\\tmspace-{5mu}{.277em}"),b("\\enspace","\\kern.5em "),b("\\enskip","\\hskip.5em\\relax"),b("\\quad","\\hskip1em\\relax"),b("\\qquad","\\hskip2em\\relax"),b("\\tag","\\@ifstar\\tag@literal\\tag@paren"),b("\\tag@paren","\\tag@literal{({#1})}"),b("\\tag@literal",function(u){if(u.macros.get("\\df@tag"))throw new p("Multiple 
\\tag");return"\\gdef\\df@tag{\\text{#1}}"}),b("\\bmod","\\mathchoice{\\mskip1mu}{\\mskip1mu}{\\mskip5mu}{\\mskip5mu}\\mathbin{\\rm mod}\\mathchoice{\\mskip1mu}{\\mskip1mu}{\\mskip5mu}{\\mskip5mu}"),b("\\pod","\\allowbreak\\mathchoice{\\mkern18mu}{\\mkern8mu}{\\mkern8mu}{\\mkern8mu}(#1)"),b("\\pmod","\\pod{{\\rm mod}\\mkern6mu#1}"),b("\\mod","\\allowbreak\\mathchoice{\\mkern18mu}{\\mkern12mu}{\\mkern12mu}{\\mkern12mu}{\\rm mod}\\,\\,#1"),b("\\newline","\\\\\\relax"),b("\\TeX","\\textrm{\\html@mathml{T\\kern-.1667em\\raisebox{-.5ex}{E}\\kern-.125emX}{TeX}}");var Qa=X(vt["Main-Regular"]["T".charCodeAt(0)][1]-.7*vt["Main-Regular"]["A".charCodeAt(0)][1]);b("\\LaTeX","\\textrm{\\html@mathml{"+("L\\kern-.36em\\raisebox{"+Qa+"}{\\scriptstyle A}")+"\\kern-.15em\\TeX}{LaTeX}}"),b("\\KaTeX","\\textrm{\\html@mathml{"+("K\\kern-.17em\\raisebox{"+Qa+"}{\\scriptstyle A}")+"\\kern-.15em\\TeX}{KaTeX}}"),b("\\hspace","\\@ifstar\\@hspacer\\@hspace"),b("\\@hspace","\\hskip #1\\relax"),b("\\@hspacer","\\rule{0pt}{0pt}\\hskip #1\\relax"),b("\\ordinarycolon",":"),b("\\vcentcolon","\\mathrel{\\mathop\\ordinarycolon}"),b("\\dblcolon",'\\html@mathml{\\mathrel{\\vcentcolon\\mathrel{\\mkern-.9mu}\\vcentcolon}}{\\mathop{\\char"2237}}'),b("\\coloneqq",'\\html@mathml{\\mathrel{\\vcentcolon\\mathrel{\\mkern-1.2mu}=}}{\\mathop{\\char"2254}}'),b("\\Coloneqq",'\\html@mathml{\\mathrel{\\dblcolon\\mathrel{\\mkern-1.2mu}=}}{\\mathop{\\char"2237\\char"3d}}'),b("\\coloneq",'\\html@mathml{\\mathrel{\\vcentcolon\\mathrel{\\mkern-1.2mu}\\mathrel{-}}}{\\mathop{\\char"3a\\char"2212}}'),b("\\Coloneq",'\\html@mathml{\\mathrel{\\dblcolon\\mathrel{\\mkern-1.2mu}\\mathrel{-}}}{\\mathop{\\char"2237\\char"2212}}'),b("\\eqqcolon",'\\html@mathml{\\mathrel{=\\mathrel{\\mkern-1.2mu}\\vcentcolon}}{\\mathop{\\char"2255}}'),b("\\Eqqcolon",'\\html@mathml{\\mathrel{=\\mathrel{\\mkern-1.2mu}\\dblcolon}}{\\mathop{\\char"3d\\char"2237}}'),b("\\eqcolon",'\\html@mathml{\\mathrel{\\mathrel{-}\\mathrel{\\mkern-1.2mu}\\vcentcolon}}{\\mathop{\\char"2239}}'),b("\\Eqcolon",'\\html@mathml{\\mathrel{\\mathrel{-}\\mathrel{\\mkern-1.2mu}\\dblcolon}}{\\mathop{\\char"2212\\char"2237}}'),b("\\colonapprox",'\\html@mathml{\\mathrel{\\vcentcolon\\mathrel{\\mkern-1.2mu}\\approx}}{\\mathop{\\char"3a\\char"2248}}'),b("\\Colonapprox",'\\html@mathml{\\mathrel{\\dblcolon\\mathrel{\\mkern-1.2mu}\\approx}}{\\mathop{\\char"2237\\char"2248}}'),b("\\colonsim",'\\html@mathml{\\mathrel{\\vcentcolon\\mathrel{\\mkern-1.2mu}\\sim}}{\\mathop{\\char"3a\\char"223c}}'),b("\\Colonsim",'\\html@mathml{\\mathrel{\\dblcolon\\mathrel{\\mkern-1.2mu}\\sim}}{\\mathop{\\char"2237\\char"223c}}'),b("∷","\\dblcolon"),b("∹","\\eqcolon"),b("≔","\\coloneqq"),b("≕","\\eqqcolon"),b("⩴","\\Coloneqq"),b("\\ratio","\\vcentcolon"),b("\\coloncolon","\\dblcolon"),b("\\colonequals","\\coloneqq"),b("\\coloncolonequals","\\Coloneqq"),b("\\equalscolon","\\eqqcolon"),b("\\equalscoloncolon","\\Eqqcolon"),b("\\colonminus","\\coloneq"),b("\\coloncolonminus","\\Coloneq"),b("\\minuscolon","\\eqcolon"),b("\\minuscoloncolon","\\Eqcolon"),b("\\coloncolonapprox","\\Colonapprox"),b("\\coloncolonsim","\\Colonsim"),b("\\simcolon","\\mathrel{\\sim\\mathrel{\\mkern-1.2mu}\\vcentcolon}"),b("\\simcoloncolon","\\mathrel{\\sim\\mathrel{\\mkern-1.2mu}\\dblcolon}"),b("\\approxcolon","\\mathrel{\\approx\\mathrel{\\mkern-1.2mu}\\vcentcolon}"),b("\\approxcoloncolon","\\mathrel{\\approx\\mathrel{\\mkern-1.2mu}\\dblcolon}"),b("\\notni","\\html@mathml{\\not\\ni}{\\mathrel{\\char`∌}}"),b("\\limsup","\\DOTSB\\operatorname*{lim\\,sup}"),b("\\limi
nf","\\DOTSB\\operatorname*{lim\\,inf}"),b("\\injlim","\\DOTSB\\operatorname*{inj\\,lim}"),b("\\projlim","\\DOTSB\\operatorname*{proj\\,lim}"),b("\\varlimsup","\\DOTSB\\operatorname*{\\overline{lim}}"),b("\\varliminf","\\DOTSB\\operatorname*{\\underline{lim}}"),b("\\varinjlim","\\DOTSB\\operatorname*{\\underrightarrow{lim}}"),b("\\varprojlim","\\DOTSB\\operatorname*{\\underleftarrow{lim}}"),b("\\gvertneqq","\\html@mathml{\\@gvertneqq}{≩}"),b("\\lvertneqq","\\html@mathml{\\@lvertneqq}{≨}"),b("\\ngeqq","\\html@mathml{\\@ngeqq}{≱}"),b("\\ngeqslant","\\html@mathml{\\@ngeqslant}{≱}"),b("\\nleqq","\\html@mathml{\\@nleqq}{≰}"),b("\\nleqslant","\\html@mathml{\\@nleqslant}{≰}"),b("\\nshortmid","\\html@mathml{\\@nshortmid}{∤}"),b("\\nshortparallel","\\html@mathml{\\@nshortparallel}{∦}"),b("\\nsubseteqq","\\html@mathml{\\@nsubseteqq}{⊈}"),b("\\nsupseteqq","\\html@mathml{\\@nsupseteqq}{⊉}"),b("\\varsubsetneq","\\html@mathml{\\@varsubsetneq}{⊊}"),b("\\varsubsetneqq","\\html@mathml{\\@varsubsetneqq}{⫋}"),b("\\varsupsetneq","\\html@mathml{\\@varsupsetneq}{⊋}"),b("\\varsupsetneqq","\\html@mathml{\\@varsupsetneqq}{⫌}"),b("\\imath","\\html@mathml{\\@imath}{ı}"),b("\\jmath","\\html@mathml{\\@jmath}{ȷ}"),b("\\llbracket","\\html@mathml{\\mathopen{[\\mkern-3.2mu[}}{\\mathopen{\\char`⟦}}"),b("\\rrbracket","\\html@mathml{\\mathclose{]\\mkern-3.2mu]}}{\\mathclose{\\char`⟧}}"),b("⟦","\\llbracket"),b("⟧","\\rrbracket"),b("\\lBrace","\\html@mathml{\\mathopen{\\{\\mkern-3.2mu[}}{\\mathopen{\\char`⦃}}"),b("\\rBrace","\\html@mathml{\\mathclose{]\\mkern-3.2mu\\}}}{\\mathclose{\\char`⦄}}"),b("⦃","\\lBrace"),b("⦄","\\rBrace"),b("\\minuso","\\mathbin{\\html@mathml{{\\mathrlap{\\mathchoice{\\kern{0.145em}}{\\kern{0.145em}}{\\kern{0.1015em}}{\\kern{0.0725em}}\\circ}{-}}}{\\char`⦵}}"),b("⦵","\\minuso"),b("\\darr","\\downarrow"),b("\\dArr","\\Downarrow"),b("\\Darr","\\Downarrow"),b("\\lang","\\langle"),b("\\rang","\\rangle"),b("\\uarr","\\uparrow"),b("\\uArr","\\Uparrow"),b("\\Uarr","\\Uparrow"),b("\\N","\\mathbb{N}"),b("\\R","\\mathbb{R}"),b("\\Z","\\mathbb{Z}"),b("\\alef","\\aleph"),b("\\alefsym","\\aleph"),b("\\Alpha","\\mathrm{A}"),b("\\Beta","\\mathrm{B}"),b("\\bull","\\bullet"),b("\\Chi","\\mathrm{X}"),b("\\clubs","\\clubsuit"),b("\\cnums","\\mathbb{C}"),b("\\Complex","\\mathbb{C}"),b("\\Dagger","\\ddagger"),b("\\diamonds","\\diamondsuit"),b("\\empty","\\emptyset"),b("\\Epsilon","\\mathrm{E}"),b("\\Eta","\\mathrm{H}"),b("\\exist","\\exists"),b("\\harr","\\leftrightarrow"),b("\\hArr","\\Leftrightarrow"),b("\\Harr","\\Leftrightarrow"),b("\\hearts","\\heartsuit"),b("\\image","\\Im"),b("\\infin","\\infty"),b("\\Iota","\\mathrm{I}"),b("\\isin","\\in"),b("\\Kappa","\\mathrm{K}"),b("\\larr","\\leftarrow"),b("\\lArr","\\Leftarrow"),b("\\Larr","\\Leftarrow"),b("\\lrarr","\\leftrightarrow"),b("\\lrArr","\\Leftrightarrow"),b("\\Lrarr","\\Leftrightarrow"),b("\\Mu","\\mathrm{M}"),b("\\natnums","\\mathbb{N}"),b("\\Nu","\\mathrm{N}"),b("\\Omicron","\\mathrm{O}"),b("\\plusmn","\\pm"),b("\\rarr","\\rightarrow"),b("\\rArr","\\Rightarrow"),b("\\Rarr","\\Rightarrow"),b("\\real","\\Re"),b("\\reals","\\mathbb{R}"),b("\\Reals","\\mathbb{R}"),b("\\Rho","\\mathrm{P}"),b("\\sdot","\\cdot"),b("\\sect","\\S"),b("\\spades","\\spadesuit"),b("\\sub","\\subset"),b("\\sube","\\subseteq"),b("\\supe","\\supseteq"),b("\\Tau","\\mathrm{T}"),b("\\thetasym","\\vartheta"),b("\\weierp","\\wp"),b("\\Zeta","\\mathrm{Z}"),b("\\argmin","\\DOTSB\\operatorname*{arg\\,min}"),b("\\argmax","\\DOTSB\\operatorname*{arg\\,max}"),b("\\plim","\\DOTSB\\mathop{\\operatorname{pli
m}}\\limits"),b("\\bra","\\mathinner{\\langle{#1}|}"),b("\\ket","\\mathinner{|{#1}\\rangle}"),b("\\braket","\\mathinner{\\langle{#1}\\rangle}"),b("\\Bra","\\left\\langle#1\\right|"),b("\\Ket","\\left|#1\\right\\rangle");var Ja=function(e){return function(t){var r=t.consumeArg().tokens,n=t.consumeArg().tokens,a=t.consumeArg().tokens,c=t.consumeArg().tokens,d=t.macros.get("|"),g=t.macros.get("\\|");t.macros.beginGroup();var y=function(R){return function(O){e&&(O.macros.set("|",d),a.length&&O.macros.set("\\|",g));var Y=R;if(!R&&a.length){var Q=O.future();Q.text==="|"&&(O.popToken(),Y=!0)}return{tokens:Y?a:n,numArgs:0}}};t.macros.set("|",y(!1)),a.length&&t.macros.set("\\|",y(!0));var T=t.consumeArg().tokens,B=t.expandTokens([].concat(c,T,r));return t.macros.endGroup(),{tokens:B.reverse(),numArgs:0}}};b("\\bra@ket",Ja(!1)),b("\\bra@set",Ja(!0)),b("\\Braket","\\bra@ket{\\left\\langle}{\\,\\middle\\vert\\,}{\\,\\middle\\vert\\,}{\\right\\rangle}"),b("\\Set","\\bra@set{\\left\\{\\:}{\\;\\middle\\vert\\;}{\\;\\middle\\Vert\\;}{\\:\\right\\}}"),b("\\set","\\bra@set{\\{\\,}{\\mid}{}{\\,\\}}"),b("\\angln","{\\angl n}"),b("\\blue","\\textcolor{##6495ed}{#1}"),b("\\orange","\\textcolor{##ffa500}{#1}"),b("\\pink","\\textcolor{##ff00af}{#1}"),b("\\red","\\textcolor{##df0030}{#1}"),b("\\green","\\textcolor{##28ae7b}{#1}"),b("\\gray","\\textcolor{gray}{#1}"),b("\\purple","\\textcolor{##9d38bd}{#1}"),b("\\blueA","\\textcolor{##ccfaff}{#1}"),b("\\blueB","\\textcolor{##80f6ff}{#1}"),b("\\blueC","\\textcolor{##63d9ea}{#1}"),b("\\blueD","\\textcolor{##11accd}{#1}"),b("\\blueE","\\textcolor{##0c7f99}{#1}"),b("\\tealA","\\textcolor{##94fff5}{#1}"),b("\\tealB","\\textcolor{##26edd5}{#1}"),b("\\tealC","\\textcolor{##01d1c1}{#1}"),b("\\tealD","\\textcolor{##01a995}{#1}"),b("\\tealE","\\textcolor{##208170}{#1}"),b("\\greenA","\\textcolor{##b6ffb0}{#1}"),b("\\greenB","\\textcolor{##8af281}{#1}"),b("\\greenC","\\textcolor{##74cf70}{#1}"),b("\\greenD","\\textcolor{##1fab54}{#1}"),b("\\greenE","\\textcolor{##0d923f}{#1}"),b("\\goldA","\\textcolor{##ffd0a9}{#1}"),b("\\goldB","\\textcolor{##ffbb71}{#1}"),b("\\goldC","\\textcolor{##ff9c39}{#1}"),b("\\goldD","\\textcolor{##e07d10}{#1}"),b("\\goldE","\\textcolor{##a75a05}{#1}"),b("\\redA","\\textcolor{##fca9a9}{#1}"),b("\\redB","\\textcolor{##ff8482}{#1}"),b("\\redC","\\textcolor{##f9685d}{#1}"),b("\\redD","\\textcolor{##e84d39}{#1}"),b("\\redE","\\textcolor{##bc2612}{#1}"),b("\\maroonA","\\textcolor{##ffbde0}{#1}"),b("\\maroonB","\\textcolor{##ff92c6}{#1}"),b("\\maroonC","\\textcolor{##ed5fa6}{#1}"),b("\\maroonD","\\textcolor{##ca337c}{#1}"),b("\\maroonE","\\textcolor{##9e034e}{#1}"),b("\\purpleA","\\textcolor{##ddd7ff}{#1}"),b("\\purpleB","\\textcolor{##c6b9fc}{#1}"),b("\\purpleC","\\textcolor{##aa87ff}{#1}"),b("\\purpleD","\\textcolor{##7854ab}{#1}"),b("\\purpleE","\\textcolor{##543b78}{#1}"),b("\\mintA","\\textcolor{##f5f9e8}{#1}"),b("\\mintB","\\textcolor{##edf2df}{#1}"),b("\\mintC","\\textcolor{##e0e5cc}{#1}"),b("\\grayA","\\textcolor{##f6f7f7}{#1}"),b("\\grayB","\\textcolor{##f0f1f2}{#1}"),b("\\grayC","\\textcolor{##e3e5e6}{#1}"),b("\\grayD","\\textcolor{##d6d8da}{#1}"),b("\\grayE","\\textcolor{##babec2}{#1}"),b("\\grayF","\\textcolor{##888d93}{#1}"),b("\\grayG","\\textcolor{##626569}{#1}"),b("\\grayH","\\textcolor{##3b3e40}{#1}"),b("\\grayI","\\textcolor{##21242c}{#1}"),b("\\kaBlue","\\textcolor{##314453}{#1}"),b("\\kaGreen","\\textcolor{##71B307}{#1}");var ei={"^":!0,_:!0,"\\limits":!0,"\\nolimits":!0},Ms=function(){function u(t,r,n){this.settings=void 
0,this.expansionCount=void 0,this.lexer=void 0,this.macros=void 0,this.stack=void 0,this.mode=void 0,this.settings=r,this.expansionCount=0,this.feed(t),this.macros=new Ss(Ts,r.macros),this.mode=n,this.stack=[]}var e=u.prototype;return e.feed=function(r){this.lexer=new $a(r,this.settings)},e.switchMode=function(r){this.mode=r},e.beginGroup=function(){this.macros.beginGroup()},e.endGroup=function(){this.macros.endGroup()},e.endGroups=function(){this.macros.endGroups()},e.future=function(){return this.stack.length===0&&this.pushToken(this.lexer.lex()),this.stack[this.stack.length-1]},e.popToken=function(){return this.future(),this.stack.pop()},e.pushToken=function(r){this.stack.push(r)},e.pushTokens=function(r){var n;(n=this.stack).push.apply(n,r)},e.scanArgument=function(r){var n,a,c;if(r){if(this.consumeSpaces(),this.future().text!=="[")return null;n=this.popToken();var d=this.consumeArg(["]"]);c=d.tokens,a=d.end}else{var g=this.consumeArg();c=g.tokens,n=g.start,a=g.end}return this.pushToken(new e0("EOF",a.loc)),this.pushTokens(c),n.range(a,"")},e.consumeSpaces=function(){for(;;){var r=this.future();if(r.text===" ")this.stack.pop();else break}},e.consumeArg=function(r){var n=[],a=r&&r.length>0;a||this.consumeSpaces();var c=this.future(),d,g=0,y=0;do{if(d=this.popToken(),n.push(d),d.text==="{")++g;else if(d.text==="}"){if(--g,g===-1)throw new p("Extra }",d)}else if(d.text==="EOF")throw new p("Unexpected end of input in a macro argument, expected '"+(r&&a?r[y]:"}")+"'",d);if(r&&a)if((g===0||g===1&&r[y]==="{")&&d.text===r[y]){if(++y,y===r.length){n.splice(-y,y);break}}else y=0}while(g!==0||a);return c.text==="{"&&n[n.length-1].text==="}"&&(n.pop(),n.shift()),n.reverse(),{tokens:n,start:c,end:d}},e.consumeArgs=function(r,n){if(n){if(n.length!==r+1)throw new p("The length of delimiters doesn't match the number of args!");for(var a=n[0],c=0;cthis.settings.maxExpand)throw new p("Too many expansions: infinite loop or need to increase maxExpand setting");var d=c.tokens,g=this.consumeArgs(c.numArgs,c.delimiters);if(c.numArgs){d=d.slice();for(var y=d.length-1;y>=0;--y){var T=d[y];if(T.text==="#"){if(y===0)throw new p("Incomplete placeholder at end of macro body",T);if(T=d[--y],T.text==="#")d.splice(y+1,1);else if(/^[1-9]$/.test(T.text)){var B;(B=d).splice.apply(B,[y,2].concat(g[+T.text-1]))}else throw new p("Not a valid argument number",T)}}}return this.pushTokens(d),d.length},e.expandAfterFuture=function(){return this.expandOnce(),this.future()},e.expandNextToken=function(){for(;;)if(this.expandOnce()===!1){var r=this.stack.pop();return r.treatAsRelax&&(r.text="\\relax"),r}throw new Error},e.expandMacro=function(r){return this.macros.has(r)?this.expandTokens([new e0(r)]):void 0},e.expandTokens=function(r){var n=[],a=this.stack.length;for(this.pushTokens(r);this.stack.length>a;)if(this.expandOnce(!0)===!1){var c=this.stack.pop();c.treatAsRelax&&(c.noexpand=!1,c.treatAsRelax=!1),n.push(c)}return n},e.expandMacroAsText=function(r){var n=this.expandMacro(r);return n&&n.map(function(a){return a.text}).join("")},e._getExpansion=function(r){var n=this.macros.get(r);if(n==null)return n;if(r.length===1){var a=this.lexer.catcodes[r];if(a!=null&&a!==13)return}var c=typeof n=="function"?n(this):n;if(typeof c=="string"){var d=0;if(c.indexOf("#")!==-1)for(var g=c.replace(/##/g,"");g.indexOf("#"+(d+1))!==-1;)++d;for(var y=new $a(c,this.settings),T=[],B=y.lex();B.text!=="EOF";)T.push(B),B=y.lex();T.reverse();var F={tokens:T,numArgs:d};return F}return c},e.isDefined=function(r){return 
this.macros.has(r)||r0.hasOwnProperty(r)||Ne.math.hasOwnProperty(r)||Ne.text.hasOwnProperty(r)||ei.hasOwnProperty(r)},e.isExpandable=function(r){var n=this.macros.get(r);return n!=null?typeof n=="string"||typeof n=="function"||!n.unexpandable:r0.hasOwnProperty(r)&&!r0[r].primitive},u}(),ti=/^[₊₋₌₍₎₀₁₂₃₄₅₆₇₈₉ₐₑₕᵢⱼₖₗₘₙₒₚᵣₛₜᵤᵥₓᵦᵧᵨᵩᵪ]/,Mr=Object.freeze({"₊":"+","₋":"-","₌":"=","₍":"(","₎":")","₀":"0","₁":"1","₂":"2","₃":"3","₄":"4","₅":"5","₆":"6","₇":"7","₈":"8","₉":"9","ₐ":"a","ₑ":"e","ₕ":"h","ᵢ":"i","ⱼ":"j","ₖ":"k","ₗ":"l","ₘ":"m","ₙ":"n","ₒ":"o","ₚ":"p","ᵣ":"r","ₛ":"s","ₜ":"t","ᵤ":"u","ᵥ":"v","ₓ":"x","ᵦ":"β","ᵧ":"γ","ᵨ":"ρ","ᵩ":"ϕ","ᵪ":"χ","⁺":"+","⁻":"-","⁼":"=","⁽":"(","⁾":")","⁰":"0","¹":"1","²":"2","³":"3","⁴":"4","⁵":"5","⁶":"6","⁷":"7","⁸":"8","⁹":"9","ᴬ":"A","ᴮ":"B","ᴰ":"D","ᴱ":"E","ᴳ":"G","ᴴ":"H","ᴵ":"I","ᴶ":"J","ᴷ":"K","ᴸ":"L","ᴹ":"M","ᴺ":"N","ᴼ":"O","ᴾ":"P","ᴿ":"R","ᵀ":"T","ᵁ":"U","ⱽ":"V","ᵂ":"W","ᵃ":"a","ᵇ":"b","ᶜ":"c","ᵈ":"d","ᵉ":"e","ᶠ":"f","ᵍ":"g",ʰ:"h","ⁱ":"i",ʲ:"j","ᵏ":"k",ˡ:"l","ᵐ":"m",ⁿ:"n","ᵒ":"o","ᵖ":"p",ʳ:"r",ˢ:"s","ᵗ":"t","ᵘ":"u","ᵛ":"v",ʷ:"w",ˣ:"x",ʸ:"y","ᶻ":"z","ᵝ":"β","ᵞ":"γ","ᵟ":"δ","ᵠ":"ϕ","ᵡ":"χ","ᶿ":"θ"}),wn={"́":{text:"\\'",math:"\\acute"},"̀":{text:"\\`",math:"\\grave"},"̈":{text:'\\"',math:"\\ddot"},"̃":{text:"\\~",math:"\\tilde"},"̄":{text:"\\=",math:"\\bar"},"̆":{text:"\\u",math:"\\breve"},"̌":{text:"\\v",math:"\\check"},"̂":{text:"\\^",math:"\\hat"},"̇":{text:"\\.",math:"\\dot"},"̊":{text:"\\r",math:"\\mathring"},"̋":{text:"\\H"},"̧":{text:"\\c"}},ri={á:"á",à:"à",ä:"ä",ǟ:"ǟ",ã:"ã",ā:"ā",ă:"ă",ắ:"ắ",ằ:"ằ",ẵ:"ẵ",ǎ:"ǎ",â:"â",ấ:"ấ",ầ:"ầ",ẫ:"ẫ",ȧ:"ȧ",ǡ:"ǡ",å:"å",ǻ:"ǻ",ḃ:"ḃ",ć:"ć",ḉ:"ḉ",č:"č",ĉ:"ĉ",ċ:"ċ",ç:"ç",ď:"ď",ḋ:"ḋ",ḑ:"ḑ",é:"é",è:"è",ë:"ë",ẽ:"ẽ",ē:"ē",ḗ:"ḗ",ḕ:"ḕ",ĕ:"ĕ",ḝ:"ḝ",ě:"ě",ê:"ê",ế:"ế",ề:"ề",ễ:"ễ",ė:"ė",ȩ:"ȩ",ḟ:"ḟ",ǵ:"ǵ",ḡ:"ḡ",ğ:"ğ",ǧ:"ǧ",ĝ:"ĝ",ġ:"ġ",ģ:"ģ",ḧ:"ḧ",ȟ:"ȟ",ĥ:"ĥ",ḣ:"ḣ",ḩ:"ḩ",í:"í",ì:"ì",ï:"ï",ḯ:"ḯ",ĩ:"ĩ",ī:"ī",ĭ:"ĭ",ǐ:"ǐ",î:"î",ǰ:"ǰ",ĵ:"ĵ",ḱ:"ḱ",ǩ:"ǩ",ķ:"ķ",ĺ:"ĺ",ľ:"ľ",ļ:"ļ",ḿ:"ḿ",ṁ:"ṁ",ń:"ń",ǹ:"ǹ",ñ:"ñ",ň:"ň",ṅ:"ṅ",ņ:"ņ",ó:"ó",ò:"ò",ö:"ö",ȫ:"ȫ",õ:"õ",ṍ:"ṍ",ṏ:"ṏ",ȭ:"ȭ",ō:"ō",ṓ:"ṓ",ṑ:"ṑ",ŏ:"ŏ",ǒ:"ǒ",ô:"ô",ố:"ố",ồ:"ồ",ỗ:"ỗ",ȯ:"ȯ",ȱ:"ȱ",ő:"ő",ṕ:"ṕ",ṗ:"ṗ",ŕ:"ŕ",ř:"ř",ṙ:"ṙ",ŗ:"ŗ",ś:"ś",ṥ:"ṥ",š:"š",ṧ:"ṧ",ŝ:"ŝ",ṡ:"ṡ",ş:"ş",ẗ:"ẗ",ť:"ť",ṫ:"ṫ",ţ:"ţ",ú:"ú",ù:"ù",ü:"ü",ǘ:"ǘ",ǜ:"ǜ",ǖ:"ǖ",ǚ:"ǚ",ũ:"ũ",ṹ:"ṹ",ū:"ū",ṻ:"ṻ",ŭ:"ŭ",ǔ:"ǔ",û:"û",ů:"ů",ű:"ű",ṽ:"ṽ",ẃ:"ẃ",ẁ:"ẁ",ẅ:"ẅ",ŵ:"ŵ",ẇ:"ẇ",ẘ:"ẘ",ẍ:"ẍ",ẋ:"ẋ",ý:"ý",ỳ:"ỳ",ÿ:"ÿ",ỹ:"ỹ",ȳ:"ȳ",ŷ:"ŷ",ẏ:"ẏ",ẙ:"ẙ",ź:"ź",ž:"ž",ẑ:"ẑ",ż:"ż",Á:"Á",À:"À",Ä:"Ä",Ǟ:"Ǟ",Ã:"Ã",Ā:"Ā",Ă:"Ă",Ắ:"Ắ",Ằ:"Ằ",Ẵ:"Ẵ",Ǎ:"Ǎ",Â:"Â",Ấ:"Ấ",Ầ:"Ầ",Ẫ:"Ẫ",Ȧ:"Ȧ",Ǡ:"Ǡ",Å:"Å",Ǻ:"Ǻ",Ḃ:"Ḃ",Ć:"Ć",Ḉ:"Ḉ",Č:"Č",Ĉ:"Ĉ",Ċ:"Ċ",Ç:"Ç",Ď:"Ď",Ḋ:"Ḋ",Ḑ:"Ḑ",É:"É",È:"È",Ë:"Ë",Ẽ:"Ẽ",Ē:"Ē",Ḗ:"Ḗ",Ḕ:"Ḕ",Ĕ:"Ĕ",Ḝ:"Ḝ",Ě:"Ě",Ê:"Ê",Ế:"Ế",Ề:"Ề",Ễ:"Ễ",Ė:"Ė",Ȩ:"Ȩ",Ḟ:"Ḟ",Ǵ:"Ǵ",Ḡ:"Ḡ",Ğ:"Ğ",Ǧ:"Ǧ",Ĝ:"Ĝ",Ġ:"Ġ",Ģ:"Ģ",Ḧ:"Ḧ",Ȟ:"Ȟ",Ĥ:"Ĥ",Ḣ:"Ḣ",Ḩ:"Ḩ",Í:"Í",Ì:"Ì",Ï:"Ï",Ḯ:"Ḯ",Ĩ:"Ĩ",Ī:"Ī",Ĭ:"Ĭ",Ǐ:"Ǐ",Î:"Î",İ:"İ",Ĵ:"Ĵ",Ḱ:"Ḱ",Ǩ:"Ǩ",Ķ:"Ķ",Ĺ:"Ĺ",Ľ:"Ľ",Ļ:"Ļ",Ḿ:"Ḿ",Ṁ:"Ṁ",Ń:"Ń",Ǹ:"Ǹ",Ñ:"Ñ",Ň:"Ň",Ṅ:"Ṅ",Ņ:"Ņ",Ó:"Ó",Ò:"Ò",Ö:"Ö",Ȫ:"Ȫ",Õ:"Õ",Ṍ:"Ṍ",Ṏ:"Ṏ",Ȭ:"Ȭ",Ō:"Ō",Ṓ:"Ṓ",Ṑ:"Ṑ",Ŏ:"Ŏ",Ǒ:"Ǒ",Ô:"Ô",Ố:"Ố",Ồ:"Ồ",Ỗ:"Ỗ",Ȯ:"Ȯ",Ȱ:"Ȱ",Ő:"Ő",Ṕ:"Ṕ",Ṗ:"Ṗ",Ŕ:"Ŕ",Ř:"Ř",Ṙ:"Ṙ",Ŗ:"Ŗ",Ś:"Ś",Ṥ:"Ṥ",Š:"Š",Ṧ:"Ṧ",Ŝ:"Ŝ",Ṡ:"Ṡ",Ş:"Ş",Ť:"Ť",Ṫ:"Ṫ",Ţ:"Ţ",Ú:"Ú",Ù:"Ù",Ü:"Ü",Ǘ:"Ǘ",Ǜ:"Ǜ",Ǖ:"Ǖ",Ǚ:"Ǚ",Ũ:"Ũ",Ṹ:"Ṹ",Ū:"Ū",Ṻ:"Ṻ",Ŭ:"Ŭ",Ǔ:"Ǔ",Û:"Û",Ů:"Ů",Ű:"Ű",Ṽ:"Ṽ",Ẃ:"Ẃ",Ẁ:"Ẁ",Ẅ:"Ẅ",Ŵ:"Ŵ",Ẇ:"Ẇ",Ẍ:"Ẍ",Ẋ:"Ẋ",Ý:"Ý",Ỳ:"Ỳ",Ÿ:"Ÿ",Ỹ:"Ỹ",Ȳ:"Ȳ",Ŷ:"Ŷ",Ẏ:"Ẏ",Ź:"Ź",Ž:"Ž",Ẑ:"Ẑ",Ż:"Ż",ά:"ά",ὰ:"ὰ",ᾱ:"ᾱ",ᾰ:"ᾰ",έ:"έ",ὲ:"ὲ",ή:"ή",ὴ:"ὴ",ί:"ί",ὶ:"ὶ",ϊ:"ϊ",ΐ:"ΐ",ῒ:"ῒ",ῑ:"ῑ",ῐ:"ῐ",ό:"ό",ὸ:"ὸ",ύ:"ύ",ὺ:"ὺ",ϋ:"ϋ",ΰ:"ΰ",ῢ:"ῢ",ῡ:"ῡ",ῠ:"ῠ",ώ:"ώ",ὼ:"ὼ",Ύ:"Ύ",Ὺ:"Ὺ",Ϋ:"Ϋ",Ῡ:"Ῡ
",Ῠ:"Ῠ",Ώ:"Ώ",Ὼ:"Ὼ"},ni=function(){function u(t,r){this.mode=void 0,this.gullet=void 0,this.settings=void 0,this.leftrightDepth=void 0,this.nextToken=void 0,this.mode="math",this.gullet=new Ms(t,r,this.mode),this.settings=r,this.leftrightDepth=0}var e=u.prototype;return e.expect=function(r,n){if(n===void 0&&(n=!0),this.fetch().text!==r)throw new p("Expected '"+r+"', got '"+this.fetch().text+"'",this.fetch());n&&this.consume()},e.consume=function(){this.nextToken=null},e.fetch=function(){return this.nextToken==null&&(this.nextToken=this.gullet.expandNextToken()),this.nextToken},e.switchMode=function(r){this.mode=r,this.gullet.switchMode(r)},e.parse=function(){this.settings.globalGroup||this.gullet.beginGroup(),this.settings.colorIsTextColor&&this.gullet.macros.set("\\color","\\textcolor");try{var r=this.parseExpression(!1);return this.expect("EOF"),this.settings.globalGroup||this.gullet.endGroup(),r}finally{this.gullet.endGroups()}},e.subparse=function(r){var n=this.nextToken;this.consume(),this.gullet.pushToken(new e0("}")),this.gullet.pushTokens(r);var a=this.parseExpression(!1);return this.expect("}"),this.nextToken=n,a},e.parseExpression=function(r,n){for(var a=[];;){this.mode==="math"&&this.consumeSpaces();var c=this.fetch();if(u.endOfExpression.indexOf(c.text)!==-1||n&&c.text===n||r&&r0[c.text]&&r0[c.text].infix)break;var d=this.parseAtom(n);if(d){if(d.type==="internal")continue}else break;a.push(d)}return this.mode==="text"&&this.formLigatures(a),this.handleInfixNodes(a)},e.handleInfixNodes=function(r){for(var n=-1,a,c=0;c=0&&this.settings.reportNonstrict("unicodeTextInMathMode",'Latin-1/Unicode text character "'+n[0]+'" used in math mode',r);var y=Ne[this.mode][n].group,T=_t.range(r),B;if(Ot.hasOwnProperty(y)){var F=y;B={type:"atom",mode:this.mode,family:F,loc:T,text:n}}else B={type:y,mode:this.mode,loc:T,text:n};g=B}else if(n.charCodeAt(0)>=128)this.settings.strict&&(pt(n.charCodeAt(0))?this.mode==="math"&&this.settings.reportNonstrict("unicodeTextInMathMode",'Unicode text character "'+n[0]+'" used in math mode',r):this.settings.reportNonstrict("unknownSymbol",'Unrecognized Unicode character "'+n[0]+'"'+(" ("+n.charCodeAt(0)+")"),r)),g={type:"textord",mode:"text",loc:_t.range(r),text:n};else return null;if(this.consume(),d)for(var R=0;R0&&(I.push({type:"text",data:_.slice(0,D)}),_=_.slice(D));var re=N.findIndex(function(fe){return _.startsWith(fe.left)});if(D=L(N[re].right,_,N[re].left.length),D===-1)break;var Z=_.slice(0,D+N[re].right.length),U=K.test(Z)?Z:_.slice(N[re].left.length,D);I.push({type:"math",data:U,rawData:Z,display:N[re].display}),_=_.slice(D+N[re].right.length)}return _!==""&&I.push({type:"text",data:_}),I},V=ne,xe=function(_,N){var D=V(_,N.delimiters);if(D.length===1&&D[0].type==="text")return null;for(var I=document.createDocumentFragment(),j=0;j{Is().then(()=>{m!==p&&requestAnimationFrame(()=>{s(0,w.innerHTML=Ei.sanitize(he.parse(m)),w),s(3,z=!0),p=m,o("load")})})});function L(G){Yi[G?"unshift":"push"](()=>{w=G,s(0,w)})}return v.$$set=G=>{"message"in G&&s(1,m=G.message),"latex_delimiters"in G&&s(2,x=G.latex_delimiters)},v.$$.update=()=>{v.$$.dirty&13&&z&&x.length>0&&Zo(w,{delimiters:x,throwOnError:!1})},[w,m,x,z,L]}class Jo extends Or{constructor(i){super(),qr(this,i,Qo,Ko,Pr,{message:1,latex_delimiters:2})}}function Ci(v,i,s){const o=v.slice();return o[25]=i[s],o[27]=s,o}function Di(v,i,s){const o=v.slice();return o[28]=i[s],o[30]=s,o}function Ni(v,i,s){const o=v.slice();return o[31]=i[s],o}function Ri(v){let i,s,o;return s=new 
Vs({props:{formatter:zo,value:v[0]}}),s.$on("error",v[18]),s.$on("share",v[19]),{c(){i=ct("div"),H0(s.$$.fragment),de(i,"class","icon-button svelte-1fzvtqo")},m(m,p){Xe(m,i,p),U0(s,i,null),o=!0},p(m,p){const x={};p[0]&1&&(x.value=m[0]),s.$set(x)},i(m){o||(Re(s.$$.fragment,m),o=!0)},o(m){Ye(s.$$.fragment,m),o=!1},d(m){m&&$e(i),G0(s)}}}function Fi(v){let i,s,o=q0(v[0]),m=[];for(let x=0;xYe(m[x],1,1,()=>{m[x]=null});return{c(){for(let x=0;x{V[C]=null}),x0()),~p?(x=V[p],x?x.p(v,q):(x=V[p]=ne[p](v),x.c()),Re(x,1),x.m(i,w)):x=null),(!L||q[0]&64&&z!==(z=v[6]?"rtl":"ltr"))&&de(i,"dir",z),(!L||q[0]&1)&&I0(i,"latest",v[27]===v[0].length-1),(!L||q[0]&1)&&I0(i,"hide",v[28]===null),(!L||q[0]&16)&&I0(i,"selectable",v[4])},i(le){L||(Re(x),L=!0)},o(le){Ye(x),L=!1},d(le){le&&$e(i),~p&&V[p].d(),G=!1,K()}}}function qi(v){let i,s,o=q0(v[25]),m=[];for(let x=0;xYe(m[x],1,1,()=>{m[x]=null});return{c(){for(let x=0;x -   -
    -   -
    `,de(i,"class","message pending svelte-1fzvtqo")},m(s,o){Xe(s,i,o)},d(s){s&&$e(i)}}}function i1(v){let i,s,o,m,p,x,w,z=v[5]&&v[0]!==null&&v[0].length>0&&Ri(v),L=v[0]!==null&&Fi(v),G=v[2]&&Pi();return{c(){z&&z.c(),i=O0(),s=ct("div"),o=ct("div"),L&&L.c(),m=O0(),G&&G.c(),de(o,"class","message-wrap svelte-1fzvtqo"),de(s,"class","wrap svelte-1fzvtqo")},m(K,ne){z&&z.m(K,ne),Xe(K,i,ne),Xe(K,s,ne),Gt(s,o),L&&L.m(o,null),Gt(o,m),G&&G.m(o,null),v[21](s),p=!0,x||(w=Ls(To.call(null,o)),x=!0)},p(K,ne){K[5]&&K[0]!==null&&K[0].length>0?z?(z.p(K,ne),ne[0]&33&&Re(z,1)):(z=Ri(K),z.c(),Re(z,1),z.m(i.parentNode,i)):z&&(y0(),Ye(z,1,1,()=>{z=null}),x0()),K[0]!==null?L?(L.p(K,ne),ne[0]&1&&Re(L,1)):(L=Fi(K),L.c(),Re(L,1),L.m(o,m)):L&&(y0(),Ye(L,1,1,()=>{L=null}),x0()),K[2]?G||(G=Pi(),G.c(),G.m(o,null)):G&&(G.d(1),G=null)},i(K){p||(Re(z),Re(L),p=!0)},o(K){Ye(z),Ye(L),p=!1},d(K){K&&($e(i),$e(s)),z&&z.d(K),L&&L.d(),G&&G.d(),v[21](null),x=!1,w()}}}function l1(v,i,s){const o={light:()=>ui(()=>Promise.resolve({}),["assets/prism-0efcbb52.css"]),dark:()=>ui(()=>Promise.resolve({}),["assets/prism-dark-490e4a1c.css"])};let{value:m}=i,p=null,{latex_delimiters:x}=i,{pending_message:w=!1}=i,{feedback:z=null}=i,{selectable:L=!1}=i,{show_share_button:G=!1}=i,{theme_mode:K}=i,{rtl:ne=!1}=i,V,xe;const ge=Vi();Os(()=>{xe=V&&V.offsetHeight+V.scrollTop>V.scrollHeight-100});const le=()=>{xe&&V.scrollTo(0,V.scrollHeight)};Wi(()=>{xe&&(le(),V.querySelectorAll("img").forEach(se=>{se.addEventListener("load",()=>{le()})}))});function q(se,ze,Pe){ge("select",{index:[se,ze],value:Pe})}function C(se){At.call(this,v,se)}function _(se){At.call(this,v,se)}function N(se){At.call(this,v,se)}function D(se){At.call(this,v,se)}function I(se){At.call(this,v,se)}function j(se){At.call(this,v,se)}function re(se){At.call(this,v,se)}function Z(se){At.call(this,v,se)}const U=(se,ze,Pe)=>q(se,ze,Pe);function fe(se){Yi[se?"unshift":"push"](()=>{V=se,s(7,V)})}return v.$$set=se=>{"value"in se&&s(0,m=se.value),"latex_delimiters"in se&&s(1,x=se.latex_delimiters),"pending_message"in se&&s(2,w=se.pending_message),"feedback"in se&&s(3,z=se.feedback),"selectable"in se&&s(4,L=se.selectable),"show_share_button"in se&&s(5,G=se.show_share_button),"theme_mode"in se&&s(10,K=se.theme_mode),"rtl"in se&&s(6,ne=se.rtl)},v.$$.update=()=>{v.$$.dirty[0]&1024&&(K=="dark"?o.dark():o.light()),v.$$.dirty[0]&2049&&m!==p&&(s(11,p=m),ge("change"))},[m,x,w,z,L,G,ne,V,le,q,K,p,C,_,N,D,I,j,re,Z,U,fe]}class s1 extends Or{constructor(i){super(),qr(this,i,l1,i1,Pr,{value:0,latex_delimiters:1,pending_message:2,feedback:3,selectable:4,show_share_button:5,theme_mode:10,rtl:6},null,[-1,-1])}}function Hi(v){let i,s;const o=[v[12],{show_progress:v[12].show_progress==="hidden"?"hidden":"minimal"}];let m={};for(let p=0;p{x=null}),x0()),z[7]?w?(w.p(z,L),L&128&&Re(w,1)):(w=Ui(z),w.c(),Re(w,1),w.m(s,o)):w&&(y0(),Ye(w,1,1,()=>{w=null}),x0());const G={};L&256&&(G.selectable=z[8]),L&1024&&(G.show_share_button=z[10]),L&512&&(G.theme_mode=z[9]),L&16384&&(G.value=z[14]),L&8&&(G.latex_delimiters=z[3]),L&4096&&(G.pending_message=z[12]?.status==="pending"),L&2048&&(G.rtl=z[11]),m.$set(G)},i(z){p||(Re(x),Re(w),Re(m.$$.fragment,z),p=!0)},o(z){Ye(x),Ye(w),Ye(m.$$.fragment,z),p=!1},d(z){z&&($e(i),$e(s)),x&&x.d(z),w&&w.d(),G0(m)}}}function u1(v){let i,s;return i=new Ws({props:{elem_id:v[0],elem_classes:v[1],visible:v[2],padding:!1,scale:v[4],min_width:v[5],height:v[13],allow_overflow:!1,$$slots:{default:[o1]},$$scope:{ctx:v}}}),{c(){H0(i.$$.fragment)},m(o,m){U0(i,o,m),s=!0},p(o,[m]){const 
p={};m&1&&(p.elem_id=o[0]),m&2&&(p.elem_classes=o[1]),m&4&&(p.visible=o[2]),m&16&&(p.scale=o[4]),m&32&&(p.min_width=o[5]),m&8192&&(p.height=o[13]),m&8413128&&(p.$$scope={dirty:m,ctx:o}),i.$set(p)},i(o){s||(Re(i.$$.fragment,o),s=!0)},o(o){Ye(i.$$.fragment,o),s=!1},d(o){G0(i,o)}}}function c1(v,i,s){let{elem_id:o=""}=i,{elem_classes:m=[]}=i,{visible:p=!0}=i,{value:x=[]}=i,w,{latex_delimiters:z}=i,{scale:L=null}=i,{min_width:G=void 0}=i,{label:K}=i,{show_label:ne=!0}=i,{root:V}=i,{root_url:xe}=i,{selectable:ge=!1}=i,{theme_mode:le}=i,{show_share_button:q=!1}=i,{rtl:C=!1}=i;const _=U=>U.replace('src="/file',`src="${V}file`);let{loading_status:N=void 0}=i,{height:D=400}=i;function I(U){At.call(this,v,U)}function j(U){At.call(this,v,U)}function re(U){At.call(this,v,U)}function Z(U){At.call(this,v,U)}return v.$$set=U=>{"elem_id"in U&&s(0,o=U.elem_id),"elem_classes"in U&&s(1,m=U.elem_classes),"visible"in U&&s(2,p=U.visible),"value"in U&&s(15,x=U.value),"latex_delimiters"in U&&s(3,z=U.latex_delimiters),"scale"in U&&s(4,L=U.scale),"min_width"in U&&s(5,G=U.min_width),"label"in U&&s(6,K=U.label),"show_label"in U&&s(7,ne=U.show_label),"root"in U&&s(16,V=U.root),"root_url"in U&&s(17,xe=U.root_url),"selectable"in U&&s(8,ge=U.selectable),"theme_mode"in U&&s(9,le=U.theme_mode),"show_share_button"in U&&s(10,q=U.show_share_button),"rtl"in U&&s(11,C=U.rtl),"loading_status"in U&&s(12,N=U.loading_status),"height"in U&&s(13,D=U.height)},v.$$.update=()=>{v.$$.dirty&229376&&s(14,w=x?x.map(([U,fe])=>[typeof U=="string"?_(U):ci(U,V,xe),typeof fe=="string"?_(fe):ci(fe,V,xe)]):[])},[o,m,p,z,L,G,K,ne,ge,le,q,C,N,D,w,x,V,xe,I,j,re,Z]}class h1 extends Or{constructor(i){super(),qr(this,i,c1,u1,Pr,{elem_id:0,elem_classes:1,visible:2,value:15,latex_delimiters:3,scale:4,min_width:5,label:6,show_label:7,root:16,root_url:17,selectable:8,theme_mode:9,show_share_button:10,rtl:11,loading_status:12,height:13})}}const b1=h1,y1=["static"];export{b1 as Component,y1 as modes}; -//# sourceMappingURL=index-37e7aa9b.js.map diff --git a/spaces/Damnbro/andite-anything-v4.0/README.md b/spaces/Damnbro/andite-anything-v4.0/README.md deleted file mode 100644 index 70790c16004f4fc0d84b3a001c44d5c393b45ebb..0000000000000000000000000000000000000000 --- a/spaces/Damnbro/andite-anything-v4.0/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Andite Anything V4.0 -emoji: 🔥 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Datasculptor/OpenAI-Chatbot_App/README.md b/spaces/Datasculptor/OpenAI-Chatbot_App/README.md deleted file mode 100644 index 5b14cfbf8b97cab833b601a7300afcc5cca988ad..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/OpenAI-Chatbot_App/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: OpenAI Chatbot App -emoji: 🤖 -colorFrom: green -colorTo: purple -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: Kaludi/OpenAI-Chatbot_App ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Datasculptor/StyleGAN-NADA/e4e/utils/alignment.py b/spaces/Datasculptor/StyleGAN-NADA/e4e/utils/alignment.py deleted file mode 100644 index a02798f0f7c9fdcc319f7884a491b9e6580cc8aa..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/StyleGAN-NADA/e4e/utils/alignment.py +++ /dev/null @@ -1,115 +0,0 @@ -import numpy as np 
-import PIL -import PIL.Image -import scipy -import scipy.ndimage -import dlib - - -def get_landmark(filepath, predictor): - """get landmark with dlib - :return: np.array shape=(68, 2) - """ - detector = dlib.get_frontal_face_detector() - - img = dlib.load_rgb_image(filepath) - dets = detector(img, 1) - - for k, d in enumerate(dets): - shape = predictor(img, d) - - t = list(shape.parts()) - a = [] - for tt in t: - a.append([tt.x, tt.y]) - lm = np.array(a) - return lm - - -def align_face(filepath, predictor): - """ - :param filepath: str - :return: PIL Image - """ - - lm = get_landmark(filepath, predictor) - - lm_chin = lm[0: 17] # left-right - lm_eyebrow_left = lm[17: 22] # left-right - lm_eyebrow_right = lm[22: 27] # left-right - lm_nose = lm[27: 31] # top-down - lm_nostrils = lm[31: 36] # top-down - lm_eye_left = lm[36: 42] # left-clockwise - lm_eye_right = lm[42: 48] # left-clockwise - lm_mouth_outer = lm[48: 60] # left-clockwise - lm_mouth_inner = lm[60: 68] # left-clockwise - - # Calculate auxiliary vectors. - eye_left = np.mean(lm_eye_left, axis=0) - eye_right = np.mean(lm_eye_right, axis=0) - eye_avg = (eye_left + eye_right) * 0.5 - eye_to_eye = eye_right - eye_left - mouth_left = lm_mouth_outer[0] - mouth_right = lm_mouth_outer[6] - mouth_avg = (mouth_left + mouth_right) * 0.5 - eye_to_mouth = mouth_avg - eye_avg - - # Choose oriented crop rectangle. - x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] - x /= np.hypot(*x) - x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) - y = np.flipud(x) * [-1, 1] - c = eye_avg + eye_to_mouth * 0.1 - quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) - qsize = np.hypot(*x) * 2 - - # read image - img = PIL.Image.open(filepath) - - output_size = 256 - transform_size = 256 - enable_padding = True - - # Shrink. - shrink = int(np.floor(qsize / output_size * 0.5)) - if shrink > 1: - rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink))) - img = img.resize(rsize, PIL.Image.ANTIALIAS) - quad /= shrink - qsize /= shrink - - # Crop. - border = max(int(np.rint(qsize * 0.1)), 3) - crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), - int(np.ceil(max(quad[:, 1])))) - crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), - min(crop[3] + border, img.size[1])) - if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]: - img = img.crop(crop) - quad -= crop[0:2] - - # Pad. - pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), - int(np.ceil(max(quad[:, 1])))) - pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), - max(pad[3] - img.size[1] + border, 0)) - if enable_padding and max(pad) > border - 4: - pad = np.maximum(pad, int(np.rint(qsize * 0.3))) - img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect') - h, w, _ = img.shape - y, x, _ = np.ogrid[:h, :w, :1] - mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]), - 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3])) - blur = qsize * 0.02 - img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) - img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0) - img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB') - quad += pad[:2] - - # Transform. 
- img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR) - if output_size < transform_size: - img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS) - - # Return aligned image. - return img diff --git a/spaces/DragGan/DragGan/viz/drag_widget.py b/spaces/DragGan/DragGan/viz/drag_widget.py deleted file mode 100644 index aa0c454b60da12fec06fe804e21b209e0cb536d1..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/viz/drag_widget.py +++ /dev/null @@ -1,168 +0,0 @@ -import os -import torch -import numpy as np -import imgui -import dnnlib -from gui_utils import imgui_utils - -#---------------------------------------------------------------------------- - -class DragWidget: - def __init__(self, viz): - self.viz = viz - self.point = [-1, -1] - self.points = [] - self.targets = [] - self.is_point = True - self.last_click = False - self.is_drag = False - self.iteration = 0 - self.mode = 'point' - self.r_mask = 50 - self.show_mask = False - self.mask = torch.ones(256, 256) - self.lambda_mask = 20 - self.feature_idx = 5 - self.r1 = 3 - self.r2 = 12 - self.path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '_screenshots')) - self.defer_frames = 0 - self.disabled_time = 0 - - def action(self, click, down, x, y): - if self.mode == 'point': - self.add_point(click, x, y) - elif down: - self.draw_mask(x, y) - - def add_point(self, click, x, y): - if click: - self.point = [y, x] - elif self.last_click: - if self.is_drag: - self.stop_drag() - if self.is_point: - self.points.append(self.point) - self.is_point = False - else: - self.targets.append(self.point) - self.is_point = True - self.last_click = click - - def init_mask(self, w, h): - self.width, self.height = w, h - self.mask = torch.ones(h, w) - - def draw_mask(self, x, y): - X = torch.linspace(0, self.width, self.width) - Y = torch.linspace(0, self.height, self.height) - yy, xx = torch.meshgrid(Y, X) - circle = (xx - x)**2 + (yy - y)**2 < self.r_mask**2 - if self.mode == 'flexible': - self.mask[circle] = 0 - elif self.mode == 'fixed': - self.mask[circle] = 1 - - def stop_drag(self): - self.is_drag = False - self.iteration = 0 - - def set_points(self, points): - self.points = points - - def reset_point(self): - self.points = [] - self.targets = [] - self.is_point = True - - def load_points(self, suffix): - points = [] - point_path = self.path + f'_{suffix}.txt' - try: - with open(point_path, "r") as f: - for line in f.readlines(): - y, x = line.split() - points.append([int(y), int(x)]) - except: - print(f'Wrong point file path: {point_path}') - return points - - @imgui_utils.scoped_by_object_id - def __call__(self, show=True): - viz = self.viz - reset = False - if show: - with imgui_utils.grayed_out(self.disabled_time != 0): - imgui.text('Drag') - imgui.same_line(viz.label_w) - - if imgui_utils.button('Add point', width=viz.button_w, enabled='image' in viz.result): - self.mode = 'point' - - imgui.same_line() - reset = False - if imgui_utils.button('Reset point', width=viz.button_w, enabled='image' in viz.result): - self.reset_point() - reset = True - - imgui.text(' ') - imgui.same_line(viz.label_w) - if imgui_utils.button('Start', width=viz.button_w, enabled='image' in viz.result): - self.is_drag = True - if len(self.points) > len(self.targets): - self.points = self.points[:len(self.targets)] - - imgui.same_line() - if imgui_utils.button('Stop', width=viz.button_w, enabled='image' in viz.result): - self.stop_drag() - - imgui.text(' ') - 
imgui.same_line(viz.label_w) - imgui.text(f'Steps: {self.iteration}') - - imgui.text('Mask') - imgui.same_line(viz.label_w) - if imgui_utils.button('Flexible area', width=viz.button_w, enabled='image' in viz.result): - self.mode = 'flexible' - self.show_mask = True - - imgui.same_line() - if imgui_utils.button('Fixed area', width=viz.button_w, enabled='image' in viz.result): - self.mode = 'fixed' - self.show_mask = True - - imgui.text(' ') - imgui.same_line(viz.label_w) - if imgui_utils.button('Reset mask', width=viz.button_w, enabled='image' in viz.result): - self.mask = torch.ones(self.height, self.width) - imgui.same_line() - _clicked, self.show_mask = imgui.checkbox('Show mask', self.show_mask) - - imgui.text(' ') - imgui.same_line(viz.label_w) - with imgui_utils.item_width(viz.font_size * 6): - changed, self.r_mask = imgui.input_int('Radius', self.r_mask) - - imgui.text(' ') - imgui.same_line(viz.label_w) - with imgui_utils.item_width(viz.font_size * 6): - changed, self.lambda_mask = imgui.input_int('Lambda', self.lambda_mask) - - self.disabled_time = max(self.disabled_time - viz.frame_delta, 0) - if self.defer_frames > 0: - self.defer_frames -= 1 - viz.args.is_drag = self.is_drag - if self.is_drag: - self.iteration += 1 - viz.args.iteration = self.iteration - viz.args.points = [point for point in self.points] - viz.args.targets = [point for point in self.targets] - viz.args.mask = self.mask - viz.args.lambda_mask = self.lambda_mask - viz.args.feature_idx = self.feature_idx - viz.args.r1 = self.r1 - viz.args.r2 = self.r2 - viz.args.reset = reset - - -#---------------------------------------------------------------------------- diff --git a/spaces/EPFL-VILAB/MultiMAE/multimae/output_adapters.py b/spaces/EPFL-VILAB/MultiMAE/multimae/output_adapters.py deleted file mode 100644 index 328c2ba0652efb4673277ec0ca50f35b387691bf..0000000000000000000000000000000000000000 --- a/spaces/EPFL-VILAB/MultiMAE/multimae/output_adapters.py +++ /dev/null @@ -1,759 +0,0 @@ -# Copyright (c) EPFL VILAB. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# -------------------------------------------------------- -# Based on timm, DeiT, DINO, MoCo-v3, BEiT, MAE-priv MAE, DPT and ConvNeXt code bases -# https://github.com/rwightman/pytorch-image-models/tree/master/timm -# https://github.com/facebookresearch/deit -# https://github.com/facebookresearch/dino -# https://github.com/facebookresearch/moco-v3 -# https://github.com/microsoft/unilm/tree/master/beit -# https://github.com/BUPT-PRIV/MAE-priv -# https://github.com/facebookresearch/mae -# https://github.com/isl-org/DPT -# https://github.com/facebookresearch/ConvNeXt -# -------------------------------------------------------- - -from functools import partial -from typing import Dict, Iterable, List, Optional, Tuple, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F -from einops import rearrange, repeat - -from .multimae_utils import (Block, CrossAttention, Mlp, - build_2d_sincos_posemb, pair, trunc_normal_) -from .output_adapter_utils import (ConvNeXtBlock, Interpolate, - make_fusion_block, make_scratch) - - -class SpatialOutputAdapter(nn.Module): - """Cross-attention adapter for spatial outputs, like images or feature maps. - - :param num_channels: Number of input channels of the image/feature map - :param stride_level: Stride level compared to the full-sized image. - E.g. 4 for 1/4th the size of the image. 
- :param patch_size_full: Int or tuple of the patch size over the full image size. - Patch size for smaller inputs will be computed accordingly. - :param dim_tokens_enc: Dimension of tokens coming from encoder. Can be set using init method. - :param dim_tokens: Dimension of decoder tokens - :param depth: Number of additional (full self-attention) transformer layers after initial cross attention and MLP - :param learnable_pos_emb: Set to True to learn positional embeddings instead - :param image_size: Default image size. Used to initialize size of positional embeddings. - :param mlp_ratio: MLP hidden dim ratio - :param num_heads: Number of attention heads - :param qkv_bias: Set to True to enable bias - :param drop_rate: Probability of dropping attention layer outputs - :param attn_drop_rate: Probability of dropping attention matrix elements - :param drop_path_rate: DropPath drop rate - :param norm_layer: Type of normalization layer - :param use_task_queries: When set to True, adds task specific tokens from encoder (if available) - to the corresponding query entries - :param task: Task for which encoder tokens are added to the queries of the decoder (e.g. RGB if decoder is used for RGB) - :param context_tasks: Tasks / modalities from the encoder. Used to create learned embeddings for each task. - :param use_xattn: When set to True, attend to the tokens from the encoder through a cross-attention layer - """ - - def __init__(self, - num_channels: int, - stride_level: int, - patch_size_full: Union[int, Tuple[int, int]], - dim_tokens_enc: Optional[int] = None, - dim_tokens: int = 256, - depth: int = 0, - learnable_pos_emb: int = False, - image_size: Union[int, Tuple[int]] = 224, - mlp_ratio: int = 4.0, - num_heads: int = 8, - qkv_bias: bool = True, - drop_rate: float = 0.0, - attn_drop_rate: float = 0.0, - drop_path_rate: float = 0.0, - norm_layer: nn.Module = partial(nn.LayerNorm, eps=1e-6), - use_task_queries: bool = True, - task: Optional[str] = None, - context_tasks: Optional[list] = None, - use_xattn: bool = True - ): - super().__init__() - self.num_channels = num_channels - self.stride_level = stride_level - self.patch_size_full = pair(patch_size_full) - self.dim_tokens_enc = dim_tokens_enc - self.dim_tokens = dim_tokens - self.learnable_pos_emb = learnable_pos_emb - self.image_size = pair(image_size) - self.use_task_queries = use_task_queries - self.task = task - self.use_xattn = use_xattn - - # Actual patch height and width, taking into account stride of input - self.P_H = max(1, self.patch_size_full[0] // stride_level) - self.P_W = max(1, self.patch_size_full[1] // stride_level) - - if context_tasks is not None: - self.task_embeddings = nn.ParameterDict( - {task: nn.Parameter(torch.zeros(1, 1, self.dim_tokens)) for task in context_tasks}) - for embedding in self.task_embeddings.values(): - trunc_normal_(embedding, std=0.02) - - self.mask_token = nn.Parameter(torch.zeros(1, 1, self.dim_tokens)) - - # Fixed-size positional embeddings. 
Can be interpolated to different input sizes - h_posemb = self.image_size[0] // (self.stride_level * self.P_H) - w_posemb = self.image_size[1] // (self.stride_level * self.P_W) - if not self.learnable_pos_emb: - self.pos_emb = build_2d_sincos_posemb(h=h_posemb, w=w_posemb, embed_dim=self.dim_tokens) - self.pos_emb = nn.Parameter(self.pos_emb, requires_grad=False) - else: - self.pos_emb = nn.Parameter(torch.zeros(1, h_posemb, w_posemb, self.dim_tokens)) - trunc_normal_(self.pos_emb, std=0.02) - - # One cross attention layer followed by MLP block, an optional transformer, and an output projection - if self.use_xattn: - self.decoder = CrossAttention( - dim=self.dim_tokens, num_heads=num_heads, qkv_bias=qkv_bias, - attn_drop=attn_drop_rate, proj_drop=drop_rate) - self.context_norm = norm_layer(self.dim_tokens) - self.query_norm = norm_layer(self.dim_tokens) - self.out_norm = norm_layer(self.dim_tokens) - - mlp_hidden_dim = int(self.dim_tokens * mlp_ratio) - self.mlp = Mlp(in_features=self.dim_tokens, hidden_features=mlp_hidden_dim) - - # Optional full self-attention transformer layers - if depth > 0: - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule - self.decoder_transformer = nn.Sequential(*[ - Block(dim=self.dim_tokens, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, - attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) - for i in range(depth) - ]) - else: - self.decoder_transformer = nn.Identity() - - self.dim_patch = self.num_channels * self.P_H * self.P_W - self.out_proj = nn.Linear(self.dim_tokens, self.dim_patch) - - if self.dim_tokens_enc is not None: - self.init(dim_tokens_enc=dim_tokens_enc) - - def init(self, dim_tokens_enc: int = 768): - ''' - Initialize parts of decoder that are dependent on dimension of encoder tokens. - Should be called when setting up MultiMAE. 
- - :param dim_tokens_enc: Dimension of tokens coming from encoder - ''' - self.dim_tokens_enc = dim_tokens_enc - - # Projection of encoder tokens to the patch dimension - self.proj_context = nn.Linear(self.dim_tokens_enc, self.dim_tokens) - - @torch.jit.ignore - def no_weight_decay(self): - return {'pos_emb', 'mask_token', 'task_embeddings'} - - def generate_context_embeddings(self, input_info, - bs: int, - size: Tuple[int, int], - device: Optional[torch.device] = None): - context_embeddings = [] - for task, info in input_info["tasks"].items(): - if self.task_embeddings is not None and task in self.task_embeddings: - task_emb = repeat(self.task_embeddings[task], '() () d -> b n d', b=bs, n=info['num_tokens']) - else: - task_emb = torch.zeros((bs, info['num_tokens'], self.dim_tokens), device=device) - - if info['has_2d_posemb']: - pos_emb = F.interpolate(self.pos_emb, size=size, mode='bilinear', align_corners=False) - pos_emb = rearrange(pos_emb, 'b d nh nw -> b (nh nw) d') - assert info['num_tokens'] == pos_emb.shape[1] - task_emb = task_emb + pos_emb - - context_embeddings.append(task_emb) - - context_embeddings = torch.cat(context_embeddings, dim=1) - - return context_embeddings - - def get_queries_and_context(self, context_tokens, input_info, ids_keep, ids_restore): - B = context_tokens.shape[0] - H, W = input_info['image_size'] - # Number of patches in height and width - N_H = H // (self.stride_level * self.P_H) - N_W = W // (self.stride_level * self.P_W) - - if 'num_global_tokens' in input_info: - context_tokens_without_global = context_tokens[:, :-input_info['num_global_tokens']] - else: - context_tokens_without_global = context_tokens - - # Add mask tokens - mask_tokens = repeat(self.mask_token, '() () d -> b n d', b=B, - n=input_info['num_task_tokens'] - context_tokens_without_global.shape[1]) - context_with_mask = torch.cat([context_tokens_without_global, mask_tokens], dim=1) - - # Unshuffle context_with_mask - context_with_mask = torch.gather(context_with_mask, dim=1, - index=ids_restore.unsqueeze(-1).repeat(1, 1, context_with_mask.shape[2])) - - # Generate context_emb and add them to context - context_emb = self.generate_context_embeddings(input_info=input_info, bs=B, size=(N_H, N_W), - device=context_tokens.device) - context_with_mask = context_with_mask + context_emb - - # Generate queries - if self.use_task_queries and self.task in input_info['tasks']: - start_idx = input_info['tasks'][self.task]['start_idx'] - end_idx = input_info['tasks'][self.task]['end_idx'] - queries = context_with_mask[:, start_idx:end_idx] - else: - queries = repeat(self.mask_token, '() () d -> b n d', b=B, n=N_H * N_W) - queries_pos_emb = F.interpolate(self.pos_emb, size=(N_H, N_W), mode='bilinear', align_corners=False) - queries_pos_emb = rearrange(queries_pos_emb, 'b d nh nw -> b (nh nw) d') - queries = queries + queries_pos_emb - if self.task_embeddings is not None and self.task in self.task_embeddings: - queries_task_emb = repeat(self.task_embeddings[self.task], '() () d -> b n d', b=B, n=N_H * N_W) - queries = queries + queries_task_emb - - # Unshuffle context and keep only initial context (yes, again) - context_tokens_without_global = torch.gather(context_with_mask, dim=1, - index=ids_keep.unsqueeze(-1).repeat(1, 1, context_with_mask.shape[2])) - - # Add back global tokens - if 'num_global_tokens' in input_info: - context_tokens = torch.cat( - [context_tokens_without_global, context_tokens[:, -input_info['num_global_tokens']:]], dim=1) - else: - context_tokens = context_tokens_without_global - 
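-        # The decoder's cross-attention (see forward) uses these queries against the returned context tokens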
- return queries, context_tokens - - def forward(self, - encoder_tokens: torch.Tensor, - input_info: Dict, - ids_keep: torch.Tensor, - ids_restore: torch.Tensor, - ): - """ - Forward pass taking output tokens from encoder and optionally a subset of them corresponding - to this output adapter's task (needs an additional mask describing position of these tokens in the queries). - - :param encoder_tokens: Output of encoder - :param input_info: Dictionary with information about the input modalities - :param ids_keep: IDs of unmasked tokens (tokens given to the encoder) - :param ids_restore: IDs to unshuffle tokens - """ - assert self.dim_tokens_enc is not None, 'Need to call init(dim_tokens_enc) function first' - H, W = input_info['image_size'] - # Number of patches in height and width - N_H = H // (self.stride_level * self.P_H) - N_W = W // (self.stride_level * self.P_W) - - # Project encoder tokens to decoder tokens - context_tokens = self.proj_context(encoder_tokens) - - # Get queries and context - queries, context_tokens = self.get_queries_and_context(context_tokens, input_info, ids_keep, ids_restore) - - # Perform cross attention of queries to context tokens, followed by an MLP - if self.use_xattn: - x = self.decoder(self.query_norm(queries), self.context_norm(context_tokens)) - x = x + self.mlp(self.out_norm(x)) - else: - x = queries - - # Optional transformer layers if depth > 0 - x = self.decoder_transformer(x) - - # Project each token to (C * P_H * P_W) - x = self.out_proj(x) - - # Reshape sequence of patches into image - x = rearrange( - x, 'b (nh nw) (c ph pw) -> b c (nh ph) (nw pw)', - nh=N_H, nw=N_W, ph=self.P_H, pw=self.P_W, c=self.num_channels - ) - - return x - - -class LinearOutputAdapter(nn.Module): - """ - Linear output adapter. - - :param num_classes: Number of classes - :param dim_tokens_enc: Dimension of tokens from the encoder - :param use_mean_pooling: When set to True, uses mean pooling before linear classification head. - Otherwise, use last token (usually the global token) - :param norm_layer: Normalization layer - :param init_scale: Initialization scale for linear classification head - """ - - def __init__(self, - num_classes: int, - dim_tokens_enc: Optional[int] = None, - use_mean_pooling: bool = True, - norm_layer: nn.Module = partial(nn.LayerNorm, eps=1e-6), - init_scale: float = 1.0): - super().__init__() - self.num_classes = num_classes - self.dim_tokens_enc = dim_tokens_enc - self.use_mean_pooling = use_mean_pooling - self.norm_layer = norm_layer - self.init_scale = init_scale - - if self.dim_tokens_enc is not None: - self.init(dim_tokens_enc=dim_tokens_enc) - - def init(self, dim_tokens_enc: int = 768): - """ - Initialize parts of decoder that are dependent on dimension of encoder tokens. - Should be called when setting up MultiMAE. 
- - :param dim_tokens_enc: Dimension of tokens coming from encoder - """ - self.dim_tokens_enc = dim_tokens_enc - - self.norm = self.norm_layer(self.dim_tokens_enc) - self.head = nn.Linear(dim_tokens_enc, self.num_classes) if self.num_classes > 0 else nn.Identity() - - self.apply(self._init_weights) - self.head.weight.data.mul_(self.init_scale) - self.head.bias.data.mul_(self.init_scale) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - def get_classifier(self): - return self.head - - def reset_classifier(self, num_classes, global_pool=''): - self.num_classes = num_classes - self.init(dim_tokens_enc=self.dim_tokens_enc) - - def forward(self, - encoder_tokens: torch.Tensor, - **kwargs): - - if self.use_mean_pooling: - x = encoder_tokens.mean(1) - else: - # Global token is added at the end - x = encoder_tokens[:, -1] - - x = self.head(self.norm(x)) - return x - - -class SegmenterMaskTransformerAdapter(nn.Module): - """Output adapter inspired by the Segmenter-Mask architecture - - This head is the implementation of `Segmenter: `_. - - :param num_classes: Number of classes - :param depth: Depth of decoder - :param num_heads: Number of attention heads - :param embed_dim: Dimension of decoder tokens - :param mlp_ratio: MLP hidden dim ratio - :param drop_path_rate: DropPath drop rate - :param drop_rate: Dropout after MLPs and Attention - :param attn_drop_rate: Attention matrix drop rate - :param qkv_bias: Set to False to disable bias - :param main_tasks: Tasks to use for the adapter. Only tokens coming from these tasks are kept. - :param patch_size: Size of patches - :param norm_layer: Type of normalization layer - """ - - def __init__( - self, - num_classes, - depth: int = 2, - num_heads: int = 12, - embed_dim: int = 768, - mlp_ratio=4, - drop_path_rate=0.1, - drop_rate=0.0, - attn_drop_rate=0.0, - qkv_bias=True, - main_tasks: str = ('rgb',), - patch_size: int = 16, - norm_layer: nn.Module = partial(nn.LayerNorm, eps=1e-6), - **kwargs, - ): - super().__init__() - self.main_tasks = main_tasks - self.patch_size = patch_size - self.embed_dim = embed_dim - self.num_classes = num_classes - - self.cls_emb = nn.Parameter(torch.zeros(1, num_classes, embed_dim)) - trunc_normal_(self.cls_emb, std=0.02) - - self.patch_proj = nn.Linear(embed_dim, embed_dim, bias=False) - self.classes_proj = nn.Linear(embed_dim, embed_dim, bias=False) - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] - self.blocks = nn.ModuleList([ - Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, - attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) - for i in range(depth) - ]) - - self.decoder_norm = norm_layer(embed_dim) - self.mask_norm = norm_layer(num_classes) - self.apply(self._init_weights) - - def init(self, dim_tokens_enc: int = 768): - """ - Initialize parts of decoder that are dependent on dimension of encoder tokens. - Should be called when setting up MultiMAE. 
- - :param dim_tokens_enc: Dimension of tokens coming from encoder - """ - self.in_channels = dim_tokens_enc * len(self.main_tasks) - - # Projection of encoder tokens to the patch dimension - self.proj_dec = nn.Linear(self.in_channels, self.embed_dim) - self._init_weights(self.proj_dec) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - def adapt_tokens(self, encoder_tokens, input_info): - # Adapt tokens - x = [] - for task in self.main_tasks: - start_idx = input_info['tasks'][task]['start_idx'] - end_idx = input_info['tasks'][task]['end_idx'] - x.append(encoder_tokens[:, start_idx:end_idx]) - - x = torch.cat(x, dim=-1) - return x - - def forward(self, encoder_tokens: torch.Tensor, input_info: Dict): - H, W = input_info['image_size'] - N_H, N_W = H // self.patch_size, W // self.patch_size - - x = self.adapt_tokens(encoder_tokens, input_info) - - x = self.proj_dec(x) - cls_emb = self.cls_emb.expand(x.shape[0], -1, -1) - x = torch.cat((x, cls_emb), 1) - - for blk in self.blocks: - x = blk(x) - - x = self.decoder_norm(x) - - patches = self.patch_proj(x[:, :-self.num_classes]) - cls_seg_feat = self.classes_proj(x[:, -self.num_classes:]) - - patches = F.normalize(patches, dim=2, p=2) - cls_seg_feat = F.normalize(cls_seg_feat, dim=2, p=2) - - masks = patches @ cls_seg_feat.transpose(1, 2) - masks = self.mask_norm(masks) - masks = rearrange(masks, "b (nh nw) c -> b c nh nw", nh=N_H, nw=N_W) - - # Interpolate to semseg res - masks = F.interpolate(masks, size=(H, W), mode="bilinear") - - return masks - - -class ConvNeXtAdapter(nn.Module): - """Output adapter with ConvNext blocks for semantic segmentation - - :param num_classes: Number of classes - :param num_heads: Number of attention heads - :param embed_dim: Token dimension after projection, and before reshaping operation. - :param preds_per_patch: Increases size of feature map by reshaping each patch Each patch gets reshaped - from embed_dim x 1 x 1 to (embed_dim / preds_per_patch) x (preds_per_patch ** 0.5) x (preds_per_patch ** 0.5) - :param main_tasks: Tasks to use for the adapter. Only tokens coming from these tasks are kept. - :param patch_size: Size of patches - :param depth: Number of ConvNeXt blocks - :interpolate_mode: Interpolation mode for final upsampling - """ - - def __init__( - self, - num_classes, - embed_dim: int = 6144, - preds_per_patch: int = 16, - main_tasks: Iterable[str] = ('rgb',), - patch_size: int = 16, - depth: int = 4, - interpolate_mode: str = 'bilinear', - **kwargs, - ): - super().__init__() - self.main_tasks = main_tasks - self.patch_size = patch_size - self.embed_dim = embed_dim - self.preds_per_patch = preds_per_patch - self.class_dim = embed_dim // preds_per_patch - self.num_classes = num_classes - self.interpolate_mode = interpolate_mode - - self.blocks = nn.Sequential(*[ - ConvNeXtBlock(dim=self.class_dim) - for _ in range(depth) - ]) - self.final_layer = nn.Conv2d(self.class_dim, self.num_classes, 1) - self.apply(self._init_weights) - - def init(self, dim_tokens_enc: int = 768): - """ - Initialize parts of decoder that are dependent on dimension of encoder tokens. - Should be called when setting up MultiMAE. 
- - :param dim_tokens_enc: Dimension of tokens coming from encoder - """ - self.in_channels = dim_tokens_enc * len(self.main_tasks) - - # Projection of encoder tokens to the patch dimension - self.proj_dec = nn.Linear(self.in_channels, self.embed_dim) - self._init_weights(self.proj_dec) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - def adapt_tokens(self, encoder_tokens, input_info): - # Adapt tokens - x = [] - for task in self.main_tasks: - start_idx = input_info['tasks'][task]['start_idx'] - end_idx = input_info['tasks'][task]['end_idx'] - x.append(encoder_tokens[:, start_idx:end_idx]) - - x = torch.cat(x, dim=-1) - return x - - def forward(self, encoder_tokens: torch.Tensor, input_info: Dict): - H, W = input_info['image_size'] - N_H, N_W = H // self.patch_size, W // self.patch_size - - x = self.adapt_tokens(encoder_tokens, input_info) - - x = self.proj_dec(x) - x = rearrange(x, "b n (p c) -> b (n p) c", n=N_H * N_W, p=self.preds_per_patch, c=self.class_dim) - x = rearrange(x, "b (nh nw ph pw) c -> b c (nh ph) (nw pw)", - nh=N_H, nw=N_W, - ph=int(self.preds_per_patch ** 0.5), - pw=int(self.preds_per_patch ** 0.5)) - x = self.blocks(x) - x = self.final_layer(x) - - # Interpolate to semseg res - x = F.interpolate(x, size=(H, W), mode=self.interpolate_mode) - - return x - - -class DPTOutputAdapter(nn.Module): - """DPT output adapter. - - :param num_classes: Number of output channels - :param stride_level: tride level compared to the full-sized image. - E.g. 4 for 1/4th the size of the image. - :param patch_size_full: Int or tuple of the patch size over the full image size. - Patch size for smaller inputs will be computed accordingly. 
- :param hooks: Index of intermediate layers - :param layer_dims: Dimension of intermediate layers - :param feature_dim: Feature dimension - :param use_bn: If set to True, activates batch norm - :param dim_tokens_enc: Dimension of tokens coming from encoder - """ - - def __init__(self, - num_classes: int = 3, - stride_level: int = 1, - patch_size: Union[int, Tuple[int, int]] = 16, - main_tasks: Iterable[str] = ('rgb',), - hooks: List[int] = [2, 5, 8, 11], - layer_dims: List[int] = [96, 192, 384, 768], - feature_dim: int = 256, - use_bn: bool = False, - dim_tokens_enc: Optional[int] = None, - head_type: str = 'regression', - **kwargs): - super().__init__() - self.num_channels = num_classes - self.stride_level = stride_level - self.patch_size = pair(patch_size) - self.main_tasks = main_tasks - self.hooks = hooks - self.layer_dims = layer_dims - self.feature_dim = feature_dim - self.dim_tokens_enc = dim_tokens_enc * len(self.main_tasks) if dim_tokens_enc is not None else None - self.head_type = head_type - - # Actual patch height and width, taking into account stride of input - self.P_H = max(1, self.patch_size[0] // stride_level) - self.P_W = max(1, self.patch_size[1] // stride_level) - - self.scratch = make_scratch(layer_dims, feature_dim, groups=1, expand=False) - - self.scratch.refinenet1 = make_fusion_block(feature_dim, use_bn) - self.scratch.refinenet2 = make_fusion_block(feature_dim, use_bn) - self.scratch.refinenet3 = make_fusion_block(feature_dim, use_bn) - self.scratch.refinenet4 = make_fusion_block(feature_dim, use_bn) - - if self.head_type == 'regression': - # The "DPTDepthModel" head - self.head = nn.Sequential( - nn.Conv2d(feature_dim, feature_dim // 2, kernel_size=3, stride=1, padding=1), - Interpolate(scale_factor=2, mode="bilinear", align_corners=True), - nn.Conv2d(feature_dim // 2, 32, kernel_size=3, stride=1, padding=1), - nn.ReLU(True), - nn.Conv2d(32, self.num_channels, kernel_size=1, stride=1, padding=0) - ) - elif self.head_type == 'semseg': - # The "DPTSegmentationModel" head - self.head = nn.Sequential( - nn.Conv2d(feature_dim, feature_dim, kernel_size=3, padding=1, bias=False), - nn.BatchNorm2d(feature_dim) if use_bn else nn.Identity(), - nn.ReLU(True), - nn.Dropout(0.1, False), - nn.Conv2d(feature_dim, self.num_channels, kernel_size=1), - Interpolate(scale_factor=2, mode="bilinear", align_corners=True), - ) - else: - raise ValueError('DPT head_type must be "regression" or "semseg".') - - if self.dim_tokens_enc is not None: - self.init(dim_tokens_enc=dim_tokens_enc) - - def init(self, dim_tokens_enc: int = 768): - """ - Initialize parts of decoder that are dependent on dimension of encoder tokens. - Should be called when setting up MultiMAE. 
- - :param dim_tokens_enc: Dimension of tokens coming from encoder - """ - self.dim_tokens_enc = dim_tokens_enc * len(self.main_tasks) - - # Set up activation postprocessing layers - - self.act_1_postprocess = nn.Sequential( - nn.Conv2d( - in_channels=self.dim_tokens_enc, - out_channels=self.layer_dims[0], - kernel_size=1, stride=1, padding=0, - ), - nn.ConvTranspose2d( - in_channels=self.layer_dims[0], - out_channels=self.layer_dims[0], - kernel_size=4, stride=4, padding=0, - bias=True, dilation=1, groups=1, - ) - ) - - self.act_2_postprocess = nn.Sequential( - nn.Conv2d( - in_channels=self.dim_tokens_enc, - out_channels=self.layer_dims[1], - kernel_size=1, stride=1, padding=0, - ), - nn.ConvTranspose2d( - in_channels=self.layer_dims[1], - out_channels=self.layer_dims[1], - kernel_size=2, stride=2, padding=0, - bias=True, dilation=1, groups=1, - ) - ) - - self.act_3_postprocess = nn.Sequential( - nn.Conv2d( - in_channels=self.dim_tokens_enc, - out_channels=self.layer_dims[2], - kernel_size=1, stride=1, padding=0, - ) - ) - - self.act_4_postprocess = nn.Sequential( - nn.Conv2d( - in_channels=self.dim_tokens_enc, - out_channels=self.layer_dims[3], - kernel_size=1, stride=1, padding=0, - ), - nn.Conv2d( - in_channels=self.layer_dims[3], - out_channels=self.layer_dims[3], - kernel_size=3, stride=2, padding=1, - ) - ) - - self.act_postprocess = nn.ModuleList([ - self.act_1_postprocess, - self.act_2_postprocess, - self.act_3_postprocess, - self.act_4_postprocess - ]) - - def adapt_tokens(self, encoder_tokens, input_info): - # Adapt tokens - x = [] - for task in self.main_tasks: - start_idx = input_info['tasks'][task]['start_idx'] - end_idx = input_info['tasks'][task]['end_idx'] - x.append(encoder_tokens[:, start_idx:end_idx]) - - x = torch.cat(x, dim=-1) - return x - - def forward(self, encoder_tokens: List[torch.Tensor], input_info: Dict): - assert self.dim_tokens_enc is not None, 'Need to call init(dim_tokens_enc) function first' - H, W = input_info['image_size'] - # Number of patches in height and width - N_H = H // (self.stride_level * self.P_H) - N_W = W // (self.stride_level * self.P_W) - - # Hook decoder onto 4 layers from specified ViT layers - layers = [encoder_tokens[hook] for hook in self.hooks] - - # Extract only task-relevant tokens and ignore global tokens. 
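-        # adapt_tokens slices each hooked layer to the main_tasks tokens and concatenates them along the channel dimension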
-        layers = [self.adapt_tokens(l, input_info) for l in layers]
-
-        # Reshape tokens to spatial representation
-        layers = [rearrange(l, 'b (nh nw) c -> b c nh nw', nh=N_H, nw=N_W) for l in layers]
-
-        # Postprocess activations
-        layers = [self.act_postprocess[idx](l) for idx, l in enumerate(layers)]
-
-        # Project layers to chosen feature dim
-        layers = [self.scratch.layer_rn[idx](l) for idx, l in enumerate(layers)]
-
-        # Fuse layers using refinement stages
-        path_4 = self.scratch.refinenet4(layers[3])
-        path_3 = self.scratch.refinenet3(path_4, layers[2])
-        path_2 = self.scratch.refinenet2(path_3, layers[1])
-        path_1 = self.scratch.refinenet1(path_2, layers[0])
-
-        # Output head
-        out = self.head(path_1)
-
-        return out
diff --git a/spaces/EdBianchi/JustMovie/app.py b/spaces/EdBianchi/JustMovie/app.py
deleted file mode 100644
index 5b137af386f682659e5e1e1d26a1fe78ea7a36c5..0000000000000000000000000000000000000000
--- a/spaces/EdBianchi/JustMovie/app.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import pickle
-import streamlit as st
-import requests
-import pandas as pd
-
-# set page setting
-st.set_page_config(page_title='TopMovies')
-
-# set history var
-if 'history' not in st.session_state:
-    st.session_state.history = []
-
-# import preprocessed data
-data = pd.read_csv("./data/tags.csv")
-
-# import similarity (to be cached)
-def importSim(filename):
-    sim = pickle.load(open(filename, 'rb'))
-    return sim
-
-similarity = importSim('similarity.pkl')
-
-# recommender function
-def recommend_image(movie, sim):
-    poster = []
-    plot = []
-    # index from dataframe
-    index = data[data['title'] == movie].index[0]
-    dist = dict(enumerate(sim[index]))
-    dist = dict(sorted(dist.items(), reverse=True, key = lambda item: item[1]))
-    # the first match is the movie itself, so it is dropped below via poster[1:]
-    cnt = 0
-    for key in dist:
-        cnt = cnt+1
-        if cnt < 15:
-            title = data.iloc[key].title
-            try:
-                posterRes, plotRes = get_poster_plot(title)
-                poster.append(posterRes)
-                plot.append(plotRes)
-            except:
-                pass
-        else:
-            break
-
-    return poster[1:], plot[1:]
-
-# get poster
-def get_poster_plot(title):
-    r = requests.get("http://www.omdbapi.com/?i=tt3896198&apikey=37765f04&t=" + title).json()
-    posterElement = r["Poster"]
-    plotElement = r["Plot"]
-    return posterElement, plotElement
-
-# update last viewed list
-def update_las_viewed():
-    if len(st.session_state.history) > 3:
-        st.session_state.history.pop()
-
-# sidebar
-st.sidebar.write("""
-This is a content-based recommender system. Pick a movie from the list or search for it and then wait for the recommendations.
-You will get nine movies, with their posters and plots.
-""") - -# title -st.write("# Movie Recommendation System") -st.write("Pick a movie from the list and enjoy some new stuffs!") - -# select box -title = st.selectbox("", data["title"]) -if title not in st.session_state.history: - st.session_state.history.insert(0, title) -update_las_viewed() - -# recommend -with st.spinner("Getting the best movies..."): - recs, plots = recommend_image(title, similarity) - -# recommendation cols -st.write("## What to watch next....") -col1, col2, col3 = st.columns(3) -with col1: - st.image(recs[0]) - st.write(plots[0]) -with col2: - st.image(recs[1]) - st.write(plots[1]) -with col3: - st.image(recs[2]) - st.write(plots[2]) - -col4, col5, col6 = st.columns(3) -with col4: - st.image(recs[3]) - st.write(plots[3]) -with col5: - st.image(recs[4]) - st.write(plots[4]) -with col6: - st.image(recs[5]) - st.write(plots[5]) - -col7, col8, col9 = st.columns(3) -with col7: - st.image(recs[6]) - st.write(plots[6]) -with col8: - st.image(recs[7]) - st.write(plots[7]) -with col9: - st.image(recs[8]) - st.write(plots[8]) - -# last viewed -st.write("## Last viewed:") -r1, r2, r3 = st.columns(3) -with r1: - try: - st.image(get_poster_plot(st.session_state.history[0])[0]) - except IndexError: - pass - -with r2: - try: - st.image(get_poster_plot(st.session_state.history[1])[0]) - except IndexError: - pass - -with r3: - try: - st.image(get_poster_plot(st.session_state.history[2])[0]) - except IndexError: - pass - diff --git a/spaces/Eddevs/brian-challenge/README.md b/spaces/Eddevs/brian-challenge/README.md deleted file mode 100644 index 83ee0190328e754314f4087e95955540b60b06e8..0000000000000000000000000000000000000000 --- a/spaces/Eddevs/brian-challenge/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Brian Challenge -emoji: 📚 -colorFrom: indigo -colorTo: yellow -sdk: streamlit -sdk_version: 1.2.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Eddycrack864/Applio-Inference/Applio-RVC-Fork/utils/dependency.py b/spaces/Eddycrack864/Applio-Inference/Applio-RVC-Fork/utils/dependency.py deleted file mode 100644 index b70338b02d31b1ef455fbac817d418d328db518d..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/Applio-RVC-Fork/utils/dependency.py +++ /dev/null @@ -1,170 +0,0 @@ -import os -import csv -import shutil -import tarfile -import subprocess -from pathlib import Path -from datetime import datetime - -def install_packages_but_jank_af(): - packages = ['build-essential', 'python3-dev', 'ffmpeg', 'aria2'] - pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0', - 'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5', - 'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12', - 'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1', - 'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv' , 'av'] - - print("Updating and installing system packages...") - for package in packages: - print(f"Installing {package}...") - subprocess.check_call(['apt-get', 'install', '-qq', '-y', package]) - - print("Updating and installing pip packages...") - subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages) - - print('Packages up to date.') - - -def setup_environment(ForceUpdateDependencies, ForceTemporaryStorage): - # Mounting Google 
Drive - if not ForceTemporaryStorage: - from google.colab import drive - - if not os.path.exists('/content/drive'): - drive.mount('/content/drive') - else: - print('Drive is already mounted. Proceeding...') - - # Function to install dependencies with progress - def install_packages(): - packages = ['build-essential', 'python3-dev', 'ffmpeg', 'aria2'] - pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0', - 'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5', - 'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12', - 'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1', - 'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv' , 'av'] - - print("Updating and installing system packages...") - for package in packages: - print(f"Installing {package}...") - subprocess.check_call(['apt-get', 'install', '-qq', '-y', package]) - - print("Updating and installing pip packages...") - subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages) - - - print('Packages up to date.') - - # Function to scan a directory and writes filenames and timestamps - def scan_and_write(base_path, output_file): - with open(output_file, 'w', newline='') as f: - writer = csv.writer(f) - for dirpath, dirs, files in os.walk(base_path): - for filename in files: - fname = os.path.join(dirpath, filename) - try: - mtime = os.path.getmtime(fname) - writer.writerow([fname, mtime]) - except Exception as e: - print(f'Skipping irrelevant nonexistent file {fname}: {str(e)}') - print(f'Finished recording filesystem timestamps to {output_file}.') - - # Function to compare files - def compare_files(old_file, new_file): - old_files = {} - new_files = {} - - with open(old_file, 'r') as f: - reader = csv.reader(f) - old_files = {rows[0]:rows[1] for rows in reader} - - with open(new_file, 'r') as f: - reader = csv.reader(f) - new_files = {rows[0]:rows[1] for rows in reader} - - removed_files = old_files.keys() - new_files.keys() - added_files = new_files.keys() - old_files.keys() - unchanged_files = old_files.keys() & new_files.keys() - - changed_files = {f for f in unchanged_files if old_files[f] != new_files[f]} - - for file in removed_files: - print(f'File has been removed: {file}') - - for file in changed_files: - print(f'File has been updated: {file}') - - return list(added_files) + list(changed_files) - - # Check if CachedRVC.tar.gz exists - if ForceTemporaryStorage: - file_path = '/content/CachedRVC.tar.gz' - else: - file_path = '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz' - - content_file_path = '/content/CachedRVC.tar.gz' - extract_path = '/' - - if not os.path.exists(file_path): - folder_path = os.path.dirname(file_path) - os.makedirs(folder_path, exist_ok=True) - print('No cached dependency install found. Attempting to download GitHub backup..') - - try: - download_url = "https://github.com/kalomaze/QuickMangioFixes/releases/download/release3/CachedRVC.tar.gz" - subprocess.run(["wget", "-O", file_path, download_url]) - print('Download completed successfully!') - except Exception as e: - print('Download failed:', str(e)) - - # Delete the failed download file - if os.path.exists(file_path): - os.remove(file_path) - print('Failed download file deleted. 
Continuing manual backup..') - - if Path(file_path).exists(): - if ForceTemporaryStorage: - print('Finished downloading CachedRVC.tar.gz.') - else: - print('CachedRVC.tar.gz found on Google Drive. Proceeding to copy and extract...') - - # Check if ForceTemporaryStorage is True and skip copying if it is - if ForceTemporaryStorage: - pass - else: - shutil.copy(file_path, content_file_path) - - print('Beginning backup copy operation...') - - with tarfile.open(content_file_path, 'r:gz') as tar: - for member in tar.getmembers(): - target_path = os.path.join(extract_path, member.name) - try: - tar.extract(member, extract_path) - except Exception as e: - print('Failed to extract a file (this isn\'t normal)... forcing an update to compensate') - ForceUpdateDependencies = True - print(f'Extraction of {content_file_path} to {extract_path} completed.') - - if ForceUpdateDependencies: - install_packages() - ForceUpdateDependencies = False - else: - print('CachedRVC.tar.gz not found. Proceeding to create an index of all current files...') - scan_and_write('/usr/', '/content/usr_files.csv') - - install_packages() - - scan_and_write('/usr/', '/content/usr_files_new.csv') - changed_files = compare_files('/content/usr_files.csv', '/content/usr_files_new.csv') - - with tarfile.open('/content/CachedRVC.tar.gz', 'w:gz') as new_tar: - for file in changed_files: - new_tar.add(file) - print(f'Added to tar: {file}') - - os.makedirs('/content/drive/MyDrive/RVC_Cached', exist_ok=True) - shutil.copy('/content/CachedRVC.tar.gz', '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz') - print('Updated CachedRVC.tar.gz copied to Google Drive.') - print('Dependencies fully up to date; future runs should be faster.') - diff --git a/spaces/EronSamez/RVC_HFmeu/MDXNet.py b/spaces/EronSamez/RVC_HFmeu/MDXNet.py deleted file mode 100644 index 9b7eb43844ad0d4f9ce61287ccf9a8a4206d3853..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/MDXNet.py +++ /dev/null @@ -1,272 +0,0 @@ -import soundfile as sf -import torch, pdb, os, warnings, librosa -import numpy as np -import onnxruntime as ort -from tqdm import tqdm -import torch - -dim_c = 4 - - -class Conv_TDF_net_trim: - def __init__( - self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024 - ): - super(Conv_TDF_net_trim, self).__init__() - - self.dim_f = dim_f - self.dim_t = 2**dim_t - self.n_fft = n_fft - self.hop = hop - self.n_bins = self.n_fft // 2 + 1 - self.chunk_size = hop * (self.dim_t - 1) - self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to( - device - ) - self.target_name = target_name - self.blender = "blender" in model_name - - out_c = dim_c * 4 if target_name == "*" else dim_c - self.freq_pad = torch.zeros( - [1, out_c, self.n_bins - self.dim_f, self.dim_t] - ).to(device) - - self.n = L // 2 - - def stft(self, x): - x = x.reshape([-1, self.chunk_size]) - x = torch.stft( - x, - n_fft=self.n_fft, - hop_length=self.hop, - window=self.window, - center=True, - return_complex=True, - ) - x = torch.view_as_real(x) - x = x.permute([0, 3, 1, 2]) - x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape( - [-1, dim_c, self.n_bins, self.dim_t] - ) - return x[:, :, : self.dim_f] - - def istft(self, x, freq_pad=None): - freq_pad = ( - self.freq_pad.repeat([x.shape[0], 1, 1, 1]) - if freq_pad is None - else freq_pad - ) - x = torch.cat([x, freq_pad], -2) - c = 4 * 2 if self.target_name == "*" else 2 - x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape( - [-1, 2, self.n_bins, self.dim_t] - ) - x = 
x.permute([0, 2, 3, 1]) - x = x.contiguous() - x = torch.view_as_complex(x) - x = torch.istft( - x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True - ) - return x.reshape([-1, c, self.chunk_size]) - - -def get_models(device, dim_f, dim_t, n_fft): - return Conv_TDF_net_trim( - device=device, - model_name="Conv-TDF", - target_name="vocals", - L=11, - dim_f=dim_f, - dim_t=dim_t, - n_fft=n_fft, - ) - - -warnings.filterwarnings("ignore") -cpu = torch.device("cpu") -if torch.cuda.is_available(): - device = torch.device("cuda:0") -elif torch.backends.mps.is_available(): - device = torch.device("mps") -else: - device = torch.device("cpu") - - -class Predictor: - def __init__(self, args): - self.args = args - self.model_ = get_models( - device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft - ) - self.model = ort.InferenceSession( - os.path.join(args.onnx, self.model_.target_name + ".onnx"), - providers=["CUDAExecutionProvider", "CPUExecutionProvider"], - ) - print("onnx load done") - - def demix(self, mix): - samples = mix.shape[-1] - margin = self.args.margin - chunk_size = self.args.chunks * 44100 - assert not margin == 0, "margin cannot be zero!" - if margin > chunk_size: - margin = chunk_size - - segmented_mix = {} - - if self.args.chunks == 0 or samples < chunk_size: - chunk_size = samples - - counter = -1 - for skip in range(0, samples, chunk_size): - counter += 1 - - s_margin = 0 if counter == 0 else margin - end = min(skip + chunk_size + margin, samples) - - start = skip - s_margin - - segmented_mix[skip] = mix[:, start:end].copy() - if end == samples: - break - - sources = self.demix_base(segmented_mix, margin_size=margin) - """ - mix:(2,big_sample) - segmented_mix:offset->(2,small_sample) - sources:(1,2,big_sample) - """ - return sources - - def demix_base(self, mixes, margin_size): - chunked_sources = [] - progress_bar = tqdm(total=len(mixes)) - progress_bar.set_description("Processing") - for mix in mixes: - cmix = mixes[mix] - sources = [] - n_sample = cmix.shape[1] - model = self.model_ - trim = model.n_fft // 2 - gen_size = model.chunk_size - 2 * trim - pad = gen_size - n_sample % gen_size - mix_p = np.concatenate( - (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1 - ) - mix_waves = [] - i = 0 - while i < n_sample + pad: - waves = np.array(mix_p[:, i : i + model.chunk_size]) - mix_waves.append(waves) - i += gen_size - mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu) - with torch.no_grad(): - _ort = self.model - spek = model.stft(mix_waves) - if self.args.denoise: - spec_pred = ( - -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5 - + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5 - ) - tar_waves = model.istft(torch.tensor(spec_pred)) - else: - tar_waves = model.istft( - torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0]) - ) - tar_signal = ( - tar_waves[:, :, trim:-trim] - .transpose(0, 1) - .reshape(2, -1) - .numpy()[:, :-pad] - ) - - start = 0 if mix == 0 else margin_size - end = None if mix == list(mixes.keys())[::-1][0] else -margin_size - if margin_size == 0: - end = None - sources.append(tar_signal[:, start:end]) - - progress_bar.update(1) - - chunked_sources.append(sources) - _sources = np.concatenate(chunked_sources, axis=-1) - # del self.model - progress_bar.close() - return _sources - - def prediction(self, m, vocal_root, others_root, format): - os.makedirs(vocal_root, exist_ok=True) - os.makedirs(others_root, exist_ok=True) - basename = os.path.basename(m) - mix, rate = 
librosa.load(m, mono=False, sr=44100) - if mix.ndim == 1: - mix = np.asfortranarray([mix, mix]) - mix = mix.T - sources = self.demix(mix.T) - opt = sources[0].T - if format in ["wav", "flac"]: - sf.write( - "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate - ) - sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate) - else: - path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename) - path_other = "%s/%s_others.wav" % (others_root, basename) - sf.write(path_vocal, mix - opt, rate) - sf.write(path_other, opt, rate) - if os.path.exists(path_vocal): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path_vocal, path_vocal[:-4] + ".%s" % format) - ) - if os.path.exists(path_other): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path_other, path_other[:-4] + ".%s" % format) - ) - - -class MDXNetDereverb: - def __init__(self, chunks): - self.onnx = "uvr5_weights/onnx_dereverb_By_FoxJoy" - self.shifts = 10 #'Predict with randomised equivariant stabilisation' - self.mixing = "min_mag" # ['default','min_mag','max_mag'] - self.chunks = chunks - self.margin = 44100 - self.dim_t = 9 - self.dim_f = 3072 - self.n_fft = 6144 - self.denoise = True - self.pred = Predictor(self) - - def _path_audio_(self, input, vocal_root, others_root, format): - self.pred.prediction(input, vocal_root, others_root, format) - - -if __name__ == "__main__": - dereverb = MDXNetDereverb(15) - from time import time as ttime - - t0 = ttime() - dereverb._path_audio_( - "雪雪伴奏对消HP5.wav", - "vocal", - "others", - ) - t1 = ttime() - print(t1 - t0) - - -""" - -runtime\python.exe MDXNet.py - -6G: -15/9:0.8G->6.8G -14:0.8G->6.5G -25:炸 - -half15:0.7G->6.6G,22.69s -fp32-15:0.7G->6.6G,20.85s - -""" diff --git a/spaces/FauziNL/Voice_anime2/infer_pack/attentions.py b/spaces/FauziNL/Voice_anime2/infer_pack/attentions.py deleted file mode 100644 index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000 --- a/spaces/FauziNL/Voice_anime2/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from infer_pack import commons -from infer_pack import modules -from infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = 
self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - 
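-        # Key/value projections get the same Xavier-uniform init; with proximal_init, conv_k is overwritten with conv_q's weights below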
nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. 
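-        # Padding guarantees the slice below always covers 2*length-1 relative positions, even when length exceeds the window size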
- pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/FoxMeo/fire-detector/utils/aws/mime.sh b/spaces/FoxMeo/fire-detector/utils/aws/mime.sh deleted file mode 100644 index c319a83cfbdf09bea634c3bd9fca737c0b1dd505..0000000000000000000000000000000000000000 --- a/spaces/FoxMeo/fire-detector/utils/aws/mime.sh +++ /dev/null @@ -1,26 +0,0 @@ -# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ -# This script will run on every instance restart, not only on first start -# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- - -Content-Type: multipart/mixed; boundary="//" -MIME-Version: 1.0 - ---// -Content-Type: text/cloud-config; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="cloud-config.txt" - -#cloud-config -cloud_final_modules: -- [scripts-user, always] - ---// -Content-Type: text/x-shellscript; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="userdata.txt" - -#!/bin/bash -# --- paste contents of userdata.sh here --- ---// diff --git a/spaces/GXSA/bingo/src/components/chat-notification.tsx b/spaces/GXSA/bingo/src/components/chat-notification.tsx deleted file mode 100644 index 3474e522992c43a4d1d0eadcf205a9760d5b930b..0000000000000000000000000000000000000000 --- a/spaces/GXSA/bingo/src/components/chat-notification.tsx +++ /dev/null @@ -1,91 +0,0 @@ -import { useEffect } from 'react' -import Image from 'next/image' - -import IconWarning from '@/assets/images/warning.svg' -import { ChatError, ErrorCode, ChatMessageModel } from '@/lib/bots/bing/types' -import { ExternalLink } from './external-link' -import { useBing } from '@/lib/hooks/use-bing' - -export interface ChatNotificationProps extends Pick, 'bot'> { - message?: ChatMessageModel -} - -function getAction(error: ChatError, reset: () => 
void) { - if (error.code === ErrorCode.THROTTLE_LIMIT) { - reset() - return ( -
    - ) - } - if (error.code === ErrorCode.BING_IP_FORBIDDEN) { - return ( - - 你的服务器或代理已被封禁,请更换服务器或使用代理重试 - - ) - } - if (error.code === ErrorCode.BING_TRY_LATER) { - return ( - - 创建会话失败,请稍候重试 - - ) - } - if (error.code === ErrorCode.BING_FORBIDDEN) { - return ( - - 你的账号已在黑名单,请尝试更换账号及申请解封 - - ) - } - if (error.code === ErrorCode.CONVERSATION_LIMIT) { - return ( -
    - 当前话题已中止,请点 - 重新开始 - 开启新的对话 -
    - ) - } - if (error.code === ErrorCode.BING_CAPTCHA) { - return ( - - 点击通过人机验证 - - ) - } - if (error.code === ErrorCode.BING_UNAUTHORIZED) { - reset() - return ( - 没有获取到身份信息或身份信息失效,点此重新设置 - ) - } - return error.message -} - -export function ChatNotification({ message, bot }: ChatNotificationProps) { - useEffect(() => { - window.scrollBy(0, 2000) - }, [message]) - - if (!message?.error) return - - return ( -
    -
    -
    -
    -
    - error - {getAction(message.error, () => bot.resetConversation())} -
    -
    -
    -
    -
    - ) -} diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/models/core/attention_image_goal.py b/spaces/Gen-Sim/Gen-Sim/cliport/models/core/attention_image_goal.py deleted file mode 100644 index 1d2a1049be2b03309a85db0337e33551dd561aa2..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/models/core/attention_image_goal.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Attention module.""" - -import numpy as np -import torch -import torch.nn.functional as F - - -from cliport.models.core.attention import Attention - - -class AttentionImageGoal(Attention): - """Attention (a.k.a Pick) with image-goals module.""" - - def __init__(self, stream_fcn, in_shape, n_rotations, preprocess, cfg, device): - super().__init__(stream_fcn, in_shape, n_rotations, preprocess, cfg, device) - - def forward(self, inp_img, goal_img, softmax=True): - """Forward pass.""" - # Input image. - in_data = np.pad(inp_img, self.padding, mode='constant') - in_shape = (1,) + in_data.shape - in_data = in_data.reshape(in_shape) - in_tens = torch.from_numpy(in_data).to(dtype=torch.float, device=self.device) - - goal_tensor = np.pad(goal_img, self.padding, mode='constant') - goal_shape = (1,) + goal_tensor.shape - goal_tensor = goal_tensor.reshape(goal_shape) - goal_tensor = torch.from_numpy(goal_tensor.copy()).to(dtype=torch.float, device=self.device) - in_tens = in_tens * goal_tensor - - # Rotation pivot. - pv = np.array(in_data.shape[1:3]) // 2 - - # Rotate input. - in_tens = in_tens.permute(0, 3, 1, 2) - in_tens = in_tens.repeat(self.n_rotations, 1, 1, 1) - in_tens = self.rotator(in_tens, pivot=pv) - - # Forward pass. - logits = [] - for x in in_tens: - logits.append(self.attend(x)) - logits = torch.cat(logits, dim=0) - - # Rotate back output. - logits = self.rotator(logits, reverse=True, pivot=pv) - logits = torch.cat(logits, dim=0) - c0 = self.padding[:2, 0] - c1 = c0 + inp_img.shape[:2] - logits = logits[:, :, c0[0]:c1[0], c0[1]:c1[1]] - - logits = logits.permute(1, 2, 3, 0) # D H W C - output = logits.reshape(1, np.prod(logits.shape)) - if softmax: - output = F.softmax(output, dim=-1) - output = output.reshape(logits.shape[1:]) - return output \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/misc/add_task_from_code.py b/spaces/Gen-Sim/Gen-Sim/misc/add_task_from_code.py deleted file mode 100644 index 65202a3101532460767c2bdec4b1fb0915401ee7..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/misc/add_task_from_code.py +++ /dev/null @@ -1,72 +0,0 @@ -import re -import os - -import os -import json -import argparse - -import IPython -# "place-blue-on-line-ends": { -# "task-name": "place-blue-on-line-ends", -# "task-description": "Pick up each blue box and accurately place it at the end of a green line.", -# "assets-used": [ -# "line/line-template.urdf", -# "box/box-template.urdf" -# ] -# } -def extract_dict(res, task_name, prefix="new_task"): - """ parse task dictionary from the code itself """ - task_dict = {"task-name": task_name, - "assets-used": []} - pattern = r'\'(.*?).urdf' - asset_string = re.findall(pattern, res) - - pattern = r'"""(.*?)"""' - description_string = re.findall(pattern, res, re.DOTALL) - task_dict["assets-used"] = [file + ".urdf" for file in asset_string] - task_dict["task-description"] = description_string[0] - print(description_string[0]) - print(asset_string) - return task_dict - - -# remove some tasks from the list -parser = argparse.ArgumentParser() - -parser.add_argument( - "--files", "-f", type=str, default="exps" -) -args = parser.parse_args() - - 
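-# Load the JSON registries of previously generated tasks and task code files; any new tasks passed via --files are appended below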
-data_path = "prompts/data" -generated_task_path = os.path.join(data_path, "generated_tasks.json") -generated_task_code_path = os.path.join(data_path, "generated_task_codes.json") - -generated_tasks = json.load(open(generated_task_path)) -generated_task_codes = json.load(open(generated_task_code_path)) - - -task_names = args.files.split(",") -print("Task names:", task_names) - -for task_name in task_names: - - task_name = task_name.replace("_", "-") - task_name_py = task_name.replace("-", "_") + ".py" - file_path = "cliport/generated_tasks/" + task_name_py - if os.path.exists(file_path) and task_name not in generated_tasks: - print("add task:", task_name) - - code = open(file_path).read() - generated_tasks[task_name] = extract_dict(code, task_name) - - if task_name_py not in generated_task_codes: - generated_task_codes.append(task_name_py) - -with open(generated_task_code_path, "w") as outfile: - json.dump(generated_task_codes, outfile, indent=4) - -with open(generated_task_path, "w") as outfile: - json.dump(generated_tasks, outfile, indent=4) - diff --git a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts_finetuning/gen5_build_car.sh b/spaces/Gen-Sim/Gen-Sim/scripts/metascripts_finetuning/gen5_build_car.sh deleted file mode 100644 index b0a5302919b8a57d8c6c589a2e01af8c975fcd16..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts_finetuning/gen5_build_car.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -#SBATCH -c 10 -#SBATCH -n 1 -#SBATCH -o logs/%j.out -#SBATCH --exclusive - - -sh scripts/traintest_scripts/train_test_multi_task_finetune_goal.sh data "[align-rope,sweeping-piles,align-box-corner,towers-of-hanoi-seq-seen-colors,assembling-kits-seq-seen]" "[build-car]" 5taskgen_unrelated_finetune - -sh scripts/traintest_scripts/train_test_multi_task_finetune_goal.sh data "[build-two-circles,build-wheel,build-bridge,towers-of-hanoi-seq-seen-colors,stack-block-pyramid]" "[build-car]" 5taskgen_related_finetune diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/match_costs/builder.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/match_costs/builder.py deleted file mode 100644 index 6894017d42eb16ee4a8ae3ed660a71cda3ad9940..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/match_costs/builder.py +++ /dev/null @@ -1,8 +0,0 @@ -from mmcv.utils import Registry, build_from_cfg - -MATCH_COST = Registry('Match Cost') - - -def build_match_cost(cfg, default_args=None): - """Builder of IoU calculator.""" - return build_from_cfg(cfg, MATCH_COST, default_args) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/utils/make_divisible.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/utils/make_divisible.py deleted file mode 100644 index 75ad756052529f52fe83bb95dd1f0ecfc9a13078..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/utils/make_divisible.py +++ /dev/null @@ -1,27 +0,0 @@ -def make_divisible(value, divisor, min_value=None, min_ratio=0.9): - """Make divisible function. - - This function rounds the channel number to the nearest value that can be - divisible by the divisor. It is taken from the original tf repo. It ensures - that all layers have a channel number that is divisible by divisor. It can - be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa - - Args: - value (int): The original channel number. 
- divisor (int): The divisor to fully divide the channel number. - min_value (int): The minimum value of the output channel. - Default: None, means that the minimum value equal to the divisor. - min_ratio (float): The minimum ratio of the rounded channel number to - the original channel number. Default: 0.9. - - Returns: - int: The modified output channel number. - """ - - if min_value is None: - min_value = divisor - new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) - # Make sure that round down does not go down by more than (1-min_ratio). - if new_value < min_ratio * value: - new_value += divisor - return new_value diff --git a/spaces/HIT-TMG/dialogue-bart-large-chinese/app.py b/spaces/HIT-TMG/dialogue-bart-large-chinese/app.py deleted file mode 100644 index a51ab4f9d86b7eb3653b5435875f15b09c0cc802..0000000000000000000000000000000000000000 --- a/spaces/HIT-TMG/dialogue-bart-large-chinese/app.py +++ /dev/null @@ -1,98 +0,0 @@ -import gradio as gr -from typing import List, Optional -from transformers import BertTokenizer, BartForConditionalGeneration - -title = "HIT-TMG/dialogue-bart-large-chinese" -description = """ -This is a seq2seq model pre-trained on several Chinese dialogue datasets, from bart-large-chinese. -However it is just a simple demo for this pre-trained model. It's better to fine-tune it on downstream tasks for better performance \n -See some details of model card at https://huggingface.co/HIT-TMG/dialogue-bart-large-chinese . \n\n -Besides starting the conversation from scratch, you can also input the whole dialogue history utterance by utterance seperated by '[SEP]'. \n -""" - - -tokenizer = BertTokenizer.from_pretrained("HIT-TMG/dialogue-bart-large-chinese") -model = BartForConditionalGeneration.from_pretrained("HIT-TMG/dialogue-bart-large-chinese") - -tokenizer.truncation_side = 'left' -max_length = 512 - -examples = [ - ["你有什么爱好吗"], - ["你好。[SEP]嘿嘿你好,请问你最近在忙什么呢?[SEP]我最近养了一只狗狗,我在训练它呢。"] -] - - -def chat_func(input_utterance: str, history: Optional[List[str]] = None): - if history is not None: - history.extend(input_utterance.split(tokenizer.sep_token)) - else: - history = input_utterance.split(tokenizer.sep_token) - - history_str = "对话历史:" + tokenizer.sep_token.join(history) - - input_ids = tokenizer(history_str, - return_tensors='pt', - truncation=True, - max_length=max_length, - ).input_ids - - output_ids = model.generate(input_ids, - max_new_tokens=30, - top_k=32, - num_beams=4, - repetition_penalty=1.2, - no_repeat_ngram_size=4)[0] - - response = tokenizer.decode(output_ids, skip_special_tokens=True) - - history.append(response) - - - if len(history) % 2 == 0: - display_utterances = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)] - else: - display_utterances = [("", history[0])] + [(history[i], history[i + 1]) for i in range(1, len(history) - 1, 2)] - - return display_utterances, history - - -demo = gr.Interface(fn=chat_func, - title=title, - description=description, - inputs=[gr.Textbox(lines=1, placeholder="Input current utterance"), "state"], - examples=examples, - outputs=["chatbot", "state"]) - - -if __name__ == "__main__": - demo.launch() - -# def chat(history): -# history_prefix = "对话历史:" -# history = history_prefix + history -# -# outputs = tokenizer(history, -# return_tensors='pt', -# padding=True, -# truncation=True, -# max_length=512) -# -# input_ids = outputs.input_ids -# output_ids = model.generate(input_ids)[0] -# -# return tokenizer.decode(output_ids, skip_special_tokens=True) -# -# -# chatbot = 
gr.Chatbot().style(color_map=("green", "pink")) -# demo = gr.Interface( -# chat, -# inputs=gr.Textbox(lines=8, placeholder="输入你的对话历史(请以'[SEP]'作为每段对话的间隔)\nInput the dialogue history (Please split utterances by '[SEP]')"), -# title=title, -# description=description, -# outputs =["text"] -# ) -# -# -# if __name__ == "__main__": -# demo.launch() diff --git a/spaces/HMinions/new-Bing-with_your_cookies/app.py b/spaces/HMinions/new-Bing-with_your_cookies/app.py deleted file mode 100644 index e6f2908c74ccf92d230f500a80e295f7ea21dfb7..0000000000000000000000000000000000000000 --- a/spaces/HMinions/new-Bing-with_your_cookies/app.py +++ /dev/null @@ -1,84 +0,0 @@ -import gradio as gr -import json -import asyncio -import os -from EdgeGPT import Chatbot, ConversationStyle -import re - -#read cookie from local file -# with open('./cookies.json', 'r') as f: -# cookies = json.load(f) -#如果你是选择读取仓库内的cookie.json文件,那么不需要再向函数内传递cookies参数 -# 也可以删去gr.Tab("Cookies"):这一界面对应的代码 -async def get_model_reply(prompt,style,cookies,context=[]): - # combines the new question with a previous context - context += [prompt] - cookies = json.loads(cookies) - # given the most recent context (4096 characters) - # continue the text up to 2048 tokens ~ 8192 charaters - bot = Chatbot(cookies=cookies) - prompt2='\n\n'.join(context)[:4096] - raw_data = await bot.ask(prompt2, conversation_style=style) - await bot.close() - #print(raw_data) - try: - try: - response = raw_data["item"]["messages"][1]["text"] - except: - response = raw_data["item"]["messages"][1]["adaptiveCards"][0]["body"][0]["text"] - response = re.sub(r'\^', '', response) - response = response.rstrip() - context += [response] - - # list of (user, bot) responses. We will use this format later - responses = [(u, b) for u, b in zip(context[::2], context[1::2])] - return responses, context - except: - try: - if raw_data["item"]["throttling"]["numUserMessagesInConversation"] > raw_data["item"]["throttling"]["maxNumUserMessagesInConversation"]: - response="> **Oops, I think we've reached the end of this conversation. Please reset the bot!**" - context += [response] - - # list of (user, bot) responses. We will use this format later - responses = [(u, b) for u, b in zip(context[::2], context[1::2])] - return responses, context - - except: - if raw_data["item"]["result"]["value"] == "Throttled": - response="> **Error: We're sorry, but you've reached the maximum number of messages you can send to Bing in a 24-hour period. Check back later!**" - context += [response] - - # list of (user, bot) responses. We will use this format later - responses = [(u, b) for u, b in zip(context[::2], context[1::2])] - return responses, context -# query = 'Which is the largest country by area in the world?' 
-# style="precise" -# responses, context =asyncio.run(get_model_reply(query,style,context=[])) -# -# print(' ' + responses[-1][0]) -# print(' ' + responses[-1][1]) -with gr.Blocks() as dialog_app: - with gr.Tab("Cookies"): - cookies = gr.Textbox(lines=2, label="输入bing.com中的cookies") - with gr.Tab("New Bing Chat"): - gr.Markdown("# A Simple Web to use New Bing Without Magic") - chatbot = gr.Chatbot() - state = gr.State([]) - markdown = gr.Markdown(label="Output") - - with gr.Row(): - inputs = gr.Textbox( - label="输入问题", - placeholder="Enter text and press enter" - ) - style = gr.Dropdown(label="回答倾向", choices=["creative", "balanced", "precise"], multiselect=False, - value="balanced", type="value") - - inputs.submit(get_model_reply, [inputs, style, cookies, state], [chatbot, state]) - send = gr.Button("Send") - send.click(get_model_reply, [inputs, style, cookies, state], [chatbot, state]) - -# launches the app in a new local port -dialog_app.launch() -# 为网站设置密码防止滥用 -# dialog_app.launch(auth=("admin", "pass1234")) \ No newline at end of file diff --git a/spaces/HaleyCH/HaleyCH_Theme/app.py b/spaces/HaleyCH/HaleyCH_Theme/app.py deleted file mode 100644 index d70c6a474d9cf037518d78728e050dec0a05c256..0000000000000000000000000000000000000000 --- a/spaces/HaleyCH/HaleyCH_Theme/app.py +++ /dev/null @@ -1,147 +0,0 @@ -import time - -from theme_dropdown import create_theme_dropdown # noqa: F401 - -import gradio as gr - -dropdown, js = create_theme_dropdown() - -with gr.Blocks(theme='HaleyCH/HaleyCH_Theme') as demo: - with gr.Row().style(equal_height=True): - with gr.Column(scale=10): - gr.Markdown( - """ - # Theme preview: `HaleyCH_Theme` - To use this theme, set `theme='HaleyCH/HaleyCH_Theme'` in `gr.Blocks()` or `gr.Interface()`. - You can append an `@` and a semantic version expression, e.g. @>=1.0.0,<2.0.0 to pin to a given version - of this theme. - """ - ) - with gr.Column(scale=3): - with gr.Box(): - dropdown.render() - toggle_dark = gr.Button(value="Toggle Dark").style(full_width=True) - - dropdown.change(None, dropdown, None, _js=js) - toggle_dark.click( - None, - _js=""" - () => { - document.body.classList.toggle('dark'); - document.querySelector('gradio-app').style.backgroundColor = 'var(--color-background-primary)' - } - """, - ) - - name = gr.Textbox( - label="Name", - info="Full name, including middle name. No special characters.", - placeholder="John Doe", - value="John Doe", - interactive=True, - ) - - with gr.Row(): - slider1 = gr.Slider(label="Slider 1") - slider2 = gr.Slider(label="Slider 2") - gr.CheckboxGroup(["A", "B", "C"], label="Checkbox Group") - - with gr.Row(): - with gr.Column(variant="panel", scale=1): - gr.Markdown("## Panel 1") - radio = gr.Radio( - ["A", "B", "C"], - label="Radio", - info="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.", - ) - drop = gr.Dropdown(["Option 1", "Option 2", "Option 3"], show_label=False) - drop_2 = gr.Dropdown( - ["Option A", "Option B", "Option C"], - multiselect=True, - value=["Option A"], - label="Dropdown", - interactive=True, - ) - check = gr.Checkbox(label="Go") - with gr.Column(variant="panel", scale=2): - img = gr.Image( - "https://gradio.app/assets/img/header-image.jpg", label="Image" - ).style(height=320) - with gr.Row(): - go_btn = gr.Button("Go", label="Primary Button", variant="primary") - clear_btn = gr.Button( - "Clear", label="Secondary Button", variant="secondary" - ) - - def go(*args): - time.sleep(3) - return "https://gradio.app/assets/img/header-image.jpg" - - go_btn.click(go, [radio, drop, drop_2, check, name], img, api_name="go") - - def clear(): - time.sleep(0.2) - return None - - clear_btn.click(clear, None, img) - - with gr.Row(): - btn1 = gr.Button("Button 1").style(size="sm") - btn2 = gr.UploadButton().style(size="sm") - stop_btn = gr.Button("Stop", label="Stop Button", variant="stop").style( - size="sm" - ) - - with gr.Row(): - gr.Dataframe(value=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], label="Dataframe") - gr.JSON( - value={"a": 1, "b": 2, "c": {"test": "a", "test2": [1, 2, 3]}}, label="JSON" - ) - gr.Label(value={"cat": 0.7, "dog": 0.2, "fish": 0.1}) - gr.File() - with gr.Row(): - gr.ColorPicker() - gr.Video("https://gradio-static-files.s3.us-west-2.amazonaws.com/world.mp4") - gr.Gallery( - [ - ( - "https://gradio-static-files.s3.us-west-2.amazonaws.com/lion.jpg", - "lion", - ), - ( - "https://gradio-static-files.s3.us-west-2.amazonaws.com/logo.png", - "logo", - ), - ( - "https://gradio-static-files.s3.us-west-2.amazonaws.com/tower.jpg", - "tower", - ), - ] - ).style(height="200px", grid=2) - - with gr.Row(): - with gr.Column(scale=2): - chatbot = gr.Chatbot([("Hello", "Hi")], label="Chatbot") - chat_btn = gr.Button("Add messages") - - def chat(history): - time.sleep(2) - yield [["How are you?", "I am good."]] - - chat_btn.click( - lambda history: history - + [["How are you?", "I am good."]] - + (time.sleep(2) or []), - chatbot, - chatbot, - ) - with gr.Column(scale=1): - with gr.Accordion("Advanced Settings"): - gr.Markdown("Hello") - gr.Number(label="Chatbot control 1") - gr.Number(label="Chatbot control 2") - gr.Number(label="Chatbot control 3") - - -if __name__ == "__main__": - demo.queue().launch() diff --git a/spaces/Hallucinate/demo/taming/data/faceshq.py b/spaces/Hallucinate/demo/taming/data/faceshq.py deleted file mode 100644 index 6912d04b66a6d464c1078e4b51d5da290f5e767e..0000000000000000000000000000000000000000 --- a/spaces/Hallucinate/demo/taming/data/faceshq.py +++ /dev/null @@ -1,134 +0,0 @@ -import os -import numpy as np -import albumentations -from torch.utils.data import Dataset - -from taming.data.base import ImagePaths, NumpyPaths, ConcatDatasetWithIndex - - -class FacesBase(Dataset): - def __init__(self, *args, **kwargs): - super().__init__() - self.data = None - self.keys = None - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - example = self.data[i] - ex = {} - if self.keys is not None: - for k in self.keys: - ex[k] = example[k] - else: - ex = example - return ex - - -class CelebAHQTrain(FacesBase): - def __init__(self, size, keys=None): - super().__init__() - root = "data/celebahq" - with open("data/celebahqtrain.txt", "r") as f: - relpaths = f.read().splitlines() - paths = [os.path.join(root, relpath) for 
relpath in relpaths] - self.data = NumpyPaths(paths=paths, size=size, random_crop=False) - self.keys = keys - - -class CelebAHQValidation(FacesBase): - def __init__(self, size, keys=None): - super().__init__() - root = "data/celebahq" - with open("data/celebahqvalidation.txt", "r") as f: - relpaths = f.read().splitlines() - paths = [os.path.join(root, relpath) for relpath in relpaths] - self.data = NumpyPaths(paths=paths, size=size, random_crop=False) - self.keys = keys - - -class FFHQTrain(FacesBase): - def __init__(self, size, keys=None): - super().__init__() - root = "data/ffhq" - with open("data/ffhqtrain.txt", "r") as f: - relpaths = f.read().splitlines() - paths = [os.path.join(root, relpath) for relpath in relpaths] - self.data = ImagePaths(paths=paths, size=size, random_crop=False) - self.keys = keys - - -class FFHQValidation(FacesBase): - def __init__(self, size, keys=None): - super().__init__() - root = "data/ffhq" - with open("data/ffhqvalidation.txt", "r") as f: - relpaths = f.read().splitlines() - paths = [os.path.join(root, relpath) for relpath in relpaths] - self.data = ImagePaths(paths=paths, size=size, random_crop=False) - self.keys = keys - - -class FacesHQTrain(Dataset): - # CelebAHQ [0] + FFHQ [1] - def __init__(self, size, keys=None, crop_size=None, coord=False): - d1 = CelebAHQTrain(size=size, keys=keys) - d2 = FFHQTrain(size=size, keys=keys) - self.data = ConcatDatasetWithIndex([d1, d2]) - self.coord = coord - if crop_size is not None: - self.cropper = albumentations.RandomCrop(height=crop_size,width=crop_size) - if self.coord: - self.cropper = albumentations.Compose([self.cropper], - additional_targets={"coord": "image"}) - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - ex, y = self.data[i] - if hasattr(self, "cropper"): - if not self.coord: - out = self.cropper(image=ex["image"]) - ex["image"] = out["image"] - else: - h,w,_ = ex["image"].shape - coord = np.arange(h*w).reshape(h,w,1)/(h*w) - out = self.cropper(image=ex["image"], coord=coord) - ex["image"] = out["image"] - ex["coord"] = out["coord"] - ex["class"] = y - return ex - - -class FacesHQValidation(Dataset): - # CelebAHQ [0] + FFHQ [1] - def __init__(self, size, keys=None, crop_size=None, coord=False): - d1 = CelebAHQValidation(size=size, keys=keys) - d2 = FFHQValidation(size=size, keys=keys) - self.data = ConcatDatasetWithIndex([d1, d2]) - self.coord = coord - if crop_size is not None: - self.cropper = albumentations.CenterCrop(height=crop_size,width=crop_size) - if self.coord: - self.cropper = albumentations.Compose([self.cropper], - additional_targets={"coord": "image"}) - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - ex, y = self.data[i] - if hasattr(self, "cropper"): - if not self.coord: - out = self.cropper(image=ex["image"]) - ex["image"] = out["image"] - else: - h,w,_ = ex["image"].shape - coord = np.arange(h*w).reshape(h,w,1)/(h*w) - out = self.cropper(image=ex["image"], coord=coord) - ex["image"] = out["image"] - ex["coord"] = out["coord"] - ex["class"] = y - return ex diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/summary/randeng_t5_70M_summary_predict.sh b/spaces/HaloMaster/chinesesummary/fengshen/examples/summary/randeng_t5_70M_summary_predict.sh deleted file mode 100644 index ccbf410fa92b1d5e09c97d6ae3af7bb4ff121c64..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/examples/summary/randeng_t5_70M_summary_predict.sh +++ /dev/null @@ -1,138 +0,0 @@ -#!/bin/bash -#SBATCH 
--job-name=randeng_t5_77M_summary_predict -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=2 -#SBATCH --gres=gpu:2 # number of gpus -#SBATCH --cpus-per-task=30 -#SBATCH -o %x-%j.log - -set -x -e - -echo "START TIME: $(date)" -MODEL_NAME=randeng_t5_77M_summary_predict -MICRO_BATCH_SIZE=16 -ROOT_DIR=/cognitive_comp/ganruyi/experiments/${MODEL_NAME} -if [ ! -d ${ROOT_DIR} ];then - mkdir ${ROOT_DIR} - echo ${ROOT_DIR} created!!!!!!!!!!!!!! -else - echo ${ROOT_DIR} exist!!!!!!!!!!!!!!! -fi - -output_save_path=$ROOT_DIR/randeng_t5_77M_predict_lcsts.json -if [ -f ${output_save_path} ];then - echo ${output_save_path} exist, rm it!!!!!!!!!!!!!!!!! - rm ${output_save_path} -fi - -ZERO_STAGE=1 - -config_json="${ROOT_DIR}/ds_config.${MODEL_NAME}.json" - -# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() -cat < $config_json -{ - "train_micro_batch_size_per_gpu": ${MICRO_BATCH_SIZE}, - "steps_per_print": 100, - "gradient_clipping": 1.0, - "zero_optimization": { - "stage": $ZERO_STAGE, - "contiguous_gradients": false, - "overlap_comm": true, - "reduce_scatter": true, - "reduce_bucket_size": 50000000, - "allgather_bucket_size": 500000000 - }, - "optimizer": { - "type": "Adam", - "params": { - "lr": 1e-4, - "betas": [ - 0.9, - 0.95 - ], - "eps": 1e-8, - "weight_decay": 5e-2 - } - }, - "scheduler": { - "type": "WarmupLR", - "params":{ - "warmup_min_lr": 5e-6, - "warmup_max_lr": 1e-4 - } - }, - "zero_allow_untested_optimizer": false, - "fp16": { - "enabled": true, - "loss_scale": 0, - "loss_scale_window": 1000, - "hysteresis": 2, - "min_loss_scale": 1 - }, - "activation_checkpointing": { - "partition_activations": false, - "contiguous_memory_optimization": false - }, - "wall_clock_breakdown": false -} -EOT - -export PL_DEEPSPEED_CONFIG_PATH=$config_json -export TORCH_EXTENSIONS_DIR=/cognitive_comp/ganruyi/tmp/torch_extendsions -export MASTER_PORT=$[RANDOM%10000+50000] - -# --strategy deepspeed_stage_${ZERO_STAGE} \ -TRAINER_ARGS=" - --max_epochs 1 \ - --gpus 2 \ - --num_nodes 1 \ - --strategy ddp \ - --default_root_dir $ROOT_DIR \ - --dirpath $ROOT_DIR/ckpt \ - --save_top_k 3 \ - --monitor train_loss \ - --mode min \ - --save_last \ - --every_n_train_steps 0 \ -" -DATA_DIR=/cognitive_comp/ganruyi/data_datasets_LCSTS_LCSTS/ -prompt="summary:" -DATA_ARGS=" - --datasets_name lcsts \ - --num_workers 30 \ - --train_batchsize $MICRO_BATCH_SIZE \ - --val_batchsize $MICRO_BATCH_SIZE \ - --test_batchsize $MICRO_BATCH_SIZE \ - --max_enc_length 128 \ - --max_dec_length 64 \ - --val_datasets_field val \ - --prompt $prompt \ -" -# --prompt $prompt \ -# --pretrained_model_path /cognitive_comp/ganruyi/experiments/randeng_t5_77M_summary/ckpt/hf_pretrained_epoch1_step75019 \ - -MODEL_ARGS=" - --pretrained_model_path /cognitive_comp/gaoxinyu/pretrained_model/bart-759M \ - --output_save_path $ROOT_DIR/randeng_t5_77M_predict_lcsts.json \ - --learning_rate 1e-4 \ - --weight_decay 0.1 \ - --precision 16 \ - --warmup 0.01 \ - --do_eval_only \ - --max_dec_length 32 \ -" - -SCRIPTS_PATH=/cognitive_comp/ganruyi/Fengshenbang-LM/fengshen/examples/summary/seq2seq_summary.py -SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif - -export CMD=" \ - $SCRIPTS_PATH \ - $TRAINER_ARGS \ - $MODEL_ARGS \ - $DATA_ARGS \ - " -echo $CMD -source activate base -# srun singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH bash -c '/home/ganruyi/anaconda3/bin/python $CMD' -python $CMD \ No newline at end of file diff --git 
a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/ulm/README.md b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/ulm/README.md deleted file mode 100644 index 01459121cebefc61fdc2eae201462aa78d699111..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/ulm/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# Unit Language Model (ULM) - -Here you can find links to the pre-trained ULMs and instructions on training new models using fairseq. At the end of the page, we also share how to run sampling for those models and provide pointers to the transcribed prompts we used. - -## Pre-trained models - -Using the links below, you can download pre-trained models for various unit types and vocabulary sizes: - -| | 50 | 100 | 200 -|-|-|-|- -| LogMel Filterbank | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/logmel/lm_km50/logmel50_lm.tgz) | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/logmel/lm_km100/logmel100_lm.tgz) | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/logmel/lm_km200/logmel200_lm.tgz) -| Modified CPC | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/cpc/lm_km50/cpc50_lm.tgz) | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/cpc/lm_km100/cpc100_lm.tgz) | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/cpc/lm_km200/cpc200_lm.tgz) -| HuBERT | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/hubert/lm_km50/hubert50_lm.tgz) | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/hubert/lm_km100/hubert100_lm.tgz) | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/hubert/lm_km200/hubert200_lm.tgz) -| Wav2Vec 2.0 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/w2v2/lm_km50/w2v2_50_lm.tgz) | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/w2v2/lm_km100/w2v2_100_lm.tgz) | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/w2v2/lm_km200/w2v2_200_lm.tgz) - - -## Preprocessing data -Assuming that unit-transcribed train, valid, and test sets are located in `data/train.txt`, `data/valid.txt`, and `data/test.txt`, respectively, -we run the following command to get a preprocessed version of the datast in `data-bin`: - -```bash -fairseq-preprocess --only-source \ - --trainpref data/train.txt --validpref data/valid.txt --testpref data/test.txt \ - --destdir data-bin/ --workers 40 -``` -As a result, the `data-bin` directory should appear. - -## Fitting a Unit Language Model (ULM) -As an ULM, we train a standard fairseq Transformer LM. Assuming 8 GPUs used for training, a good starting point for an ULM training would be: -```bash - fairseq-train data-bin/ \ - --task=language_modeling \ - --arch=transformer_lm_big \ - --share-decoder-input-output-embed \ - --dropout=0.1 \ - --attention-dropout=0.1 \ - --optimizer=adam \ - --adam-betas='(0.9, 0.98)' \ - --clip-norm=1.0 \ - --lr=0.0005 \ - --lr-scheduler=inverse_sqrt \ - --warmup-updates=4000 \ - --warmup-init-lr=1e-07 \ - --tokens-per-sample=3072 \ - --update-freq=16 \ - --max-tokens=4096 \ - --num-workers=4 \ - --skip-invalid-size-inputs-valid-test \ - --max-update=500000 \ - --log-interval=10 \ - --seed=100501 \ - --fp16 \ - --sample-break-mode=eos -``` -This command will train a Transformer-large model (12 layers). You can train other standard LM models provided by fairseq, e.g. specify `--arch=transformer_lm` to train a smaller (6-layer) Transformer model. 
When training with a different number of GPUs, it might be a good idea to adjust the `update-freq` parameter. To save the GPU memory at an expense of additional computation, it can be useful to enable activation checkpointing with `--checkpoint-activations`. - -## Sampling from an ULM -Once an ULM was trained, we can use it for generating new utterances. Suppose, that the prompts are given in a file named `prompts.txt`. Then we can sample continuations by running the following command: - -```bash - python sample.py data-bin/ \ - --path=checkpoints/checkpoint_best.pt --task=language_modeling --sampling --temperature=0.7 \ - --seed=1 --prompts=prompts.txt --output=samples.txt --max-len-a=0 --max-len-b=500 \ - --prefix-size=-1 --batch-size=16 --fp16 --samples-per-prompt=10 -``` -Here, `--prefix-size` controls the number of tokens that are used to prime the ULM. When set to a positive value, the sampling script will take first `prefix-size` tokens to prompt the ULM; with `0` it runs unconditional sampling and with `-1` the entire prompt is used. -`--samples-per-prompt` specifies how many utterances are generated with every prompt which can be useful when generating multiple prompt continuations. In this command, `--max-len-a` and `--max-len-b` control the number of generated tokens. - -When using a pretrained model from above, `data-bin` should point to the unpacked directory (with `dict.txt` file). - -Evaluation-time, to generate prompts, we used utterances from LibriSpeech dev-clean and test-clean that are longer than 6s. We took first 3s from an utterance as a prompt. Unit transcripts of those prompts can be downloaded here: [[dev]](https://dl.fbaipublicfiles.com/textless_nlp/gslm/eval_data/dev_prompts.tgz) [[test]](https://dl.fbaipublicfiles.com/textless_nlp/gslm/eval_data/test_prompts.tgz) - diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/scripts/binarize_manifest.sh b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/scripts/binarize_manifest.sh deleted file mode 100644 index 6f201bdb524fad51a69d8c45889eaa1578efc62d..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/scripts/binarize_manifest.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash - -# usage: bash binarize_manifest - -DEST_DIR=$1 -TRAIN_SPLIT=$2 -VALID_SPLIT=$3 -FAIRSEQ_ROOT=$4 - -mkdir -p $DEST_DIR - -# split file path and lengths into separate files -cut -f1 $TRAIN_SPLIT.tsv > $DEST_DIR/train_fnames.txt -cut -f1 $VALID_SPLIT.tsv > $DEST_DIR/valid_fnames.txt -cut -f2 $TRAIN_SPLIT.tsv > $DEST_DIR/train.lengths -cut -f2 $VALID_SPLIT.tsv > $DEST_DIR/valid.lengths - -# copy root directory -head -1 $TRAIN_SPLIT.tsv > $DEST_DIR/train.root -head -1 $VALID_SPLIT.tsv > $DEST_DIR/valid.root - -# remove root directory -sed -i '1d' $DEST_DIR/train_fnames.txt -sed -i '1d' $DEST_DIR/valid_fnames.txt -sed -i '1d' $DEST_DIR/train.lengths -sed -i '1d' $DEST_DIR/valid.lengths - -# insert spaces between characters -sed -i -e 's/\(.\)/\1 /g' $DEST_DIR/train_fnames.txt -sed -i -e 's/\(.\)/\1 /g' $DEST_DIR/valid_fnames.txt - -# run preprocessor -PYTHONPATH=$FAIRSEQ_ROOT python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $DEST_DIR/train_fnames.txt --validpref $DEST_DIR/valid_fnames.txt --workers 60 --only-source --destdir $DEST_DIR diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/prepare_text.sh 
b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/prepare_text.sh deleted file mode 100644 index 1caf13cb6a2a0bd84e5322c92124b2fa37368f9a..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/prepare_text.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env zsh -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -lg=$1 -text_path=$2 -target_dir=$3 -min_phones=$4 -phonemizer=$5 -lid_path=$6 - -if [ -z "$lid_path" ]; then - lid_path="lid.187.bin" -fi - -ph_lg=${lg:l} -if test "$lg" = 'fr'; then - ph_lg='fr-fr' -elif test "$lg" = 'en'; then - ph_lg='en-us' -elif test "$lg" = 'pt'; then - ph_lg='pt-br' -fi - -ESPEAK_PATH='' -if test "$phonemizer" = 'espeak'; then - ESPEAK_PATH=$(which espeak) -elif test "$phonemizer" = 'espeak-ng'; then - ESPEAK_PATH=$(which espeak-ng) -elif test "$phonemizer" = 'G2P'; then - ESPEAK_PATH='' -else - echo "Unknown phonemizer $phonemizer. Valid options are espeak, espean-ng and G2P" - exit 1 -fi - -echo $lg -echo $ph_lg -echo $text_path -echo $target_dir -echo "min phone seen threshold is $min_phones" - -mkdir -p $target_dir -python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py --lang $lg --fasttext-model $lid_path < $text_path | grep -v '\-\-\-' >! $target_dir/lm.upper.lid.txt -python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $target_dir/lm.upper.lid.txt --only-source --destdir $target_dir --thresholdsrc 2 --padding-factor 1 --dict-only -cut -f1 -d' ' $target_dir/dict.txt | grep -v -x '[[:punct:]]*' | grep -Pv '\d\d\d\d\d+' >! $target_dir/words.txt - - -if [ -z "$ESPEAK_PATH" ]; then - python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py --compact < $target_dir/words.txt > $target_dir/phones.txt -else - # echoing 1 into corpus will prevent the mismatch lines between lexicon and phones in case the phonemizer fails - one=$(echo "1" | PHONEMIZER_ESPEAK_PATH=$ESPEAK_PATH phonemize -p ' ' -w '' -l $ph_lg --language-switch remove-flags) - sed 's/$/ 1/' $target_dir/words.txt | PHONEMIZER_ESPEAK_PATH=$ESPEAK_PATH phonemize -o $target_dir/phones.txt -p ' ' -w '' -l $ph_lg -j 70 --language-switch remove-flags - echo "one is ${one}" - sed -i "s/${one}$//" $target_dir/phones.txt -fi - -paste $target_dir/words.txt $target_dir/phones.txt >! $target_dir/lexicon.lst - -python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $target_dir/phones.txt --only-source --destdir $target_dir/phones --thresholdsrc $min_phones --padding-factor 1 --dict-only - -python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/filter_lexicon.py -d $target_dir/phones/dict.txt < $target_dir/lexicon.lst >! $target_dir/lexicon_filtered.lst -python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py -s 0.25 --surround --lexicon $target_dir/lexicon_filtered.lst < $target_dir/lm.upper.lid.txt >! 
$target_dir/phones/lm.phones.filtered.txt -cp $target_dir/phones/dict.txt $target_dir/phones/dict.phn.txt -echo " 0" >> $target_dir/phones/dict.phn.txt -python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $target_dir/phones/lm.phones.filtered.txt --workers 70 --only-source --destdir $target_dir/phones --srcdict $target_dir/phones/dict.phn.txt - -$KENLM_ROOT/lmplz -o 4 < $target_dir/lm.upper.lid.txt --discount_fallback --prune 0 0 0 3 >! $target_dir/kenlm.wrd.o40003.arpa -$KENLM_ROOT/build_binary $target_dir/kenlm.wrd.o40003.arpa $target_dir/kenlm.wrd.o40003.bin - -lg=$lg python $FAIRSEQ_ROOT/examples/speech_recognition/kaldi/kaldi_initializer.py kaldi_root=$KALDI_ROOT fst_dir=$target_dir/fst/phn_to_words_sil lm_arpa=$target_dir/kenlm.wrd.o40003.arpa wav2letter_lexicon=$target_dir/lexicon_filtered.lst data_dir=$target_dir/phones in_labels=phn "blank_symbol=''" -lg=$lg python $FAIRSEQ_ROOT/examples/speech_recognition/kaldi/kaldi_initializer.py kaldi_root=$KALDI_ROOT fst_dir=$target_dir/fst/phn_to_words lm_arpa=$target_dir/kenlm.wrd.o40003.arpa wav2letter_lexicon=$target_dir/lexicon_filtered.lst data_dir=$target_dir/phones in_labels=phn - -$KENLM_ROOT/lmplz -o 4 < $target_dir/phones/lm.phones.filtered.txt --discount_fallback >! $target_dir/phones/lm.phones.filtered.04.arpa -$KENLM_ROOT/build_binary $target_dir/phones/lm.phones.filtered.04.arpa $target_dir/phones/lm.phones.filtered.04.bin -$KENLM_ROOT/lmplz -o 6 < $target_dir/phones/lm.phones.filtered.txt --discount_fallback >! $target_dir/phones/lm.phones.filtered.06.arpa -$KENLM_ROOT/build_binary $target_dir/phones/lm.phones.filtered.06.arpa $target_dir/phones/lm.phones.filtered.06.bin - -lg=$lg python $FAIRSEQ_ROOT/examples/speech_recognition/kaldi/kaldi_initializer.py kaldi_root=$KALDI_ROOT fst_dir=$target_dir/fst/phn_to_phn_sil lm_arpa=$target_dir/phones/lm.phones.filtered.06.arpa data_dir=$target_dir/phones in_labels=phn "blank_symbol=''" diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/lightconv_lm.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/lightconv_lm.py deleted file mode 100644 index 1d9efc4e42a5ecc1b83338055f18ade5a83ea666..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/lightconv_lm.py +++ /dev/null @@ -1,306 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
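# Editorial note (not part of the deleted file): this module registers the "lightconv_lm"
# and "lightconv_lm_gbw" architectures with fairseq, so the model is normally built via the
# language_modeling task rather than instantiated directly. A hypothetical invocation,
# following the same pattern as the fairseq-train commands elsewhere in this dump:
#   fairseq-train data-bin/ --task language_modeling --arch lightconv_lm_gbw \
#       --share-decoder-input-output-embed --max-tokens 4096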
- -from fairseq import utils -from fairseq.models import ( - FairseqLanguageModel, - register_model, - register_model_architecture, -) -from fairseq.models.lightconv import Embedding, LightConvDecoder -from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder - - -@register_model("lightconv_lm") -class LightConvLanguageModel(FairseqLanguageModel): - def __init__(self, decoder): - super().__init__(decoder) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - parser.add_argument( - "--dropout", - default=0.1, - type=float, - metavar="D", - help="dropout probability", - ) - parser.add_argument( - "--attention-dropout", - default=0.0, - type=float, - metavar="D", - help="dropout probability for attention weights", - ) - parser.add_argument( - "--relu-dropout", - default=0.0, - type=float, - metavar="D", - help="dropout probability after ReLU in FFN", - ) - parser.add_argument( - "--input-dropout", - type=float, - metavar="D", - help="dropout probability of the inputs", - ) - parser.add_argument( - "--decoder-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension", - ) - parser.add_argument( - "--decoder-output-dim", - type=int, - metavar="N", - help="decoder output dimension", - ) - parser.add_argument( - "--decoder-input-dim", type=int, metavar="N", help="decoder input dimension" - ) - parser.add_argument( - "--decoder-ffn-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension for FFN", - ) - parser.add_argument( - "--decoder-layers", type=int, metavar="N", help="num decoder layers" - ) - parser.add_argument( - "--decoder-attention-heads", - type=int, - metavar="N", - help="num decoder attention heads or LightConv/DynamicConv heads", - ) - parser.add_argument( - "--decoder-normalize-before", - default=False, - action="store_true", - help="apply layernorm before each decoder block", - ) - parser.add_argument( - "--adaptive-softmax-cutoff", - metavar="EXPR", - help="comma separated list of adaptive softmax cutoff points. 
" - "Must be used with adaptive_loss criterion", - ) - parser.add_argument( - "--adaptive-softmax-dropout", - type=float, - metavar="D", - help="sets adaptive softmax dropout for the tail projections", - ) - parser.add_argument( - "--adaptive-softmax-factor", - type=float, - metavar="N", - help="adaptive input factor", - ) - parser.add_argument( - "--no-token-positional-embeddings", - default=False, - action="store_true", - help="if set, disables positional embeddings (outside self attention)", - ) - parser.add_argument( - "--share-decoder-input-output-embed", - default=False, - action="store_true", - help="share decoder input and output embeddings", - ) - parser.add_argument( - "--character-embeddings", - default=False, - action="store_true", - help="if set, uses character embedding convolutions to produce token embeddings", - ) - parser.add_argument( - "--character-filters", - type=str, - metavar="LIST", - default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", - help="size of character embeddings", - ) - parser.add_argument( - "--character-embedding-dim", - type=int, - metavar="N", - default=4, - help="size of character embeddings", - ) - parser.add_argument( - "--char-embedder-highway-layers", - type=int, - metavar="N", - default=2, - help="number of highway layers for character token embeddder", - ) - parser.add_argument( - "--adaptive-input", - default=False, - action="store_true", - help="if set, uses adaptive input", - ) - parser.add_argument( - "--adaptive-input-factor", - type=float, - metavar="N", - help="adaptive input factor", - ) - parser.add_argument( - "--adaptive-input-cutoff", - metavar="EXPR", - help="comma separated list of adaptive input cutoff points.", - ) - parser.add_argument( - "--tie-adaptive-weights", - action="store_true", - help="if set, ties the weights of adaptive softmax and adaptive input", - ) - parser.add_argument( - "--tie-adaptive-proj", - action="store_true", - help="if set, ties the projection weights of adaptive softmax and adaptive input", - ) - parser.add_argument( - "--decoder-learned-pos", - action="store_true", - help="use learned positional embeddings in the decoder", - ) - - """LightConv and DynamicConv arguments""" - parser.add_argument( - "--decoder-kernel-size-list", - type=lambda x: utils.eval_str_list(x, int), - help='list of kernel size (default: "[3,7,15,31,31,31]")', - ) - parser.add_argument( - "--decoder-glu", type=utils.eval_bool, help="glu after in proj" - ) - parser.add_argument( - "--decoder-conv-type", - default="dynamic", - type=str, - choices=["dynamic", "lightweight"], - help="type of convolution", - ) - parser.add_argument("--weight-softmax", default=True, type=utils.eval_bool) - parser.add_argument( - "--weight-dropout", - type=float, - metavar="D", - help="dropout probability for conv weights", - ) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - - # make sure all arguments are present in older models - base_lm_architecture(args) - - if getattr(args, "max_source_positions", None) is None: - args.max_source_positions = args.tokens_per_sample - if getattr(args, "max_target_positions", None) is None: - args.max_target_positions = args.tokens_per_sample - - if args.character_embeddings: - embed_tokens = CharacterTokenEmbedder( - task.dictionary, - eval(args.character_filters), - args.character_embedding_dim, - args.decoder_embed_dim, - args.char_embedder_highway_layers, - ) - elif args.adaptive_input: - embed_tokens = AdaptiveInput( - len(task.dictionary), - 
task.dictionary.pad(), - args.decoder_input_dim, - args.adaptive_input_factor, - args.decoder_embed_dim, - utils.eval_str_list(args.adaptive_input_cutoff, type=int), - ) - else: - embed_tokens = Embedding( - len(task.dictionary), args.decoder_input_dim, task.dictionary.pad() - ) - - if args.tie_adaptive_weights: - assert args.adaptive_input - assert args.adaptive_input_factor == args.adaptive_softmax_factor - assert ( - args.adaptive_softmax_cutoff == args.adaptive_input_cutoff - ), "{} != {}".format( - args.adaptive_softmax_cutoff, args.adaptive_input_cutoff - ) - assert args.decoder_input_dim == args.decoder_output_dim - - decoder = LightConvDecoder( - args, - task.output_dictionary, - embed_tokens, - no_encoder_attn=True, - final_norm=False, - ) - return LightConvLanguageModel(decoder) - - -@register_model_architecture("lightconv_lm", "lightconv_lm") -def base_lm_architecture(args): - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - - args.character_embeddings = getattr(args, "character_embeddings", False) - - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim) - - # The model training is not stable without this - args.decoder_normalize_before = True - - args.adaptive_input = getattr(args, "adaptive_input", False) - args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4) - args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None) - - args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) - args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False) - - args.decoder_kernel_size_list = getattr( - args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31] - ) - if len(args.decoder_kernel_size_list) == 1: - args.decoder_kernel_size_list = ( - args.decoder_kernel_size_list * args.decoder_layers - ) - assert ( - len(args.decoder_kernel_size_list) == args.decoder_layers - ), "decoder_kernel_size_list doesn't match decoder_layers" - args.decoder_glu = getattr(args, "decoder_glu", True) - args.input_dropout = getattr(args, "input_dropout", 0.1) - args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout) - - -@register_model_architecture("lightconv_lm", "lightconv_lm_gbw") -def lightconv_lm_gbw(args): - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.dropout = getattr(args, "dropout", 0.1) - args.attention_dropout = getattr(args, "attention_dropout", 0.1) - args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) - base_lm_architecture(args) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/lstm.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/lstm.py deleted file mode 100644 index 
e1e66a7d50fa1b1b313e9d1a6e7862ac9bfaa074..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/lstm.py +++ /dev/null @@ -1,753 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from typing import Dict, List, Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import utils -from fairseq.models import ( - FairseqEncoder, - FairseqEncoderDecoderModel, - FairseqIncrementalDecoder, - register_model, - register_model_architecture, -) -from fairseq.modules import AdaptiveSoftmax, FairseqDropout -from torch import Tensor - - -DEFAULT_MAX_SOURCE_POSITIONS = 1e5 -DEFAULT_MAX_TARGET_POSITIONS = 1e5 - - -@register_model("lstm") -class LSTMModel(FairseqEncoderDecoderModel): - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - # fmt: off - parser.add_argument('--dropout', type=float, metavar='D', - help='dropout probability') - parser.add_argument('--encoder-embed-dim', type=int, metavar='N', - help='encoder embedding dimension') - parser.add_argument('--encoder-embed-path', type=str, metavar='STR', - help='path to pre-trained encoder embedding') - parser.add_argument('--encoder-freeze-embed', action='store_true', - help='freeze encoder embeddings') - parser.add_argument('--encoder-hidden-size', type=int, metavar='N', - help='encoder hidden size') - parser.add_argument('--encoder-layers', type=int, metavar='N', - help='number of encoder layers') - parser.add_argument('--encoder-bidirectional', action='store_true', - help='make all layers of encoder bidirectional') - parser.add_argument('--decoder-embed-dim', type=int, metavar='N', - help='decoder embedding dimension') - parser.add_argument('--decoder-embed-path', type=str, metavar='STR', - help='path to pre-trained decoder embedding') - parser.add_argument('--decoder-freeze-embed', action='store_true', - help='freeze decoder embeddings') - parser.add_argument('--decoder-hidden-size', type=int, metavar='N', - help='decoder hidden size') - parser.add_argument('--decoder-layers', type=int, metavar='N', - help='number of decoder layers') - parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', - help='decoder output embedding dimension') - parser.add_argument('--decoder-attention', type=str, metavar='BOOL', - help='decoder attention') - parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', - help='comma separated list of adaptive softmax cutoff points. 
' - 'Must be used with adaptive_loss criterion') - parser.add_argument('--share-decoder-input-output-embed', default=False, - action='store_true', - help='share decoder input and output embeddings') - parser.add_argument('--share-all-embeddings', default=False, action='store_true', - help='share encoder, decoder and output embeddings' - ' (requires shared dictionary and embed dim)') - - # Granular dropout settings (if not specified these default to --dropout) - parser.add_argument('--encoder-dropout-in', type=float, metavar='D', - help='dropout probability for encoder input embedding') - parser.add_argument('--encoder-dropout-out', type=float, metavar='D', - help='dropout probability for encoder output') - parser.add_argument('--decoder-dropout-in', type=float, metavar='D', - help='dropout probability for decoder input embedding') - parser.add_argument('--decoder-dropout-out', type=float, metavar='D', - help='dropout probability for decoder output') - # fmt: on - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - # make sure that all args are properly defaulted (in case there are any new ones) - base_architecture(args) - - if args.encoder_layers != args.decoder_layers: - raise ValueError("--encoder-layers must match --decoder-layers") - - max_source_positions = getattr( - args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS - ) - max_target_positions = getattr( - args, "max_target_positions", DEFAULT_MAX_TARGET_POSITIONS - ) - - def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) - embed_dict = utils.parse_embedding(embed_path) - utils.print_embed_overlap(embed_dict, dictionary) - return utils.load_embedding(embed_dict, dictionary, embed_tokens) - - if args.encoder_embed_path: - pretrained_encoder_embed = load_pretrained_embedding_from_file( - args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim - ) - else: - num_embeddings = len(task.source_dictionary) - pretrained_encoder_embed = Embedding( - num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad() - ) - - if args.share_all_embeddings: - # double check all parameters combinations are valid - if task.source_dictionary != task.target_dictionary: - raise ValueError("--share-all-embeddings requires a joint dictionary") - if args.decoder_embed_path and ( - args.decoder_embed_path != args.encoder_embed_path - ): - raise ValueError( - "--share-all-embed not compatible with --decoder-embed-path" - ) - if args.encoder_embed_dim != args.decoder_embed_dim: - raise ValueError( - "--share-all-embeddings requires --encoder-embed-dim to " - "match --decoder-embed-dim" - ) - pretrained_decoder_embed = pretrained_encoder_embed - args.share_decoder_input_output_embed = True - else: - # separate decoder input embeddings - pretrained_decoder_embed = None - if args.decoder_embed_path: - pretrained_decoder_embed = load_pretrained_embedding_from_file( - args.decoder_embed_path, - task.target_dictionary, - args.decoder_embed_dim, - ) - # one last double check of parameter combinations - if args.share_decoder_input_output_embed and ( - args.decoder_embed_dim != args.decoder_out_embed_dim - ): - raise ValueError( - "--share-decoder-input-output-embeddings requires " - "--decoder-embed-dim to match --decoder-out-embed-dim" - ) - - if args.encoder_freeze_embed: - pretrained_encoder_embed.weight.requires_grad = False - if 
args.decoder_freeze_embed: - pretrained_decoder_embed.weight.requires_grad = False - - encoder = LSTMEncoder( - dictionary=task.source_dictionary, - embed_dim=args.encoder_embed_dim, - hidden_size=args.encoder_hidden_size, - num_layers=args.encoder_layers, - dropout_in=args.encoder_dropout_in, - dropout_out=args.encoder_dropout_out, - bidirectional=args.encoder_bidirectional, - pretrained_embed=pretrained_encoder_embed, - max_source_positions=max_source_positions, - ) - decoder = LSTMDecoder( - dictionary=task.target_dictionary, - embed_dim=args.decoder_embed_dim, - hidden_size=args.decoder_hidden_size, - out_embed_dim=args.decoder_out_embed_dim, - num_layers=args.decoder_layers, - dropout_in=args.decoder_dropout_in, - dropout_out=args.decoder_dropout_out, - attention=utils.eval_bool(args.decoder_attention), - encoder_output_units=encoder.output_units, - pretrained_embed=pretrained_decoder_embed, - share_input_output_embed=args.share_decoder_input_output_embed, - adaptive_softmax_cutoff=( - utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) - if args.criterion == "adaptive_loss" - else None - ), - max_target_positions=max_target_positions, - residuals=False, - ) - return cls(encoder, decoder) - - def forward( - self, - src_tokens, - src_lengths, - prev_output_tokens, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - ): - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths) - decoder_out = self.decoder( - prev_output_tokens, - encoder_out=encoder_out, - incremental_state=incremental_state, - ) - return decoder_out - - -class LSTMEncoder(FairseqEncoder): - """LSTM encoder.""" - - def __init__( - self, - dictionary, - embed_dim=512, - hidden_size=512, - num_layers=1, - dropout_in=0.1, - dropout_out=0.1, - bidirectional=False, - left_pad=True, - pretrained_embed=None, - padding_idx=None, - max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS, - ): - super().__init__(dictionary) - self.num_layers = num_layers - self.dropout_in_module = FairseqDropout( - dropout_in*1.0, module_name=self.__class__.__name__ - ) - self.dropout_out_module = FairseqDropout( - dropout_out*1.0, module_name=self.__class__.__name__ - ) - self.bidirectional = bidirectional - self.hidden_size = hidden_size - self.max_source_positions = max_source_positions - - num_embeddings = len(dictionary) - self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad() - if pretrained_embed is None: - self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) - else: - self.embed_tokens = pretrained_embed - - self.lstm = LSTM( - input_size=embed_dim, - hidden_size=hidden_size, - num_layers=num_layers, - dropout=self.dropout_out_module.p if num_layers > 1 else 0.0, - bidirectional=bidirectional, - ) - self.left_pad = left_pad - - self.output_units = hidden_size - if bidirectional: - self.output_units *= 2 - - def forward( - self, - src_tokens: Tensor, - src_lengths: Tensor, - enforce_sorted: bool = True, - ): - """ - Args: - src_tokens (LongTensor): tokens in the source language of - shape `(batch, src_len)` - src_lengths (LongTensor): lengths of each source sentence of - shape `(batch)` - enforce_sorted (bool, optional): if True, `src_tokens` is - expected to contain sequences sorted by length in a - decreasing order. If False, this condition is not - required. Default: True. 
- """ - if self.left_pad: - # nn.utils.rnn.pack_padded_sequence requires right-padding; - # convert left-padding to right-padding - src_tokens = utils.convert_padding_direction( - src_tokens, - torch.zeros_like(src_tokens).fill_(self.padding_idx), - left_to_right=True, - ) - - bsz, seqlen = src_tokens.size() - - # embed tokens - x = self.embed_tokens(src_tokens) - x = self.dropout_in_module(x) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - # pack embedded source tokens into a PackedSequence - packed_x = nn.utils.rnn.pack_padded_sequence( - x, src_lengths.cpu(), enforce_sorted=enforce_sorted - ) - - # apply LSTM - if self.bidirectional: - state_size = 2 * self.num_layers, bsz, self.hidden_size - else: - state_size = self.num_layers, bsz, self.hidden_size - h0 = x.new_zeros(*state_size) - c0 = x.new_zeros(*state_size) - packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0)) - - # unpack outputs and apply dropout - x, _ = nn.utils.rnn.pad_packed_sequence( - packed_outs, padding_value=self.padding_idx * 1.0 - ) - x = self.dropout_out_module(x) - assert list(x.size()) == [seqlen, bsz, self.output_units] - - if self.bidirectional: - final_hiddens = self.combine_bidir(final_hiddens, bsz) - final_cells = self.combine_bidir(final_cells, bsz) - - encoder_padding_mask = src_tokens.eq(self.padding_idx).t() - - return tuple( - ( - x, # seq_len x batch x hidden - final_hiddens, # num_layers x batch x num_directions*hidden - final_cells, # num_layers x batch x num_directions*hidden - encoder_padding_mask, # seq_len x batch - ) - ) - - def combine_bidir(self, outs, bsz: int): - out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous() - return out.view(self.num_layers, bsz, -1) - - def reorder_encoder_out(self, encoder_out: Tuple[Tensor, Tensor, Tensor, Tensor], new_order): - return tuple( - ( - encoder_out[0].index_select(1, new_order), - encoder_out[1].index_select(1, new_order), - encoder_out[2].index_select(1, new_order), - encoder_out[3].index_select(1, new_order), - ) - ) - - def max_positions(self): - """Maximum input length supported by the encoder.""" - return self.max_source_positions - - -class AttentionLayer(nn.Module): - def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False): - super().__init__() - - self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias) - self.output_proj = Linear( - input_embed_dim + source_embed_dim, output_embed_dim, bias=bias - ) - - def forward(self, input, source_hids, encoder_padding_mask): - # input: bsz x input_embed_dim - # source_hids: srclen x bsz x source_embed_dim - - # x: bsz x source_embed_dim - x = self.input_proj(input) - - # compute attention - attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2) - - # don't attend over padding - if encoder_padding_mask is not None: - attn_scores = ( - attn_scores.float() - .masked_fill_(encoder_padding_mask, float("-inf")) - .type_as(attn_scores) - ) # FP16 support: cast to float and back - - attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz - - # sum weighted sources - x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0) - - x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1))) - return x, attn_scores - - -class LSTMDecoder(FairseqIncrementalDecoder): - """LSTM decoder.""" - - def __init__( - self, - dictionary, - embed_dim=512, - hidden_size=512, - out_embed_dim=512, - num_layers=1, - dropout_in=0.1, - dropout_out=0.1, - attention=True, - encoder_output_units=512, - pretrained_embed=None, - 
share_input_output_embed=False, - adaptive_softmax_cutoff=None, - max_target_positions=DEFAULT_MAX_TARGET_POSITIONS, - residuals=False, - ): - super().__init__(dictionary) - self.dropout_in_module = FairseqDropout( - dropout_in*1.0, module_name=self.__class__.__name__ - ) - self.dropout_out_module = FairseqDropout( - dropout_out*1.0, module_name=self.__class__.__name__ - ) - self.hidden_size = hidden_size - self.share_input_output_embed = share_input_output_embed - self.need_attn = True - self.max_target_positions = max_target_positions - self.residuals = residuals - self.num_layers = num_layers - - self.adaptive_softmax = None - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - if pretrained_embed is None: - self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) - else: - self.embed_tokens = pretrained_embed - - self.encoder_output_units = encoder_output_units - if encoder_output_units != hidden_size and encoder_output_units != 0: - self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size) - self.encoder_cell_proj = Linear(encoder_output_units, hidden_size) - else: - self.encoder_hidden_proj = self.encoder_cell_proj = None - - # disable input feeding if there is no encoder - # input feeding is described in arxiv.org/abs/1508.04025 - input_feed_size = 0 if encoder_output_units == 0 else hidden_size - self.layers = nn.ModuleList( - [ - LSTMCell( - input_size=input_feed_size + embed_dim - if layer == 0 - else hidden_size, - hidden_size=hidden_size, - ) - for layer in range(num_layers) - ] - ) - - if attention: - # TODO make bias configurable - self.attention = AttentionLayer( - hidden_size, encoder_output_units, hidden_size, bias=False - ) - else: - self.attention = None - - if hidden_size != out_embed_dim: - self.additional_fc = Linear(hidden_size, out_embed_dim) - - if adaptive_softmax_cutoff is not None: - # setting adaptive_softmax dropout to dropout_out for now but can be redefined - self.adaptive_softmax = AdaptiveSoftmax( - num_embeddings, - hidden_size, - adaptive_softmax_cutoff, - dropout=dropout_out, - ) - elif not self.share_input_output_embed: - self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out) - - def forward( - self, - prev_output_tokens, - encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - src_lengths: Optional[Tensor] = None, - ): - x, attn_scores = self.extract_features( - prev_output_tokens, encoder_out, incremental_state - ) - return self.output_layer(x), attn_scores - - def extract_features( - self, - prev_output_tokens, - encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - ): - """ - Similar to *forward* but only return features. 
- """ - # get outputs from encoder - if encoder_out is not None: - encoder_outs = encoder_out[0] - encoder_hiddens = encoder_out[1] - encoder_cells = encoder_out[2] - encoder_padding_mask = encoder_out[3] - else: - encoder_outs = torch.empty(0) - encoder_hiddens = torch.empty(0) - encoder_cells = torch.empty(0) - encoder_padding_mask = torch.empty(0) - srclen = encoder_outs.size(0) - - if incremental_state is not None and len(incremental_state) > 0: - prev_output_tokens = prev_output_tokens[:, -1:] - - bsz, seqlen = prev_output_tokens.size() - - # embed tokens - x = self.embed_tokens(prev_output_tokens) - x = self.dropout_in_module(x) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - # initialize previous states (or get from cache during incremental generation) - if incremental_state is not None and len(incremental_state) > 0: - prev_hiddens, prev_cells, input_feed = self.get_cached_state( - incremental_state - ) - elif encoder_out is not None: - # setup recurrent cells - prev_hiddens = [encoder_hiddens[i] for i in range(self.num_layers)] - prev_cells = [encoder_cells[i] for i in range(self.num_layers)] - if self.encoder_hidden_proj is not None: - prev_hiddens = [self.encoder_hidden_proj(y) for y in prev_hiddens] - prev_cells = [self.encoder_cell_proj(y) for y in prev_cells] - input_feed = x.new_zeros(bsz, self.hidden_size) - else: - # setup zero cells, since there is no encoder - zero_state = x.new_zeros(bsz, self.hidden_size) - prev_hiddens = [zero_state for i in range(self.num_layers)] - prev_cells = [zero_state for i in range(self.num_layers)] - input_feed = None - - assert ( - srclen > 0 or self.attention is None - ), "attention is not supported if there are no encoder outputs" - attn_scores: Optional[Tensor] = ( - x.new_zeros(srclen, seqlen, bsz) if self.attention is not None else None - ) - outs = [] - for j in range(seqlen): - # input feeding: concatenate context vector from previous time step - if input_feed is not None: - input = torch.cat((x[j, :, :], input_feed), dim=1) - else: - input = x[j] - - for i, rnn in enumerate(self.layers): - # recurrent cell - hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i])) - - # hidden state becomes the input to the next layer - input = self.dropout_out_module(hidden) - if self.residuals: - input = input + prev_hiddens[i] - - # save state for next time step - prev_hiddens[i] = hidden - prev_cells[i] = cell - - # apply attention using the last layer's hidden state - if self.attention is not None: - assert attn_scores is not None - out, attn_scores[:, j, :] = self.attention( - hidden, encoder_outs, encoder_padding_mask - ) - else: - out = hidden - out = self.dropout_out_module(out) - - # input feeding - if input_feed is not None: - input_feed = out - - # save final output - outs.append(out) - - # Stack all the necessary tensors together and store - prev_hiddens_tensor = torch.stack(prev_hiddens) - prev_cells_tensor = torch.stack(prev_cells) - cache_state = torch.jit.annotate( - Dict[str, Optional[Tensor]], - { - "prev_hiddens": prev_hiddens_tensor, - "prev_cells": prev_cells_tensor, - "input_feed": input_feed, - }, - ) - self.set_incremental_state(incremental_state, "cached_state", cache_state) - - # collect outputs across time steps - x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) - - # T x B x C -> B x T x C - x = x.transpose(1, 0) - - if hasattr(self, "additional_fc") and self.adaptive_softmax is None: - x = self.additional_fc(x) - x = self.dropout_out_module(x) - # srclen x tgtlen x bsz -> bsz x tgtlen x srclen - 
if not self.training and self.need_attn and self.attention is not None: - assert attn_scores is not None - attn_scores = attn_scores.transpose(0, 2) - else: - attn_scores = None - return x, attn_scores - - def output_layer(self, x): - """Project features to the vocabulary size.""" - if self.adaptive_softmax is None: - if self.share_input_output_embed: - x = F.linear(x, self.embed_tokens.weight) - else: - x = self.fc_out(x) - return x - - def get_cached_state( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - ) -> Tuple[List[Tensor], List[Tensor], Optional[Tensor]]: - cached_state = self.get_incremental_state(incremental_state, "cached_state") - assert cached_state is not None - prev_hiddens_ = cached_state["prev_hiddens"] - assert prev_hiddens_ is not None - prev_cells_ = cached_state["prev_cells"] - assert prev_cells_ is not None - prev_hiddens = [prev_hiddens_[i] for i in range(self.num_layers)] - prev_cells = [prev_cells_[j] for j in range(self.num_layers)] - input_feed = cached_state[ - "input_feed" - ] # can be None for decoder-only language models - return prev_hiddens, prev_cells, input_feed - - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - new_order: Tensor, - ): - if incremental_state is None or len(incremental_state) == 0: - return - prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state) - prev_hiddens = [p.index_select(0, new_order) for p in prev_hiddens] - prev_cells = [p.index_select(0, new_order) for p in prev_cells] - if input_feed is not None: - input_feed = input_feed.index_select(0, new_order) - cached_state_new = torch.jit.annotate( - Dict[str, Optional[Tensor]], - { - "prev_hiddens": torch.stack(prev_hiddens), - "prev_cells": torch.stack(prev_cells), - "input_feed": input_feed, - }, - ) - self.set_incremental_state(incremental_state, "cached_state", cached_state_new), - return - - def max_positions(self): - """Maximum output length supported by the decoder.""" - return self.max_target_positions - - def make_generation_fast_(self, need_attn=False, **kwargs): - self.need_attn = need_attn - - -def Embedding(num_embeddings, embedding_dim, padding_idx): - m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) - nn.init.uniform_(m.weight, -0.1, 0.1) - nn.init.constant_(m.weight[padding_idx], 0) - return m - - -def LSTM(input_size, hidden_size, **kwargs): - m = nn.LSTM(input_size, hidden_size, **kwargs) - for name, param in m.named_parameters(): - if "weight" in name or "bias" in name: - param.data.uniform_(-0.1, 0.1) - return m - - -def LSTMCell(input_size, hidden_size, **kwargs): - m = nn.LSTMCell(input_size, hidden_size, **kwargs) - for name, param in m.named_parameters(): - if "weight" in name or "bias" in name: - param.data.uniform_(-0.1, 0.1) - return m - - -def Linear(in_features, out_features, bias=True, dropout=0.0): - """Linear layer (input: N x T x C)""" - m = nn.Linear(in_features, out_features, bias=bias) - m.weight.data.uniform_(-0.1, 0.1) - if bias: - m.bias.data.uniform_(-0.1, 0.1) - return m - - -@register_model_architecture("lstm", "lstm") -def base_architecture(args): - args.dropout = getattr(args, "dropout", 0.1) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False) - args.encoder_hidden_size = getattr( - args, "encoder_hidden_size", args.encoder_embed_dim - ) - args.encoder_layers = 
getattr(args, "encoder_layers", 1) - args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False) - args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout) - args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_freeze_embed = getattr(args, "decoder_freeze_embed", False) - args.decoder_hidden_size = getattr( - args, "decoder_hidden_size", args.decoder_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 1) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) - args.decoder_attention = getattr(args, "decoder_attention", "1") - args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout) - args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.share_all_embeddings = getattr(args, "share_all_embeddings", False) - args.adaptive_softmax_cutoff = getattr( - args, "adaptive_softmax_cutoff", "10000,50000,200000" - ) - - -@register_model_architecture("lstm", "lstm_wiseman_iwslt_de_en") -def lstm_wiseman_iwslt_de_en(args): - args.dropout = getattr(args, "dropout", 0.1) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) - args.encoder_dropout_in = getattr(args, "encoder_dropout_in", 0) - args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) - args.decoder_dropout_in = getattr(args, "decoder_dropout_in", 0) - args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) - base_architecture(args) - - -@register_model_architecture("lstm", "lstm_luong_wmt_en_de") -def lstm_luong_wmt_en_de(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1000) - args.encoder_layers = getattr(args, "encoder_layers", 4) - args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1000) - args.decoder_layers = getattr(args, "decoder_layers", 4) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 1000) - args.decoder_dropout_out = getattr(args, "decoder_dropout_out", 0) - base_architecture(args) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/translation_multi_simple_epoch.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/translation_multi_simple_epoch.py deleted file mode 100644 index 6f36e5b93e98497de31969d203ae04dbb4bd9306..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/translation_multi_simple_epoch.py +++ /dev/null @@ -1,430 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
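For reference, the `AttentionLayer` in the LSTM model file deleted above implements Luong-style global (dot-product) attention over the encoder states: the decoder hidden state is projected into the encoder dimension, scored against every source position, masked over padding, normalized along the source axis, and the resulting context is combined with the query through a tanh projection. The sketch below restates that computation as a standalone module; tensor shapes follow the deleted code, but the class itself is illustrative and not part of fairseq's API.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class DotProductAttention(nn.Module):
    """Illustrative restatement of the deleted AttentionLayer logic."""

    def __init__(self, decoder_dim, encoder_dim, output_dim):
        super().__init__()
        # project decoder state into the encoder space before scoring
        self.input_proj = nn.Linear(decoder_dim, encoder_dim, bias=False)
        self.output_proj = nn.Linear(decoder_dim + encoder_dim, output_dim, bias=False)

    def forward(self, query, encoder_outs, encoder_padding_mask=None):
        # query:        bsz x decoder_dim   (current decoder hidden state)
        # encoder_outs: srclen x bsz x encoder_dim
        x = self.input_proj(query)                                # bsz x encoder_dim
        scores = (encoder_outs * x.unsqueeze(0)).sum(dim=2)      # srclen x bsz
        if encoder_padding_mask is not None:
            # don't attend over padded source positions
            scores = scores.masked_fill(encoder_padding_mask, float("-inf"))
        attn = F.softmax(scores, dim=0)                           # normalize over source length
        context = (attn.unsqueeze(2) * encoder_outs).sum(dim=0)  # bsz x encoder_dim
        out = torch.tanh(self.output_proj(torch.cat((context, query), dim=1)))
        return out, attn


# Example shapes: 5 source positions, batch of 2 (purely illustrative)
attn_layer = DotProductAttention(decoder_dim=512, encoder_dim=512, output_dim=512)
out, weights = attn_layer(torch.randn(2, 512), torch.randn(5, 2, 512))
```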
- -import datetime -import logging -import time - -import torch -from fairseq.data import ( - FairseqDataset, - LanguagePairDataset, - ListDataset, - data_utils, - iterators, -) -from fairseq.data.multilingual.multilingual_data_manager import ( - MultilingualDatasetManager, -) -from fairseq.data.multilingual.sampling_method import SamplingMethod -from fairseq.tasks import LegacyFairseqTask, register_task -from fairseq.utils import FileContentsAction - - -### -def get_time_gap(s, e): - return ( - datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s) - ).__str__() - - -### - - -logger = logging.getLogger(__name__) - - -@register_task("translation_multi_simple_epoch") -class TranslationMultiSimpleEpochTask(LegacyFairseqTask): - """ - Translate from one (source) language to another (target) language. - - Args: - langs (List[str]): a list of languages that are being supported - dicts (Dict[str, fairseq.data.Dictionary]): mapping from supported languages to their dictionaries - training (bool): whether the task should be configured for training or not - - .. note:: - - The translation task is compatible with :mod:`fairseq-train`, - :mod:`fairseq-generate` and :mod:`fairseq-interactive`. - - The translation task provides the following additional command-line - arguments: - - .. argparse:: - :ref: fairseq.tasks.translation_parser - :prog: - """ - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - # fmt: off - parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', - help='inference source language') - parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', - help='inference target language') - parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', - help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr', - action=FileContentsAction) - parser.add_argument('--keep-inference-langtok', action='store_true', - help='keep language tokens in inference output (e.g. for analysis or debugging)') - - SamplingMethod.add_arguments(parser) - MultilingualDatasetManager.add_args(parser) - # fmt: on - - def __init__(self, args, langs, dicts, training): - super().__init__(args) - self.langs = langs - self.dicts = dicts - self.training = training - if training: - self.lang_pairs = args.lang_pairs - else: - self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)] - # eval_lang_pairs for multilingual translation is usually all of the - # lang_pairs. However for other multitask settings or when we want to - # optimize for certain languages we want to use a different subset. Thus - # the eval_lang_pairs class variable is provided for classes that extend - # this class. - self.eval_lang_pairs = self.lang_pairs - # model_lang_pairs will be used to build encoder-decoder model pairs in - # models.build_model(). 
This allows multitask type of sub-class can - # build models other than the input lang_pairs - self.model_lang_pairs = self.lang_pairs - self.source_langs = [d.split("-")[0] for d in self.lang_pairs] - self.target_langs = [d.split("-")[1] for d in self.lang_pairs] - self.check_dicts(self.dicts, self.source_langs, self.target_langs) - - self.sampling_method = SamplingMethod.build_sampler(args, self) - self.data_manager = MultilingualDatasetManager.setup_data_manager( - args, self.lang_pairs, langs, dicts, self.sampling_method - ) - - def check_dicts(self, dicts, source_langs, target_langs): - if self.args.source_dict is not None or self.args.target_dict is not None: - # no need to check whether the source side and target side are sharing dictionaries - return - src_dict = dicts[source_langs[0]] - tgt_dict = dicts[target_langs[0]] - for src_lang in source_langs: - assert ( - src_dict == dicts[src_lang] - ), "Diffrent dictionary are specified for different source languages; " - "TranslationMultiSimpleEpochTask only supports one shared dictionary across all source languages" - for tgt_lang in target_langs: - assert ( - tgt_dict == dicts[tgt_lang] - ), "Diffrent dictionary are specified for different target languages; " - "TranslationMultiSimpleEpochTask only supports one shared dictionary across all target languages" - - @classmethod - def setup_task(cls, args, **kwargs): - langs, dicts, training = MultilingualDatasetManager.prepare( - cls.load_dictionary, args, **kwargs - ) - return cls(args, langs, dicts, training) - - def has_sharded_data(self, split): - return self.data_manager.has_sharded_data(split) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - """Load a given dataset split. - - Args: - split (str): name of the split (e.g., train, valid, test) - """ - if split in self.datasets: - dataset = self.datasets[split] - if self.has_sharded_data(split): - if self.args.virtual_epoch_size is not None: - if dataset.load_next_shard: - shard_epoch = dataset.shard_epoch - else: - # no need to load next shard so skip loading - # also this avoid always loading from beginning of the data - return - else: - shard_epoch = epoch - else: - # estimate the shard epoch from virtual data size and virtual epoch size - shard_epoch = self.data_manager.estimate_global_pass_epoch(epoch) - logger.info(f"loading data for {split} epoch={epoch}/{shard_epoch}") - logger.info(f"mem usage: {data_utils.get_mem_usage()}") - if split in self.datasets: - del self.datasets[split] - logger.info("old dataset deleted manually") - logger.info(f"mem usage: {data_utils.get_mem_usage()}") - self.datasets[split] = self.data_manager.load_dataset( - split, - self.training, - epoch=epoch, - combine=combine, - shard_epoch=shard_epoch, - **kwargs, - ) - - def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): - if constraints is not None: - raise NotImplementedError( - "Constrained decoding with the multilingual_translation task is not supported" - ) - - src_data = ListDataset(src_tokens, src_lengths) - dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary) - src_langtok_spec, tgt_langtok_spec = self.args.langtoks["main"] - if self.args.lang_tok_replacing_bos_eos: - dataset = self.data_manager.alter_dataset_langtok( - dataset, - src_eos=self.source_dictionary.eos(), - src_lang=self.args.source_lang, - tgt_eos=self.target_dictionary.eos(), - tgt_lang=self.args.target_lang, - src_langtok_spec=src_langtok_spec, - tgt_langtok_spec=tgt_langtok_spec, - ) - else: - dataset.src 
= self.data_manager.src_dataset_tranform_func( - self.args.source_lang, - self.args.target_lang, - dataset=dataset.src, - spec=src_langtok_spec, - ) - return dataset - - def build_generator( - self, - models, - args, - seq_gen_cls=None, - extra_gen_cls_kwargs=None, - ): - if not getattr(args, "keep_inference_langtok", False): - _, tgt_langtok_spec = self.args.langtoks["main"] - if tgt_langtok_spec: - tgt_lang_tok = self.data_manager.get_decoder_langtok( - self.args.target_lang, tgt_langtok_spec - ) - extra_gen_cls_kwargs = extra_gen_cls_kwargs or {} - extra_gen_cls_kwargs["symbols_to_strip_from_output"] = {tgt_lang_tok} - - return super().build_generator( - models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs - ) - - def build_model(self, args): - return super().build_model(args) - - def valid_step(self, sample, model, criterion): - loss, sample_size, logging_output = super().valid_step(sample, model, criterion) - return loss, sample_size, logging_output - - def inference_step( - self, generator, models, sample, prefix_tokens=None, constraints=None - ): - with torch.no_grad(): - _, tgt_langtok_spec = self.args.langtoks["main"] - if not self.args.lang_tok_replacing_bos_eos: - if prefix_tokens is None and tgt_langtok_spec: - tgt_lang_tok = self.data_manager.get_decoder_langtok( - self.args.target_lang, tgt_langtok_spec - ) - src_tokens = sample["net_input"]["src_tokens"] - bsz = src_tokens.size(0) - prefix_tokens = ( - torch.LongTensor([[tgt_lang_tok]]).expand(bsz, 1).to(src_tokens) - ) - return generator.generate( - models, - sample, - prefix_tokens=prefix_tokens, - constraints=constraints, - ) - else: - return generator.generate( - models, - sample, - prefix_tokens=prefix_tokens, - bos_token=self.data_manager.get_decoder_langtok( - self.args.target_lang, tgt_langtok_spec - ) - if tgt_langtok_spec - else self.target_dictionary.eos(), - ) - - def reduce_metrics(self, logging_outputs, criterion): - super().reduce_metrics(logging_outputs, criterion) - - def max_positions(self): - """Return the max sentence length allowed by the task.""" - return (self.args.max_source_positions, self.args.max_target_positions) - - @property - def source_dictionary(self): - return self.data_manager.get_source_dictionary(self.source_langs[0]) - - @property - def target_dictionary(self): - return self.data_manager.get_target_dictionary(self.target_langs[0]) - - def create_batch_sampler_func( - self, - max_positions, - ignore_invalid_inputs, - max_tokens, - max_sentences, - required_batch_size_multiple=1, - seed=1, - ): - def construct_batch_sampler(dataset, epoch): - splits = [ - s for s, _ in self.datasets.items() if self.datasets[s] == dataset - ] - split = splits[0] if len(splits) > 0 else None - # NEW implementation - if epoch is not None: - # initialize the dataset with the correct starting epoch - dataset.set_epoch(epoch) - - # get indices ordered by example size - start_time = time.time() - logger.info(f"start batch sampler: mem usage: {data_utils.get_mem_usage()}") - - with data_utils.numpy_seed(seed): - indices = dataset.ordered_indices() - logger.info( - f"[{split}] @batch_sampler order indices time: {get_time_gap(start_time, time.time())}" - ) - logger.info(f"mem usage: {data_utils.get_mem_usage()}") - - # filter examples that are too large - if max_positions is not None: - my_time = time.time() - indices = self.filter_indices_by_size( - indices, dataset, max_positions, ignore_invalid_inputs - ) - logger.info( - f"[{split}] @batch_sampler filter_by_size time: {get_time_gap(my_time, 
time.time())}" - ) - logger.info(f"mem usage: {data_utils.get_mem_usage()}") - - # create mini-batches with given size constraints - my_time = time.time() - batch_sampler = dataset.batch_by_size( - indices, - max_tokens=max_tokens, - max_sentences=max_sentences, - required_batch_size_multiple=required_batch_size_multiple, - ) - - logger.info( - f"[{split}] @batch_sampler batch_by_size time: {get_time_gap(my_time, time.time())}" - ) - logger.info( - f"[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}" - ) - logger.info(f"mem usage: {data_utils.get_mem_usage()}") - - return batch_sampler - - return construct_batch_sampler - - # we need to override get_batch_iterator because we want to reset the epoch iterator each time - def get_batch_iterator( - self, - dataset, - max_tokens=None, - max_sentences=None, - max_positions=None, - ignore_invalid_inputs=False, - required_batch_size_multiple=1, - seed=1, - num_shards=1, - shard_id=0, - num_workers=0, - epoch=1, - data_buffer_size=0, - disable_iterator_cache=False, - ): - """ - Get an iterator that yields batches of data from the given dataset. - - Args: - dataset (~fairseq.data.FairseqDataset): dataset to batch - max_tokens (int, optional): max number of tokens in each batch - (default: None). - max_sentences (int, optional): max number of sentences in each - batch (default: None). - max_positions (optional): max sentence length supported by the - model (default: None). - ignore_invalid_inputs (bool, optional): don't raise Exception for - sentences that are too long (default: False). - required_batch_size_multiple (int, optional): require batch size to - be a multiple of N (default: 1). - seed (int, optional): seed for random number generator for - reproducibility (default: 1). - num_shards (int, optional): shard the data iterator into N - shards (default: 1). - shard_id (int, optional): which shard of the data iterator to - return (default: 0). - num_workers (int, optional): how many subprocesses to use for data - loading. 0 means the data will be loaded in the main process - (default: 0). - epoch (int, optional): the epoch to start the iterator from - (default: 0). - data_buffer_size (int, optional): number of batches to - preload (default: 0). - disable_iterator_cache (bool, optional): don't cache the - EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`) - (default: False). 
- Returns: - ~fairseq.iterators.EpochBatchIterator: a batched iterator over the - given dataset split - """ - # initialize the dataset with the correct starting epoch - assert isinstance(dataset, FairseqDataset) - if dataset in self.dataset_to_epoch_iter: - return self.dataset_to_epoch_iter[dataset] - if self.args.sampling_method == "RoundRobin": - batch_iter = super().get_batch_iterator( - dataset, - max_tokens=max_tokens, - max_sentences=max_sentences, - max_positions=max_positions, - ignore_invalid_inputs=ignore_invalid_inputs, - required_batch_size_multiple=required_batch_size_multiple, - seed=seed, - num_shards=num_shards, - shard_id=shard_id, - num_workers=num_workers, - epoch=epoch, - data_buffer_size=data_buffer_size, - disable_iterator_cache=disable_iterator_cache, - ) - self.dataset_to_epoch_iter[dataset] = batch_iter - return batch_iter - - construct_batch_sampler = self.create_batch_sampler_func( - max_positions, - ignore_invalid_inputs, - max_tokens, - max_sentences, - required_batch_size_multiple=required_batch_size_multiple, - seed=seed, - ) - - epoch_iter = iterators.EpochBatchIterator( - dataset=dataset, - collate_fn=dataset.collater, - batch_sampler=construct_batch_sampler, - seed=seed, - num_shards=num_shards, - shard_id=shard_id, - num_workers=num_workers, - epoch=epoch, - ) - return epoch_iter diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/scripts/inference/infer.sh b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/scripts/inference/infer.sh deleted file mode 100644 index dec70e1f30fb80f6957f4f3382b4c0963827cf43..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/scripts/inference/infer.sh +++ /dev/null @@ -1,15 +0,0 @@ -gender='male' -glowdir='../../checkpoints/glow/'$gender'/' -hifidir='../../checkpoints/hifi/'$gender'/' -device='cpu' -text='testing this one' - - -timestamp=$(date +%s) -wav='../../results/'$gender'/' -wav_file=$wav/$timestamp'.wav' - - -mkdir -p $wav -python ../../utils/inference/tts.py -a $glowdir -v $hifidir -d $device -t "$text" -w $wav_file -echo "File saved at: "$wav_file diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/new/decoders/decoder.py b/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/new/decoders/decoder.py deleted file mode 100644 index b5bec8cf707b53104ef7a45993a5db2893d3443b..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/new/decoders/decoder.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
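Also worth noting: in `TranslationMultiSimpleEpochTask.inference_step` (deleted above), decoding into a specific target language is forced by passing that language's token as `prefix_tokens` to the generator, so every hypothesis starts with the requested language tag. Below is a minimal sketch of that mechanism only; the token id is a made-up placeholder standing in for the value the data manager would look up, not the real API.

```python
import torch


def make_lang_prefix_tokens(src_tokens: torch.Tensor, tgt_lang_tok: int) -> torch.Tensor:
    """Build a (bsz, 1) tensor holding the target-language token.

    Mirrors what the deleted inference_step does before calling
    generator.generate(..., prefix_tokens=...): each beam is forced to
    begin with the language token, steering the multilingual model
    toward the requested output language.
    """
    bsz = src_tokens.size(0)
    return torch.LongTensor([[tgt_lang_tok]]).expand(bsz, 1).to(src_tokens.device)


# Hypothetical usage: tgt_lang_tok would normally be the dictionary index of a
# token such as "__de__"; 7 is just a placeholder id for illustration.
dummy_src = torch.zeros(4, 10, dtype=torch.long)
prefix = make_lang_prefix_tokens(dummy_src, tgt_lang_tok=7)
assert prefix.shape == (4, 1)
```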
- -from typing import Union - -from fairseq.data.dictionary import Dictionary - -from .decoder_config import DecoderConfig, FlashlightDecoderConfig -from .base_decoder import BaseDecoder - - -def Decoder( - cfg: Union[DecoderConfig, FlashlightDecoderConfig], tgt_dict: Dictionary -) -> BaseDecoder: - - if cfg.type == "viterbi": - from .viterbi_decoder import ViterbiDecoder - - return ViterbiDecoder(tgt_dict) - if cfg.type == "kenlm": - from .flashlight_decoder import KenLMDecoder - - return KenLMDecoder(cfg, tgt_dict) - if cfg.type == "fairseqlm": - from .flashlight_decoder import FairseqLMDecoder - - return FairseqLMDecoder(cfg, tgt_dict) - raise NotImplementedError(f"Invalid decoder name: {cfg.name}") diff --git a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/losses/losses.py b/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/losses/losses.py deleted file mode 100644 index 1bcf272cfb756d99451a3005567ea4d4c9059067..0000000000000000000000000000000000000000 --- a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/losses/losses.py +++ /dev/null @@ -1,455 +0,0 @@ -import math -import lpips -import torch -from torch import autograd as autograd -from torch import nn as nn -from torch.nn import functional as F - -from basicsr.archs.vgg_arch import VGGFeatureExtractor -from basicsr.utils.registry import LOSS_REGISTRY -from .loss_util import weighted_loss - -_reduction_modes = ['none', 'mean', 'sum'] - - -@weighted_loss -def l1_loss(pred, target): - return F.l1_loss(pred, target, reduction='none') - - -@weighted_loss -def mse_loss(pred, target): - return F.mse_loss(pred, target, reduction='none') - - -@weighted_loss -def charbonnier_loss(pred, target, eps=1e-12): - return torch.sqrt((pred - target)**2 + eps) - - -@LOSS_REGISTRY.register() -class L1Loss(nn.Module): - """L1 (mean absolute error, MAE) loss. - - Args: - loss_weight (float): Loss weight for L1 loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - """ - - def __init__(self, loss_weight=1.0, reduction='mean'): - super(L1Loss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. - weight (Tensor, optional): of shape (N, C, H, W). Element-wise - weights. Default: None. - """ - return self.loss_weight * l1_loss(pred, target, weight, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class MSELoss(nn.Module): - """MSE (L2) loss. - - Args: - loss_weight (float): Loss weight for MSE loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - """ - - def __init__(self, loss_weight=1.0, reduction='mean'): - super(MSELoss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. 
- weight (Tensor, optional): of shape (N, C, H, W). Element-wise - weights. Default: None. - """ - return self.loss_weight * mse_loss(pred, target, weight, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class CharbonnierLoss(nn.Module): - """Charbonnier loss (one variant of Robust L1Loss, a differentiable - variant of L1Loss). - - Described in "Deep Laplacian Pyramid Networks for Fast and Accurate - Super-Resolution". - - Args: - loss_weight (float): Loss weight for L1 loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - eps (float): A value used to control the curvature near zero. - Default: 1e-12. - """ - - def __init__(self, loss_weight=1.0, reduction='mean', eps=1e-12): - super(CharbonnierLoss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - self.eps = eps - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. - weight (Tensor, optional): of shape (N, C, H, W). Element-wise - weights. Default: None. - """ - return self.loss_weight * charbonnier_loss(pred, target, weight, eps=self.eps, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class WeightedTVLoss(L1Loss): - """Weighted TV loss. - - Args: - loss_weight (float): Loss weight. Default: 1.0. - """ - - def __init__(self, loss_weight=1.0): - super(WeightedTVLoss, self).__init__(loss_weight=loss_weight) - - def forward(self, pred, weight=None): - y_diff = super(WeightedTVLoss, self).forward(pred[:, :, :-1, :], pred[:, :, 1:, :], weight=weight[:, :, :-1, :]) - x_diff = super(WeightedTVLoss, self).forward(pred[:, :, :, :-1], pred[:, :, :, 1:], weight=weight[:, :, :, :-1]) - - loss = x_diff + y_diff - - return loss - - -@LOSS_REGISTRY.register() -class PerceptualLoss(nn.Module): - """Perceptual loss with commonly used style loss. - - Args: - layer_weights (dict): The weight for each layer of vgg feature. - Here is an example: {'conv5_4': 1.}, which means the conv5_4 - feature layer (before relu5_4) will be extracted with weight - 1.0 in calculting losses. - vgg_type (str): The type of vgg network used as feature extractor. - Default: 'vgg19'. - use_input_norm (bool): If True, normalize the input image in vgg. - Default: True. - range_norm (bool): If True, norm images with range [-1, 1] to [0, 1]. - Default: False. - perceptual_weight (float): If `perceptual_weight > 0`, the perceptual - loss will be calculated and the loss will multiplied by the - weight. Default: 1.0. - style_weight (float): If `style_weight > 0`, the style loss will be - calculated and the loss will multiplied by the weight. - Default: 0. - criterion (str): Criterion used for perceptual loss. Default: 'l1'. 
- """ - - def __init__(self, - layer_weights, - vgg_type='vgg19', - use_input_norm=True, - range_norm=False, - perceptual_weight=1.0, - style_weight=0., - criterion='l1'): - super(PerceptualLoss, self).__init__() - self.perceptual_weight = perceptual_weight - self.style_weight = style_weight - self.layer_weights = layer_weights - self.vgg = VGGFeatureExtractor( - layer_name_list=list(layer_weights.keys()), - vgg_type=vgg_type, - use_input_norm=use_input_norm, - range_norm=range_norm) - - self.criterion_type = criterion - if self.criterion_type == 'l1': - self.criterion = torch.nn.L1Loss() - elif self.criterion_type == 'l2': - self.criterion = torch.nn.L2loss() - elif self.criterion_type == 'mse': - self.criterion = torch.nn.MSELoss(reduction='mean') - elif self.criterion_type == 'fro': - self.criterion = None - else: - raise NotImplementedError(f'{criterion} criterion has not been supported.') - - def forward(self, x, gt): - """Forward function. - - Args: - x (Tensor): Input tensor with shape (n, c, h, w). - gt (Tensor): Ground-truth tensor with shape (n, c, h, w). - - Returns: - Tensor: Forward results. - """ - # extract vgg features - x_features = self.vgg(x) - gt_features = self.vgg(gt.detach()) - - # calculate perceptual loss - if self.perceptual_weight > 0: - percep_loss = 0 - for k in x_features.keys(): - if self.criterion_type == 'fro': - percep_loss += torch.norm(x_features[k] - gt_features[k], p='fro') * self.layer_weights[k] - else: - percep_loss += self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k] - percep_loss *= self.perceptual_weight - else: - percep_loss = None - - # calculate style loss - if self.style_weight > 0: - style_loss = 0 - for k in x_features.keys(): - if self.criterion_type == 'fro': - style_loss += torch.norm( - self._gram_mat(x_features[k]) - self._gram_mat(gt_features[k]), p='fro') * self.layer_weights[k] - else: - style_loss += self.criterion(self._gram_mat(x_features[k]), self._gram_mat( - gt_features[k])) * self.layer_weights[k] - style_loss *= self.style_weight - else: - style_loss = None - - return percep_loss, style_loss - - def _gram_mat(self, x): - """Calculate Gram matrix. - - Args: - x (torch.Tensor): Tensor with shape of (n, c, h, w). - - Returns: - torch.Tensor: Gram matrix. - """ - n, c, h, w = x.size() - features = x.view(n, c, w * h) - features_t = features.transpose(1, 2) - gram = features.bmm(features_t) / (c * h * w) - return gram - - -@LOSS_REGISTRY.register() -class LPIPSLoss(nn.Module): - def __init__(self, - loss_weight=1.0, - use_input_norm=True, - range_norm=False,): - super(LPIPSLoss, self).__init__() - self.perceptual = lpips.LPIPS(net="vgg", spatial=False).eval() - self.loss_weight = loss_weight - self.use_input_norm = use_input_norm - self.range_norm = range_norm - - if self.use_input_norm: - # the mean is for image with range [0, 1] - self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)) - # the std is for image with range [0, 1] - self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)) - - def forward(self, pred, target): - if self.range_norm: - pred = (pred + 1) / 2 - target = (target + 1) / 2 - if self.use_input_norm: - pred = (pred - self.mean) / self.std - target = (target - self.mean) / self.std - lpips_loss = self.perceptual(target.contiguous(), pred.contiguous()) - return self.loss_weight * lpips_loss.mean() - - -@LOSS_REGISTRY.register() -class GANLoss(nn.Module): - """Define GAN loss. 
- - Args: - gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'. - real_label_val (float): The value for real label. Default: 1.0. - fake_label_val (float): The value for fake label. Default: 0.0. - loss_weight (float): Loss weight. Default: 1.0. - Note that loss_weight is only for generators; and it is always 1.0 - for discriminators. - """ - - def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0, loss_weight=1.0): - super(GANLoss, self).__init__() - self.gan_type = gan_type - self.loss_weight = loss_weight - self.real_label_val = real_label_val - self.fake_label_val = fake_label_val - - if self.gan_type == 'vanilla': - self.loss = nn.BCEWithLogitsLoss() - elif self.gan_type == 'lsgan': - self.loss = nn.MSELoss() - elif self.gan_type == 'wgan': - self.loss = self._wgan_loss - elif self.gan_type == 'wgan_softplus': - self.loss = self._wgan_softplus_loss - elif self.gan_type == 'hinge': - self.loss = nn.ReLU() - else: - raise NotImplementedError(f'GAN type {self.gan_type} is not implemented.') - - def _wgan_loss(self, input, target): - """wgan loss. - - Args: - input (Tensor): Input tensor. - target (bool): Target label. - - Returns: - Tensor: wgan loss. - """ - return -input.mean() if target else input.mean() - - def _wgan_softplus_loss(self, input, target): - """wgan loss with soft plus. softplus is a smooth approximation to the - ReLU function. - - In StyleGAN2, it is called: - Logistic loss for discriminator; - Non-saturating loss for generator. - - Args: - input (Tensor): Input tensor. - target (bool): Target label. - - Returns: - Tensor: wgan loss. - """ - return F.softplus(-input).mean() if target else F.softplus(input).mean() - - def get_target_label(self, input, target_is_real): - """Get target label. - - Args: - input (Tensor): Input tensor. - target_is_real (bool): Whether the target is real or fake. - - Returns: - (bool | Tensor): Target tensor. Return bool for wgan, otherwise, - return Tensor. - """ - - if self.gan_type in ['wgan', 'wgan_softplus']: - return target_is_real - target_val = (self.real_label_val if target_is_real else self.fake_label_val) - return input.new_ones(input.size()) * target_val - - def forward(self, input, target_is_real, is_disc=False): - """ - Args: - input (Tensor): The input for the loss module, i.e., the network - prediction. - target_is_real (bool): Whether the targe is real or fake. - is_disc (bool): Whether the loss for discriminators or not. - Default: False. - - Returns: - Tensor: GAN loss value. - """ - if self.gan_type == 'hinge': - if is_disc: # for discriminators in hinge-gan - input = -input if target_is_real else input - loss = self.loss(1 + input).mean() - else: # for generators in hinge-gan - loss = -input.mean() - else: # other gan types - target_label = self.get_target_label(input, target_is_real) - loss = self.loss(input, target_label) - - # loss_weight is always 1.0 for discriminators - return loss if is_disc else loss * self.loss_weight - - -def r1_penalty(real_pred, real_img): - """R1 regularization for discriminator. The core idea is to - penalize the gradient on real data alone: when the - generator distribution produces the true data distribution - and the discriminator is equal to 0 on the data manifold, the - gradient penalty ensures that the discriminator cannot create - a non-zero gradient orthogonal to the data manifold without - suffering a loss in the GAN game. - - Ref: - Eq. 9 in Which training methods for GANs do actually converge. 
- """ - grad_real = autograd.grad(outputs=real_pred.sum(), inputs=real_img, create_graph=True)[0] - grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean() - return grad_penalty - - -def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01): - noise = torch.randn_like(fake_img) / math.sqrt(fake_img.shape[2] * fake_img.shape[3]) - grad = autograd.grad(outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)[0] - path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1)) - - path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length) - - path_penalty = (path_lengths - path_mean).pow(2).mean() - - return path_penalty, path_lengths.detach().mean(), path_mean.detach() - - -def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None): - """Calculate gradient penalty for wgan-gp. - - Args: - discriminator (nn.Module): Network for the discriminator. - real_data (Tensor): Real input data. - fake_data (Tensor): Fake input data. - weight (Tensor): Weight tensor. Default: None. - - Returns: - Tensor: A tensor for gradient penalty. - """ - - batch_size = real_data.size(0) - alpha = real_data.new_tensor(torch.rand(batch_size, 1, 1, 1)) - - # interpolate between real_data and fake_data - interpolates = alpha * real_data + (1. - alpha) * fake_data - interpolates = autograd.Variable(interpolates, requires_grad=True) - - disc_interpolates = discriminator(interpolates) - gradients = autograd.grad( - outputs=disc_interpolates, - inputs=interpolates, - grad_outputs=torch.ones_like(disc_interpolates), - create_graph=True, - retain_graph=True, - only_inputs=True)[0] - - if weight is not None: - gradients = gradients * weight - - gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean() - if weight is not None: - gradients_penalty /= torch.mean(weight) - - return gradients_penalty diff --git a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/ops/__init__.py b/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/ops/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/JohnTan38/NLLB-translation/flores200_codes.py b/spaces/JohnTan38/NLLB-translation/flores200_codes.py deleted file mode 100644 index 82cd29d33804a6db1ecb4b15e04be19ca83e993a..0000000000000000000000000000000000000000 --- a/spaces/JohnTan38/NLLB-translation/flores200_codes.py +++ /dev/null @@ -1,211 +0,0 @@ -codes_as_string = '''Acehnese (Arabic script) ace_Arab -Acehnese (Latin script) ace_Latn -Mesopotamian Arabic acm_Arab -Ta’izzi-Adeni Arabic acq_Arab -Tunisian Arabic aeb_Arab -Afrikaans afr_Latn -South Levantine Arabic ajp_Arab -Akan aka_Latn -Amharic amh_Ethi -North Levantine Arabic apc_Arab -Modern Standard Arabic arb_Arab -Modern Standard Arabic (Romanized) arb_Latn -Najdi Arabic ars_Arab -Moroccan Arabic ary_Arab -Egyptian Arabic arz_Arab -Assamese asm_Beng -Asturian ast_Latn -Awadhi awa_Deva -Central Aymara ayr_Latn -South Azerbaijani azb_Arab -North Azerbaijani azj_Latn -Bashkir bak_Cyrl -Bambara bam_Latn -Balinese ban_Latn -Belarusian bel_Cyrl -Bemba bem_Latn -Bengali ben_Beng -Bhojpuri bho_Deva -Banjar (Arabic script) bjn_Arab -Banjar (Latin script) bjn_Latn -Standard Tibetan bod_Tibt -Bosnian bos_Latn -Buginese bug_Latn -Bulgarian bul_Cyrl -Catalan cat_Latn -Cebuano ceb_Latn -Czech ces_Latn -Chokwe cjk_Latn -Central Kurdish ckb_Arab -Crimean Tatar crh_Latn -Welsh cym_Latn -Danish dan_Latn -German deu_Latn -Southwestern Dinka dik_Latn -Dyula dyu_Latn -Dzongkha dzo_Tibt -Greek 
ell_Grek -English eng_Latn -Esperanto epo_Latn -Estonian est_Latn -Basque eus_Latn -Ewe ewe_Latn -Faroese fao_Latn -Fijian fij_Latn -Finnish fin_Latn -Fon fon_Latn -French fra_Latn -Friulian fur_Latn -Nigerian Fulfulde fuv_Latn -Scottish Gaelic gla_Latn -Irish gle_Latn -Galician glg_Latn -Guarani grn_Latn -Gujarati guj_Gujr -Haitian Creole hat_Latn -Hausa hau_Latn -Hebrew heb_Hebr -Hindi hin_Deva -Chhattisgarhi hne_Deva -Croatian hrv_Latn -Hungarian hun_Latn -Armenian hye_Armn -Igbo ibo_Latn -Ilocano ilo_Latn -Indonesian ind_Latn -Icelandic isl_Latn -Italian ita_Latn -Javanese jav_Latn -Japanese jpn_Jpan -Kabyle kab_Latn -Jingpho kac_Latn -Kamba kam_Latn -Kannada kan_Knda -Kashmiri (Arabic script) kas_Arab -Kashmiri (Devanagari script) kas_Deva -Georgian kat_Geor -Central Kanuri (Arabic script) knc_Arab -Central Kanuri (Latin script) knc_Latn -Kazakh kaz_Cyrl -Kabiyè kbp_Latn -Kabuverdianu kea_Latn -Khmer khm_Khmr -Kikuyu kik_Latn -Kinyarwanda kin_Latn -Kyrgyz kir_Cyrl -Kimbundu kmb_Latn -Northern Kurdish kmr_Latn -Kikongo kon_Latn -Korean kor_Hang -Lao lao_Laoo -Ligurian lij_Latn -Limburgish lim_Latn -Lingala lin_Latn -Lithuanian lit_Latn -Lombard lmo_Latn -Latgalian ltg_Latn -Luxembourgish ltz_Latn -Luba-Kasai lua_Latn -Ganda lug_Latn -Luo luo_Latn -Mizo lus_Latn -Standard Latvian lvs_Latn -Magahi mag_Deva -Maithili mai_Deva -Malayalam mal_Mlym -Marathi mar_Deva -Minangkabau (Arabic script) min_Arab -Minangkabau (Latin script) min_Latn -Macedonian mkd_Cyrl -Plateau Malagasy plt_Latn -Maltese mlt_Latn -Meitei (Bengali script) mni_Beng -Halh Mongolian khk_Cyrl -Mossi mos_Latn -Maori mri_Latn -Burmese mya_Mymr -Dutch nld_Latn -Norwegian Nynorsk nno_Latn -Norwegian Bokmål nob_Latn -Nepali npi_Deva -Northern Sotho nso_Latn -Nuer nus_Latn -Nyanja nya_Latn -Occitan oci_Latn -West Central Oromo gaz_Latn -Odia ory_Orya -Pangasinan pag_Latn -Eastern Panjabi pan_Guru -Papiamento pap_Latn -Western Persian pes_Arab -Polish pol_Latn -Portuguese por_Latn -Dari prs_Arab -Southern Pashto pbt_Arab -Ayacucho Quechua quy_Latn -Romanian ron_Latn -Rundi run_Latn -Russian rus_Cyrl -Sango sag_Latn -Sanskrit san_Deva -Santali sat_Olck -Sicilian scn_Latn -Shan shn_Mymr -Sinhala sin_Sinh -Slovak slk_Latn -Slovenian slv_Latn -Samoan smo_Latn -Shona sna_Latn -Sindhi snd_Arab -Somali som_Latn -Southern Sotho sot_Latn -Spanish spa_Latn -Tosk Albanian als_Latn -Sardinian srd_Latn -Serbian srp_Cyrl -Swati ssw_Latn -Sundanese sun_Latn -Swedish swe_Latn -Swahili swh_Latn -Silesian szl_Latn -Tamil tam_Taml -Tatar tat_Cyrl -Telugu tel_Telu -Tajik tgk_Cyrl -Tagalog tgl_Latn -Thai tha_Thai -Tigrinya tir_Ethi -Tamasheq (Latin script) taq_Latn -Tamasheq (Tifinagh script) taq_Tfng -Tok Pisin tpi_Latn -Tswana tsn_Latn -Tsonga tso_Latn -Turkmen tuk_Latn -Tumbuka tum_Latn -Turkish tur_Latn -Twi twi_Latn -Central Atlas Tamazight tzm_Tfng -Uyghur uig_Arab -Ukrainian ukr_Cyrl -Umbundu umb_Latn -Urdu urd_Arab -Northern Uzbek uzn_Latn -Venetian vec_Latn -Vietnamese vie_Latn -Waray war_Latn -Wolof wol_Latn -Xhosa xho_Latn -Eastern Yiddish ydd_Hebr -Yoruba yor_Latn -Yue Chinese yue_Hant -Chinese (Simplified) zho_Hans -Chinese (Traditional) zho_Hant -Standard Malay zsm_Latn -Zulu zul_Latn''' - -codes_as_string = codes_as_string.split('\n') - -flores_codes = {} -for code in codes_as_string: - lang, lang_code = code.split('\t') - flores_codes[lang] = lang_code \ No newline at end of file diff --git a/spaces/Kayson/InstructDiffusion/stable_diffusion/ldm/models/diffusion/classifier.py 
b/spaces/Kayson/InstructDiffusion/stable_diffusion/ldm/models/diffusion/classifier.py deleted file mode 100644 index 67e98b9d8ffb96a150b517497ace0a242d7163ef..0000000000000000000000000000000000000000 --- a/spaces/Kayson/InstructDiffusion/stable_diffusion/ldm/models/diffusion/classifier.py +++ /dev/null @@ -1,267 +0,0 @@ -import os -import torch -import pytorch_lightning as pl -from omegaconf import OmegaConf -from torch.nn import functional as F -from torch.optim import AdamW -from torch.optim.lr_scheduler import LambdaLR -from copy import deepcopy -from einops import rearrange -from glob import glob -from natsort import natsorted - -from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel -from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config - -__models__ = { - 'class_label': EncoderUNetModel, - 'segmentation': UNetModel -} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -class NoisyLatentImageClassifier(pl.LightningModule): - - def __init__(self, - diffusion_path, - num_classes, - ckpt_path=None, - pool='attention', - label_key=None, - diffusion_ckpt_path=None, - scheduler_config=None, - weight_decay=1.e-2, - log_steps=10, - monitor='val/loss', - *args, - **kwargs): - super().__init__(*args, **kwargs) - self.num_classes = num_classes - # get latest config of diffusion model - diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1] - self.diffusion_config = OmegaConf.load(diffusion_config).model - self.diffusion_config.params.ckpt_path = diffusion_ckpt_path - self.load_diffusion() - - self.monitor = monitor - self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 - self.log_time_interval = self.diffusion_model.num_timesteps // log_steps - self.log_steps = log_steps - - self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \ - else self.diffusion_model.cond_stage_key - - assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params' - - if self.label_key not in __models__: - raise NotImplementedError() - - self.load_classifier(ckpt_path, pool) - - self.scheduler_config = scheduler_config - self.use_scheduler = self.scheduler_config is not None - self.weight_decay = weight_decay - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - def load_diffusion(self): - model = instantiate_from_config(self.diffusion_config) - self.diffusion_model = model.eval() - self.diffusion_model.train = disabled_train - for param in self.diffusion_model.parameters(): - param.requires_grad = False - - def load_classifier(self, ckpt_path, pool): - model_config = deepcopy(self.diffusion_config.params.unet_config.params) - model_config.in_channels = 
self.diffusion_config.params.unet_config.params.out_channels - model_config.out_channels = self.num_classes - if self.label_key == 'class_label': - model_config.pool = pool - - self.model = __models__[self.label_key](**model_config) - if ckpt_path is not None: - print('#####################################################################') - print(f'load from ckpt "{ckpt_path}"') - print('#####################################################################') - self.init_from_ckpt(ckpt_path) - - @torch.no_grad() - def get_x_noisy(self, x, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x)) - continuous_sqrt_alpha_cumprod = None - if self.diffusion_model.use_continuous_noise: - continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) - # todo: make sure t+1 is correct here - - return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise, - continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod) - - def forward(self, x_noisy, t, *args, **kwargs): - return self.model(x_noisy, t) - - @torch.no_grad() - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = x.to(memory_format=torch.contiguous_format).float() - return x - - @torch.no_grad() - def get_conditioning(self, batch, k=None): - if k is None: - k = self.label_key - assert k is not None, 'Needs to provide label key' - - targets = batch[k].to(self.device) - - if self.label_key == 'segmentation': - targets = rearrange(targets, 'b h w c -> b c h w') - for down in range(self.numd): - h, w = targets.shape[-2:] - targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest') - - # targets = rearrange(targets,'b c h w -> b h w c') - - return targets - - def compute_top_k(self, logits, labels, k, reduction="mean"): - _, top_ks = torch.topk(logits, k, dim=1) - if reduction == "mean": - return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() - elif reduction == "none": - return (top_ks == labels[:, None]).float().sum(dim=-1) - - def on_train_epoch_start(self): - # save some memory - self.diffusion_model.model.to('cpu') - - @torch.no_grad() - def write_logs(self, loss, logits, targets): - log_prefix = 'train' if self.training else 'val' - log = {} - log[f"{log_prefix}/loss"] = loss.mean() - log[f"{log_prefix}/acc@1"] = self.compute_top_k( - logits, targets, k=1, reduction="mean" - ) - log[f"{log_prefix}/acc@5"] = self.compute_top_k( - logits, targets, k=5, reduction="mean" - ) - - self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True) - self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False) - self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True) - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) - - def shared_step(self, batch, t=None): - x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key) - targets = self.get_conditioning(batch) - if targets.dim() == 4: - targets = targets.argmax(dim=1) - if t is None: - t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long() - else: - t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() - x_noisy = self.get_x_noisy(x, t) - logits = self(x_noisy, t) - - loss = F.cross_entropy(logits, targets, reduction='none') - - self.write_logs(loss.detach(), logits.detach(), targets.detach()) - - loss 
= loss.mean() - return loss, logits, x_noisy, targets - - def training_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - return loss - - def reset_noise_accs(self): - self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in - range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)} - - def on_validation_start(self): - self.reset_noise_accs() - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - - for t in self.noisy_acc: - _, logits, _, targets = self.shared_step(batch, t) - self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean')) - self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean')) - - return loss - - def configure_optimizers(self): - optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) - - if self.use_scheduler: - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [optimizer], scheduler - - return optimizer - - @torch.no_grad() - def log_images(self, batch, N=8, *args, **kwargs): - log = dict() - x = self.get_input(batch, self.diffusion_model.first_stage_key) - log['inputs'] = x - - y = self.get_conditioning(batch) - - if self.label_key == 'class_label': - y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) - log['labels'] = y - - if ismap(y): - log['labels'] = self.diffusion_model.to_rgb(y) - - for step in range(self.log_steps): - current_time = step * self.log_time_interval - - _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) - - log[f'inputs@t{current_time}'] = x_noisy - - pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) - pred = rearrange(pred, 'b h w c -> b c h w') - - log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred) - - for key in log: - log[key] = log[key][:N] - - return log diff --git a/spaces/Kevin676/Clone-Your-Voice/encoder/data_objects/speaker_verification_dataset.py b/spaces/Kevin676/Clone-Your-Voice/encoder/data_objects/speaker_verification_dataset.py deleted file mode 100644 index 77a6e05eae6a939ae7575ae70b7173644141fffe..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Clone-Your-Voice/encoder/data_objects/speaker_verification_dataset.py +++ /dev/null @@ -1,56 +0,0 @@ -from encoder.data_objects.random_cycler import RandomCycler -from encoder.data_objects.speaker_batch import SpeakerBatch -from encoder.data_objects.speaker import Speaker -from encoder.params_data import partials_n_frames -from torch.utils.data import Dataset, DataLoader -from pathlib import Path - -# TODO: improve with a pool of speakers for data efficiency - -class SpeakerVerificationDataset(Dataset): - def __init__(self, datasets_root: Path): - self.root = datasets_root - speaker_dirs = [f for f in self.root.glob("*") if f.is_dir()] - if len(speaker_dirs) == 0: - raise Exception("No speakers found. 
Make sure you are pointing to the directory " - "containing all preprocessed speaker directories.") - self.speakers = [Speaker(speaker_dir) for speaker_dir in speaker_dirs] - self.speaker_cycler = RandomCycler(self.speakers) - - def __len__(self): - return int(1e10) - - def __getitem__(self, index): - return next(self.speaker_cycler) - - def get_logs(self): - log_string = "" - for log_fpath in self.root.glob("*.txt"): - with log_fpath.open("r") as log_file: - log_string += "".join(log_file.readlines()) - return log_string - - -class SpeakerVerificationDataLoader(DataLoader): - def __init__(self, dataset, speakers_per_batch, utterances_per_speaker, sampler=None, - batch_sampler=None, num_workers=0, pin_memory=False, timeout=0, - worker_init_fn=None): - self.utterances_per_speaker = utterances_per_speaker - - super().__init__( - dataset=dataset, - batch_size=speakers_per_batch, - shuffle=False, - sampler=sampler, - batch_sampler=batch_sampler, - num_workers=num_workers, - collate_fn=self.collate, - pin_memory=pin_memory, - drop_last=False, - timeout=timeout, - worker_init_fn=worker_init_fn - ) - - def collate(self, speakers): - return SpeakerBatch(speakers, self.utterances_per_speaker, partials_n_frames) - \ No newline at end of file diff --git a/spaces/Kevin676/Clone-Your-Voice/vocoder/distribution.py b/spaces/Kevin676/Clone-Your-Voice/vocoder/distribution.py deleted file mode 100644 index d3119a5ba1e77bc25a92d2664f83d366f12399c0..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Clone-Your-Voice/vocoder/distribution.py +++ /dev/null @@ -1,132 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F - - -def log_sum_exp(x): - """ numerically stable log_sum_exp implementation that prevents overflow """ - # TF ordering - axis = len(x.size()) - 1 - m, _ = torch.max(x, dim=axis) - m2, _ = torch.max(x, dim=axis, keepdim=True) - return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis)) - - -# It is adapted from https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py -def discretized_mix_logistic_loss(y_hat, y, num_classes=65536, - log_scale_min=None, reduce=True): - if log_scale_min is None: - log_scale_min = float(np.log(1e-14)) - y_hat = y_hat.permute(0,2,1) - assert y_hat.dim() == 3 - assert y_hat.size(1) % 3 == 0 - nr_mix = y_hat.size(1) // 3 - - # (B x T x C) - y_hat = y_hat.transpose(1, 2) - - # unpack parameters. (B, T, num_mixtures) x 3 - logit_probs = y_hat[:, :, :nr_mix] - means = y_hat[:, :, nr_mix:2 * nr_mix] - log_scales = torch.clamp(y_hat[:, :, 2 * nr_mix:3 * nr_mix], min=log_scale_min) - - # B x T x 1 -> B x T x num_mixtures - y = y.expand_as(means) - - centered_y = y - means - inv_stdv = torch.exp(-log_scales) - plus_in = inv_stdv * (centered_y + 1. / (num_classes - 1)) - cdf_plus = torch.sigmoid(plus_in) - min_in = inv_stdv * (centered_y - 1. / (num_classes - 1)) - cdf_min = torch.sigmoid(min_in) - - # log probability for edge case of 0 (before scaling) - # equivalent: torch.log(F.sigmoid(plus_in)) - log_cdf_plus = plus_in - F.softplus(plus_in) - - # log probability for edge case of 255 (before scaling) - # equivalent: (1 - F.sigmoid(min_in)).log() - log_one_minus_cdf_min = -F.softplus(min_in) - - # probability for all other cases - cdf_delta = cdf_plus - cdf_min - - mid_in = inv_stdv * centered_y - # log probability in the center of the bin, to be used in extreme cases - # (not actually used in our code) - log_pdf_mid = mid_in - log_scales - 2. 
* F.softplus(mid_in) - - # tf equivalent - """ - log_probs = tf.where(x < -0.999, log_cdf_plus, - tf.where(x > 0.999, log_one_minus_cdf_min, - tf.where(cdf_delta > 1e-5, - tf.log(tf.maximum(cdf_delta, 1e-12)), - log_pdf_mid - np.log(127.5)))) - """ - # TODO: cdf_delta <= 1e-5 actually can happen. How can we choose the value - # for num_classes=65536 case? 1e-7? not sure.. - inner_inner_cond = (cdf_delta > 1e-5).float() - - inner_inner_out = inner_inner_cond * \ - torch.log(torch.clamp(cdf_delta, min=1e-12)) + \ - (1. - inner_inner_cond) * (log_pdf_mid - np.log((num_classes - 1) / 2)) - inner_cond = (y > 0.999).float() - inner_out = inner_cond * log_one_minus_cdf_min + (1. - inner_cond) * inner_inner_out - cond = (y < -0.999).float() - log_probs = cond * log_cdf_plus + (1. - cond) * inner_out - - log_probs = log_probs + F.log_softmax(logit_probs, -1) - - if reduce: - return -torch.mean(log_sum_exp(log_probs)) - else: - return -log_sum_exp(log_probs).unsqueeze(-1) - - -def sample_from_discretized_mix_logistic(y, log_scale_min=None): - """ - Sample from discretized mixture of logistic distributions - Args: - y (Tensor): B x C x T - log_scale_min (float): Log scale minimum value - Returns: - Tensor: sample in range of [-1, 1]. - """ - if log_scale_min is None: - log_scale_min = float(np.log(1e-14)) - assert y.size(1) % 3 == 0 - nr_mix = y.size(1) // 3 - - # B x T x C - y = y.transpose(1, 2) - logit_probs = y[:, :, :nr_mix] - - # sample mixture indicator from softmax - temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5) - temp = logit_probs.data - torch.log(- torch.log(temp)) - _, argmax = temp.max(dim=-1) - - # (B, T) -> (B, T, nr_mix) - one_hot = to_one_hot(argmax, nr_mix) - # select logistic parameters - means = torch.sum(y[:, :, nr_mix:2 * nr_mix] * one_hot, dim=-1) - log_scales = torch.clamp(torch.sum( - y[:, :, 2 * nr_mix:3 * nr_mix] * one_hot, dim=-1), min=log_scale_min) - # sample from logistic & clip to interval - # we don't actually round to the nearest 8bit value when sampling - u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5) - x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u)) - - x = torch.clamp(torch.clamp(x, min=-1.), max=1.) 
- - return x - - -def to_one_hot(tensor, n, fill_with=1.): - # we perform one hot encore with respect to the last axis - one_hot = torch.FloatTensor(tensor.size() + (n,)).zero_() - if tensor.is_cuda: - one_hot = one_hot.cuda() - one_hot.scatter_(len(tensor.size()), tensor.unsqueeze(-1), fill_with) - return one_hot diff --git a/spaces/KyanChen/BuildingExtraction/Tools/GetTrainValTestCSV.py b/spaces/KyanChen/BuildingExtraction/Tools/GetTrainValTestCSV.py deleted file mode 100644 index a4b14ec4e550d6a98e4f7465fe366889b74c6683..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/BuildingExtraction/Tools/GetTrainValTestCSV.py +++ /dev/null @@ -1,128 +0,0 @@ -import os -import glob -import random - -import pandas as pd -import cv2 -import tqdm -import numpy as np - - -class GetTrainTestCSV: - def __init__(self, dataset_path_list, csv_name, img_format_list, negative_keep_rate=0.1): - self.data_path_list = dataset_path_list - self.img_format_list = img_format_list - self.negative_keep_rate = negative_keep_rate - self.save_path_csv = r'generate_dep_info' - os.makedirs(self.save_path_csv, exist_ok=True) - self.csv_name = csv_name - - def get_csv(self, pattern): - def get_data_infos(img_path, img_format): - data_info = {'img': [], 'label': []} - img_file_list = glob.glob(img_path + '/*%s' % img_format) - assert len(img_file_list), 'No data in DATASET_PATH!' - for img_file in tqdm.tqdm(img_file_list): - label_file = img_file.replace(img_format, 'png').replace('imgs', 'labels') - if not os.path.exists(label_file): - label_file = 'None' - # if os.path.getsize(label_file) == 0: - # if np.random.random() < self.negative_keep_rate: - # data_info['img'].append(img_file) - # data_info['label'].append(label_file) - # continue - if pattern == 'test': - label_file = 'None' - data_info['img'].append(img_file) - data_info['label'].append(label_file) - - return data_info - - data_information = {'img': [], 'label': []} - for idx, data_dir in enumerate(self.data_path_list): - if len(self.data_path_list) == len(self.img_format_list): - img_format = self.img_format_list[idx] - else: - img_format = self.img_format_list[0] - assert os.path.exists(data_dir), 'No dir: ' + data_dir - img_path_list = glob.glob(data_dir+'/*{0}'.format(img_format)) - # img folder - if len(img_path_list) == 0: - img_path_list = glob.glob(data_dir+'/*') - for img_path in img_path_list: - if os.path.isdir(img_path): - data_info = get_data_infos(img_path, img_format) - data_information['img'].extend(data_info['img']) - data_information['label'].extend(data_info['label']) - - else: - data_info = get_data_infos(data_dir, img_format) - data_information['img'].extend(data_info['img']) - data_information['label'].extend(data_info['label']) - - data_annotation = pd.DataFrame(data_information) - writer_name = self.save_path_csv + '/' + self.csv_name - data_annotation.to_csv(writer_name, index_label=False) - print(os.path.basename(writer_name) + ' file saves successfully!') - - def generate_val_data_from_train_data(self, frac=0.1): - if os.path.exists(self.save_path_csv + '/' + self.csv_name): - data = pd.read_csv(self.save_path_csv + '/' + self.csv_name) - else: - raise Exception('no train data') - - val_data = data.sample(frac=frac, replace=False) - train_data = data.drop(val_data.index) - val_data = val_data.reset_index(drop=True) - train_data = train_data.reset_index(drop=True) - writer_name = self.save_path_csv + '/' + self.csv_name - train_data.to_csv(writer_name, index_label=False) - writer_name = self.save_path_csv + '/' + 
self.csv_name.replace('train', 'val') - val_data.to_csv(writer_name, index_label=False) - - def _get_file(self, in_path_list): - file_list = [] - for file in in_path_list: - if os.path.isdir(os.path.abspath(file)): - files = glob.glob(file + '/*') - file_list.extend(self._get_file(files)) - else: - file_list += [file] - return file_list - - def get_csv_file(self, phase): - phases = ['seg', 'flow', 'od'] - assert phase in phases, f'{phase} should in {phases}!' - - file_list = self._get_file(self.data_path_list) - file_list = [x for x in file_list if x.split('.')[-1] in self.img_format_list] - assert len(file_list), 'No data in data_path_list!' - random.shuffle(file_list) - data_information = {} - if phase == 'seg': - data_information['img'] = file_list - data_information['label'] = [x.replace('img', 'label') for x in file_list] - elif phase == 'flow': - data_information['img1'] = file_list[:-1] - data_information['img2'] = file_list[1:] - elif phase == 'od': - data_information['img'] = file_list - data_information['label'] = [x.replace('tiff', 'txt').replace('jpg', 'txt').replace('png', 'txt') for x in file_list] - - data_annotation = pd.DataFrame(data_information) - writer_name = self.save_path_csv + '/' + self.csv_name - data_annotation.to_csv(writer_name, index_label=False) - print(os.path.basename(writer_name) + ' file saves successfully!') - - -if __name__ == '__main__': - data_path_list = [ - 'D:/Code/ProjectOnGithub/STT/Data/val_samples/img' - ] - csv_name = 'val_data.csv' - img_format_list = ['png'] - - getTrainTestCSV = GetTrainTestCSV(dataset_path_list=data_path_list, csv_name=csv_name, img_format_list=img_format_list) - getTrainTestCSV.get_csv_file(phase='seg') - - diff --git a/spaces/LaynzKunz/Advanced-RVC-Inference/lib/infer_pack/models_onnx.py b/spaces/LaynzKunz/Advanced-RVC-Inference/lib/infer_pack/models_onnx.py deleted file mode 100644 index 963e67b29f828e9fdd096397952054fe77cf3d10..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Advanced-RVC-Inference/lib/infer_pack/models_onnx.py +++ /dev/null @@ -1,819 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) 
+ self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, 
x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, 
length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - 
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMsNSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - version, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - if version == "v1": - self.enc_p = 
TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - else: - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - self.speaker_map = None - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def construct_spkmixmap(self, n_speaker): - self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) - for i in range(n_speaker): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) - self.speaker_map = self.speaker_map.unsqueeze(0) - - def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): - if self.speaker_map is not None: # [N, S] * [S, B, 1, H] - g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # 
print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/Lbin123/Lbingo/src/lib/hooks/use-bing.ts b/spaces/Lbin123/Lbingo/src/lib/hooks/use-bing.ts deleted file mode 100644 index dcdb1667ced0cba299b0825c0e91c4732411308c..0000000000000000000000000000000000000000 --- a/spaces/Lbin123/Lbingo/src/lib/hooks/use-bing.ts +++ /dev/null @@ -1,173 +0,0 @@ -'use client' - -import { useState, useCallback, useEffect, useMemo } from 'react' -import { useAtom, useAtomValue } from 'jotai' -import { chatFamily, bingConversationStyleAtom, GreetMessages, hashAtom, voiceAtom } from '@/state' -import { setConversationMessages } from './chat-history' -import { ChatMessageModel, BotId, FileItem } from '@/lib/bots/bing/types' -import { nanoid } from '../utils' -import { TTS } from '../bots/bing/tts' - -export function useBing(botId: BotId = 'bing') { - const chatAtom = useMemo(() => chatFamily({ botId, page: 'singleton' }), [botId]) - const [enableTTS] = useAtom(voiceAtom) - const speaker = useMemo(() => new TTS(), []) - const [hash, setHash] = useAtom(hashAtom) - const 
bingConversationStyle = useAtomValue(bingConversationStyleAtom) - const [chatState, setChatState] = useAtom(chatAtom) - const [input, setInput] = useState('') - const [attachmentList, setAttachmentList] = useState([]) - - const updateMessage = useCallback( - (messageId: string, updater: (message: ChatMessageModel) => void) => { - setChatState((draft) => { - const message = draft.messages.find((m) => m.id === messageId) - if (message) { - updater(message) - } - }) - }, - [setChatState], - ) - - const sendMessage = useCallback( - async (input: string, options = {}) => { - const botMessageId = nanoid() - const imageUrl = attachmentList?.[0]?.status === 'loaded' ? attachmentList[0].url : undefined - setChatState((draft) => { - const text = imageUrl ? `${input}\n\n![image](${imageUrl})` : input - draft.messages.push({ id: nanoid(), text, author: 'user' }, { id: botMessageId, text: '', author: 'bot' }) - setAttachmentList([]) - }) - const abortController = new AbortController() - setChatState((draft) => { - draft.generatingMessageId = botMessageId - draft.abortController = abortController - }) - speaker.reset() - await chatState.bot.sendMessage({ - prompt: input, - imageUrl: /\?bcid=([^&]+)/.test(imageUrl ?? '') ? `https://www.bing.com/images/blob?bcid=${RegExp.$1}` : imageUrl, - options: { - ...options, - bingConversationStyle, - }, - signal: abortController.signal, - onEvent(event) { - if (event.type === 'UPDATE_ANSWER') { - updateMessage(botMessageId, (message) => { - if (event.data.text.length > message.text.length) { - message.text = event.data.text - } - - if (event.data.spokenText && enableTTS) { - speaker.speak(event.data.spokenText) - } - - message.throttling = event.data.throttling || message.throttling - message.sourceAttributions = event.data.sourceAttributions || message.sourceAttributions - message.suggestedResponses = event.data.suggestedResponses || message.suggestedResponses - }) - } else if (event.type === 'ERROR') { - updateMessage(botMessageId, (message) => { - message.error = event.error - }) - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - }) - } else if (event.type === 'DONE') { - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - }) - } - }, - }) - }, - [botId, attachmentList, chatState.bot, setChatState, updateMessage], - ) - - const uploadImage = useCallback(async (imgUrl: string) => { - setAttachmentList([{ url: imgUrl, status: 'loading' }]) - const response = await chatState.bot.uploadImage(imgUrl, bingConversationStyle) - if (response?.blobId) { - setAttachmentList([{ url: `/api/blob?bcid=${response.blobId}`, status: 'loaded' }]) - } else { - setAttachmentList([{ url: imgUrl, status: 'error' }]) - } - }, [chatState.bot]) - - const resetConversation = useCallback(() => { - chatState.bot.resetConversation() - speaker.abort() - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - draft.messages = [{ author: 'bot', text: GreetMessages[Math.floor(GreetMessages.length * Math.random())], id: nanoid() }] - draft.conversationId = nanoid() - }) - }, [chatState.bot, setChatState]) - - const stopGenerating = useCallback(() => { - chatState.abortController?.abort() - if (chatState.generatingMessageId) { - updateMessage(chatState.generatingMessageId, (message) => { - if (!message.text && !message.error) { - message.text = 'Cancelled' - } - }) - } - setChatState((draft) => { - draft.generatingMessageId = '' - }) - }, [chatState.abortController, 
chatState.generatingMessageId, setChatState, updateMessage]) - - useEffect(() => { - if (chatState.messages.length) { - setConversationMessages(botId, chatState.conversationId, chatState.messages) - } - }, [botId, chatState.conversationId, chatState.messages]) - - useEffect(() => { - if (hash === 'reset') { - resetConversation() - setHash('') - } - }, [hash, setHash]) - - const chat = useMemo( - () => ({ - botId, - bot: chatState.bot, - isSpeaking: speaker.isSpeaking, - messages: chatState.messages, - sendMessage, - setInput, - input, - resetConversation, - generating: !!chatState.generatingMessageId, - stopGenerating, - uploadImage, - setAttachmentList, - attachmentList, - }), - [ - botId, - bingConversationStyle, - chatState.bot, - chatState.generatingMessageId, - chatState.messages, - speaker.isSpeaking, - setInput, - input, - setAttachmentList, - attachmentList, - resetConversation, - sendMessage, - stopGenerating, - ], - ) - - return chat -} diff --git a/spaces/Ld75/pyannote-voice-activity-detection/Dockerfile b/spaces/Ld75/pyannote-voice-activity-detection/Dockerfile deleted file mode 100644 index 6ae9cf23a739351fe60ef3c2471af3d3e0521b45..0000000000000000000000000000000000000000 --- a/spaces/Ld75/pyannote-voice-activity-detection/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker -# you will also find guides on how best to write your Dockerfile - -FROM python:3.9 - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -COPY . . - -CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/spaces/Lianjd/stock_dashboard/backtrader/trade.py b/spaces/Lianjd/stock_dashboard/backtrader/trade.py deleted file mode 100644 index c49b7b3325eab55b04d25af90dc5aa6d6beacc0b..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/trade.py +++ /dev/null @@ -1,311 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import itertools - -from .utils import AutoOrderedDict -from .utils.date import num2date -from .utils.py3 import range - - -class TradeHistory(AutoOrderedDict): - '''Represents the status and update event for each update a Trade has - - This object is a dictionary which allows '.' notation - - Attributes: - - ``status`` (``dict`` with '.' 
notation): Holds the resulting status of - an update event and has the following sub-attributes - - - ``status`` (``int``): Trade status - - ``dt`` (``float``): float coded datetime - - ``barlen`` (``int``): number of bars the trade has been active - - ``size`` (``int``): current size of the Trade - - ``price`` (``float``): current price of the Trade - - ``value`` (``float``): current monetary value of the Trade - - ``pnl`` (``float``): current profit and loss of the Trade - - ``pnlcomm`` (``float``): current profit and loss minus commission - - - ``event`` (``dict`` with '.' notation): Holds the event update - - parameters - - - ``order`` (``object``): the order which initiated the``update`` - - ``size`` (``int``): size of the update - - ``price`` (``float``):price of the update - - ``commission`` (``float``): price of the update - ''' - - def __init__(self, - status, dt, barlen, size, price, value, pnl, pnlcomm, tz, event=None): - '''Initializes the object to the current status of the Trade''' - super(TradeHistory, self).__init__() - self.status.status = status - self.status.dt = dt - self.status.barlen = barlen - self.status.size = size - self.status.price = price - self.status.value = value - self.status.pnl = pnl - self.status.pnlcomm = pnlcomm - self.status.tz = tz - if event is not None: - self.event = event - - def __reduce__(self): - return (self.__class__, (self.status.status, self.status.dt, self.status.barlen, self.status.size, - self.status.price, self.status.value, self.status.pnl, self.status.pnlcomm, - self.status.tz, self.event, )) - - def doupdate(self, order, size, price, commission): - '''Used to fill the ``update`` part of the history entry''' - self.event.order = order - self.event.size = size - self.event.price = price - self.event.commission = commission - - # Do not allow updates (avoids typing errors) - self._close() - - def datetime(self, tz=None, naive=True): - '''Returns a datetime for the time the update event happened''' - return num2date(self.status.dt, tz or self.status.tz, naive) - - -class Trade(object): - '''Keeps track of the life of an trade: size, price, - commission (and value?) - - An trade starts at 0 can be increased and reduced and can - be considered closed if it goes back to 0. 
- - The trade can be long (positive size) or short (negative size) - - An trade is not meant to be reversed (no support in the logic for it) - - Member Attributes: - - - ``ref``: unique trade identifier - - ``status`` (``int``): one of Created, Open, Closed - - ``tradeid``: grouping tradeid passed to orders during creation - The default in orders is 0 - - ``size`` (``int``): current size of the trade - - ``price`` (``float``): current price of the trade - - ``value`` (``float``): current value of the trade - - ``commission`` (``float``): current accumulated commission - - ``pnl`` (``float``): current profit and loss of the trade (gross pnl) - - ``pnlcomm`` (``float``): current profit and loss of the trade minus - commission (net pnl) - - ``isclosed`` (``bool``): records if the last update closed (set size to - null the trade - - ``isopen`` (``bool``): records if any update has opened the trade - - ``justopened`` (``bool``): if the trade was just opened - - ``baropen`` (``int``): bar in which this trade was opened - - - ``dtopen`` (``float``): float coded datetime in which the trade was - opened - - - Use method ``open_datetime`` to get a Python datetime.datetime - or use the platform provided ``num2date`` method - - - ``barclose`` (``int``): bar in which this trade was closed - - - ``dtclose`` (``float``): float coded datetime in which the trade was - closed - - - Use method ``close_datetime`` to get a Python datetime.datetime - or use the platform provided ``num2date`` method - - - ``barlen`` (``int``): number of bars this trade was open - - ``historyon`` (``bool``): whether history has to be recorded - - ``history`` (``list``): holds a list updated with each "update" event - containing the resulting status and parameters used in the update - - The first entry in the history is the Opening Event - The last entry in the history is the Closing Event - - ''' - refbasis = itertools.count(1) - - status_names = ['Created', 'Open', 'Closed'] - Created, Open, Closed = range(3) - - def __str__(self): - toprint = ( - 'ref', 'data', 'tradeid', - 'size', 'price', 'value', 'commission', 'pnl', 'pnlcomm', - 'justopened', 'isopen', 'isclosed', - 'baropen', 'dtopen', 'barclose', 'dtclose', 'barlen', - 'historyon', 'history', - 'status') - - return '\n'.join( - (':'.join((x, str(getattr(self, x)))) for x in toprint) - ) - - def __init__(self, data=None, tradeid=0, historyon=False, - size=0, price=0.0, value=0.0, commission=0.0): - - self.ref = next(self.refbasis) - self.data = data - self.tradeid = tradeid - self.size = size - self.price = price - self.value = value - self.commission = commission - - self.pnl = 0.0 - self.pnlcomm = 0.0 - - self.justopened = False - self.isopen = False - self.isclosed = False - - self.baropen = 0 - self.dtopen = 0.0 - self.barclose = 0 - self.dtclose = 0.0 - self.barlen = 0 - - self.historyon = historyon - self.history = list() - - self.status = self.Created - - def __len__(self): - '''Absolute size of the trade''' - return abs(self.size) - - def __bool__(self): - '''Trade size is not 0''' - return self.size != 0 - - __nonzero__ = __bool__ - - def getdataname(self): - '''Shortcut to retrieve the name of the data this trade references''' - return self.data._name - - def open_datetime(self, tz=None, naive=True): - '''Returns a datetime.datetime object with the datetime in which - the trade was opened - ''' - return self.data.num2date(self.dtopen, tz=tz, naive=naive) - - def close_datetime(self, tz=None, naive=True): - '''Returns a datetime.datetime object with the datetime 
in which - the trade was closed - ''' - return self.data.num2date(self.dtclose, tz=tz, naive=naive) - - def update(self, order, size, price, value, commission, pnl, - comminfo): - ''' - Updates the current trade. The logic does not check if the - trade is reversed, which is not conceptually supported by the - object. - - If an update sets the size attribute to 0, "closed" will be - set to true - - Updates may be received twice for each order, once for the existing - size which has been closed (sell undoing a buy) and a second time for - the the opening part (sell reversing a buy) - - Args: - order: the order object which has (completely or partially) - generated this update - size (int): amount to update the order - if size has the same sign as the current trade a - position increase will happen - if size has the opposite sign as current op size a - reduction/close will happen - - price (float): always be positive to ensure consistency - value (float): (unused) cost incurred in new size/price op - Not used because the value is calculated for the - trade - commission (float): incurred commission in the new size/price op - pnl (float): (unused) generated by the executed part - Not used because the trade has an independent pnl - ''' - if not size: - return # empty update, skip all other calculations - - # Commission can only increase - self.commission += commission - - # Update size and keep a reference for logic an calculations - oldsize = self.size - self.size += size # size will carry the opposite sign if reducing - - # Check if it has been currently opened - self.justopened = bool(not oldsize and size) - - if self.justopened: - self.baropen = len(self.data) - self.dtopen = 0.0 if order.p.simulated else self.data.datetime[0] - self.long = self.size > 0 - - # Any size means the trade was opened - self.isopen = bool(self.size) - - # Update current trade length - self.barlen = len(self.data) - self.baropen - - # record if the position was closed (set to null) - self.isclosed = bool(oldsize and not self.size) - - # record last bar for the trade - if self.isclosed: - self.isopen = False - self.barclose = len(self.data) - self.dtclose = self.data.datetime[0] - - self.status = self.Closed - elif self.isopen: - self.status = self.Open - - if abs(self.size) > abs(oldsize): - # position increased (be it positive or negative) - # update the average price - self.price = (oldsize * self.price + size * price) / self.size - pnl = 0.0 - - else: # abs(self.size) < abs(oldsize) - # position reduced/closed - pnl = comminfo.profitandloss(-size, self.price, price) - - self.pnl += pnl - self.pnlcomm = self.pnl - self.commission - - self.value = comminfo.getvaluesize(self.size, self.price) - - # Update the history if needed - if self.historyon: - dt0 = self.data.datetime[0] if not order.p.simulated else 0.0 - histentry = TradeHistory( - self.status, dt0, self.barlen, - self.size, self.price, self.value, - self.pnl, self.pnlcomm, self.data._tz) - histentry.doupdate(order, size, price, commission) - self.history.append(histentry) diff --git a/spaces/Liu-LAB/GPT-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h b/spaces/Liu-LAB/GPT-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h deleted file mode 100644 index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000 --- a/spaces/Liu-LAB/GPT-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h +++ /dev/null @@ -1,433 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include 
"libipc/def.h" - -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" -#include "libipc/utility/log.h" -#include "libipc/utility/utility.h" - -namespace ipc { - -//////////////////////////////////////////////////////////////// -/// producer-consumer implementation -//////////////////////////////////////////////////////////////// - -template -struct prod_cons_impl; - -template <> -struct prod_cons_impl> { - - template - struct elem_t { - std::aligned_storage_t data_ {}; - }; - - alignas(cache_line_size) std::atomic rd_; // read index - alignas(cache_line_size) std::atomic wt_; // write index - - constexpr circ::u2_t cursor() const noexcept { - return 0; - } - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed)); - if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) { - return false; // full - } - std::forward(f)(&(elems[cur_wt].data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - /** - * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'. - * So we could just disconnect all connections of receiver, and return false. - */ - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(~static_cast(0u)); - return false; - } - - template - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed)); - if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::forward(f)(&(elems[cur_rd].data_)); - std::forward(out)(true); - rd_.fetch_add(1, std::memory_order_release); - return true; - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - if (circ::index_of(cur_rd) == - circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - using flag_t = std::uint64_t; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - circ::u2_t cur_ct, nxt_ct; - for (unsigned k = 0;;) { - cur_ct = ct_.load(std::memory_order_relaxed); - if (circ::index_of(nxt_ct = cur_ct + 1) == - circ::index_of(rd_.load(std::memory_order_acquire))) { - return false; // full - } - if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - auto* el = elems + circ::index_of(cur_ct); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - while (1) { - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if (cur_ct != wt_.load(std::memory_order_relaxed)) { - return true; - } - if ((~cac_ct) != cur_ct) { - 
return true; - } - if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) { - return true; - } - wt_.store(nxt_ct, std::memory_order_release); - cur_ct = nxt_ct; - nxt_ct = cur_ct + 1; - el = elems + circ::index_of(cur_ct); - } - return true; - } - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - auto cur_wt = wt_.load(std::memory_order_acquire); - auto id_rd = circ::index_of(cur_rd); - auto id_wt = circ::index_of(cur_wt); - if (id_rd == id_wt) { - auto* el = elems + id_wt; - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if ((~cac_ct) != cur_wt) { - return false; // empty - } - if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) { - wt_.store(cur_wt + 1, std::memory_order_release); - } - k = 0; - } - else { - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - - enum : rc_t { - ep_mask = 0x00000000ffffffffull, - ep_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - }; - - alignas(cache_line_size) std::atomic wt_; // write index - alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer - - circ::u2_t cursor() const noexcept { - return wt_.load(std::memory_order_acquire); - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) { - return false; // has not finished yet - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - epoch_ += ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, 
std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) { - if (cur == cursor()) return false; // acquire - auto* el = elems + circ::index_of(cur++); - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & ep_mask) == 0) { - std::forward(out)(true); - return true; - } - auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id()); - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)((nxt_rc & ep_mask) == 0); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - using flag_t = std::uint64_t; - - enum : rc_t { - rc_mask = 0x00000000ffffffffull, - ep_mask = 0x00ffffffffffffffull, - ep_incr = 0x0100000000000000ull, - ic_mask = 0xff000000ffffffffull, - ic_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - alignas(cache_line_size) std::atomic epoch_ { 0 }; - - circ::u2_t cursor() const noexcept { - return ct_.load(std::memory_order_acquire); - } - - constexpr static rc_t inc_rc(rc_t rc) noexcept { - return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask); - } - - constexpr static rc_t inc_mask(rc_t rc) noexcept { - return inc_rc(rc) & ~rc_mask; - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.load(std::memory_order_acquire); - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_relaxed); - circ::cc_t rem_cc = cur_rc & rc_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) { - return false; // has not finished yet - } - else if (!rem_cc) { - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if ((cur_fl != cur_ct) && cur_fl) { - return false; // full - } - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) && - epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & rc_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid 
readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) { - if (epoch == epoch_.load(std::memory_order_acquire)) { - break; - } - else if (push(wrapper, std::forward(f), elems)) { - return true; - } - epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) { - auto* el = elems + circ::index_of(cur); - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if (cur_fl != ~static_cast(cur)) { - return false; // empty - } - ++cur; - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & rc_mask) == 0) { - std::forward(out)(true); - el->f_ct_.store(cur + N - 1, std::memory_order_release); - return true; - } - auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id()); - bool last_one = false; - if ((last_one = (nxt_rc & rc_mask) == 0)) { - el->f_ct_.store(cur + N - 1, std::memory_order_release); - } - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)(last_one); - return true; - } - ipc::yield(k); - } - } -}; - -} // namespace ipc diff --git a/spaces/MAPS-research/GEMRec-Gallery/pages/Gallery.py b/spaces/MAPS-research/GEMRec-Gallery/pages/Gallery.py deleted file mode 100644 index 3cc5f6cf297fef1ad99cefd302ac8a1b18c805a0..0000000000000000000000000000000000000000 --- a/spaces/MAPS-research/GEMRec-Gallery/pages/Gallery.py +++ /dev/null @@ -1,639 +0,0 @@ -import itertools -import json -import os -import requests - -import altair as alt -import extra_streamlit_components as stx -import random -import numpy as np -import pandas as pd -import streamlit as st - -from bs4 import BeautifulSoup -from datasets import load_dataset, Dataset, load_from_disk -from datetime import datetime -from huggingface_hub import login -from streamlit_agraph import agraph, Node, Edge, Config -from streamlit_extras.switch_page_button import switch_page -from streamlit_extras.tags import tagger_component -from streamlit_extras.no_default_selectbox import selectbox -from sklearn.svm import LinearSVC - -from Home import connect_to_db - -class GalleryApp: - def __init__(self, promptBook, images_ds): - self.promptBook = promptBook - self.images_ds = images_ds - - # init gallery state - if 'gallery_state' not in st.session_state: - st.session_state.gallery_state = 'graph' - - # initialize selected_dict - if 'selected_dict' not in st.session_state: - st.session_state['selected_dict'] = {} - - # clear up empty entries in seleted_dict - for prompt_id in list(st.session_state.selected_dict.keys()): - if len(st.session_state.selected_dict[prompt_id]) == 0: - st.session_state.selected_dict.pop(prompt_id) - - if 'gallery_focus' not in st.session_state: - st.session_state.gallery_focus = {'tag': None, 'prompt': None} - - def gallery_standard(self, items, col_num, info, show_checkbox=True): - rows = len(items) // col_num + 1 - containers = [st.container() for _ in range(rows)] - for idx in range(0, len(items), col_num): - row_idx = idx // col_num - with 
containers[row_idx]: - cols = st.columns(col_num) - for j in range(col_num): - if idx + j < len(items): - with cols[j]: - # show image - # image = self.images_ds[items.iloc[idx + j]['row_idx'].item()]['image'] - image = f"https://modelcofferbucket.s3-accelerate.amazonaws.com/{items.iloc[idx + j]['image_id']}.png" - st.image(image, use_column_width=True) - - # handel checkbox information - prompt_id = items.iloc[idx + j]['prompt_id'] - modelVersion_id = items.iloc[idx + j]['modelVersion_id'] - - check_init = True if modelVersion_id in st.session_state.selected_dict.get(prompt_id, []) else False - - # st.write("Position: ", idx + j) - - if show_checkbox: - # show checkbox - st.checkbox('Select', key=f'select_{prompt_id}_{modelVersion_id}', value=check_init) - - # show selected info - for key in info: - st.write(f"**{key}**: {items.iloc[idx + j][key]}") - - def gallery_graph(self, items): - items = load_tsne_coordinates(items) - - # sort items to be popularity from low to high, so that most popular ones will be on the top - items = items.sort_values(by=['model_download_count'], ascending=True).reset_index(drop=True) - - scale = 50 - items.loc[:, 'x'] = items['x'] * scale - items.loc[:, 'y'] = items['y'] * scale - - nodes = [] - edges = [] - - for idx in items.index: - nodes.append(Node(id=items.loc[idx, 'image_id'], - # label=str(items.loc[idx, 'model_name']), - # title=f"model name: {items.loc[idx, 'model_name']}\nmodelVersion name: {items.loc[idx, 'modelVersion_name']}\nclip score: {items.loc[idx, 'clip_score']}\nmcos score: {items.loc[idx, 'mcos_score']}\npopularity: {items.loc[idx, 'model_download_count']}", - title=f"model name: {items.loc[idx, 'model_name']}", - size=20, - shape='image', - image=f"https://modelcofferbucket.s3-accelerate.amazonaws.com/{items.loc[idx, 'image_id']}.png", - x=items.loc[idx, 'x'].item(), - y=items.loc[idx, 'y'].item(), - # fixed=True, - color={'background': '#E0E0E1', 'border': '#ffffff', 'highlight': {'border': '#F04542'}}, - # opacity=opacity, - shadow={'enabled': True, 'color': 'rgba(0,0,0,0.4)', 'size': 10, 'x': 1, 'y': 1}, - borderWidth=3, - borderWidthSelected=3, - shapeProperties={'useBorderWithImage': True}, - ) - ) - - config = Config(width='100%', - height='600', - directed=True, - physics=False, - hierarchical=False, - interaction={'navigationButtons': True, 'dragNodes': False, 'multiselect': False, 'hover': True}, - # **kwargs - ) - - return agraph(nodes=nodes, - edges=edges, - config=config, - ) - - def sidebar(self, items, prompt_id, note): - with st.sidebar: - # show source - if isinstance(note, str): - if note.isdigit(): - st.caption(f"`Source: civitai`") - else: - st.caption(f"`Source: {note}`") - else: - st.caption("`Source: Parti-prompts`") - - # show image metadata - image_metadatas = ['prompt', 'negativePrompt', 'sampler', 'cfgScale', 'size', 'seed'] - for key in image_metadatas: - label = ' '.join(key.split('_')).capitalize() - st.write(f"**{label}**") - if items[key][0] == ' ': - st.write('`None`') - else: - st.caption(f"{items[key][0]}") - - # for note as civitai image id, add civitai reference - if isinstance(note, str) and note.isdigit(): - try: - st.write(f'**[Civitai Reference](https://civitai.com/images/{note})**') - res = requests.get(f'https://civitai.com/images/{note}') - # st.write(res.text) - soup = BeautifulSoup(res.text, 'html.parser') - image_section = soup.find('div', {'class': 'mantine-12rlksp'}) - image_url = image_section.find('img')['src'] - st.image(image_url, use_column_width=True) - except: - pass - - # return 
prompt_tags, tag, prompt_id, items - - def text_coloring_add(self, tobe_colored:list, total_items, color_name='orange'): - if color_name in ['orange', 'red', 'green', 'blue', 'violet', 'yellow']: - colored = [f':{color_name}[{item}]' if item in tobe_colored else item for item in total_items] - else: - colored = [f'[{color_name}] {item}' if item in tobe_colored else item for item in total_items] - return colored - - def text_coloring_remove(self, tobe_removed): - if isinstance(tobe_removed, str): - if tobe_removed.startswith(':'): - tobe_removed = tobe_removed.split('[')[-1][:-1] - - elif tobe_removed.startswith('['): - tobe_removed = tobe_removed.split(']')[-1][1:] - return tobe_removed - - - def app(self): - # print(st.session_state.gallery_focus) - st.write('### Prompt-Model Retrieval') - with st.sidebar: - tagger_component('**Gallery State:**', [st.session_state.gallery_state.title()], color_name=['orange']) - # st.write('This is a gallery of images generated by the models') - - # build the tabular view - prompt_tags = self.promptBook['tag'].unique() - # sort tags by alphabetical order - prompt_tags = np.sort(prompt_tags)[::1].tolist() - - # set focus tag and prompt index if exists - if st.session_state.gallery_focus['tag'] is None: - tag_focus_idx = 0 - else: - tag_focus_idx = prompt_tags.index(st.session_state.gallery_focus['tag']) - - # add coloring to tag based on selection - tags_tobe_colored = self.promptBook[self.promptBook['prompt_id'].isin(st.session_state.selected_dict.keys())]['tag'].unique() - # colored_prompt_tags = [f':orange[{tag}]' if tag in tags_tobe_colored else tag for tag in prompt_tags] - colored_prompt_tags = self.text_coloring_add(tags_tobe_colored, prompt_tags, color_name='orange') - - # save tag to session state on change - tag = st.radio('Select a tag', colored_prompt_tags, index=tag_focus_idx, horizontal=True, key='tag', label_visibility='collapsed') - - # remove coloring from tag - tag = self.text_coloring_remove(tag) - # print('tag: ', tag) - - # print('current state: ', st.session_state.gallery_state) - - if st.session_state.gallery_state == 'graph': - - items = self.promptBook[self.promptBook['tag'] == tag].reset_index(drop=True) - - prompts = np.sort(items['prompt'].unique())[::1].tolist() - - # print('prompts: ', prompts, 'tags: ', prompt_tags) - - # selt focus prompt index if exists - if st.session_state.gallery_focus['prompt'] is None or tag != st.session_state.gallery_focus['tag']: - prompt_focus_idx = 0 - else: - prompt_focus_idx = 1 + prompts.index(st.session_state.gallery_focus['prompt']) - - # st.caption('Select a prompt') - subset_selector = st.columns([3, 1]) - with subset_selector[0]: - selector_bar = st.columns([1, 15]) - with selector_bar[0]: - shuffle = st.button('🎲', key='prompt_shuffle', on_click=self.random_gallery_focus, args=(prompt_tags,), use_container_width=True) - - with selector_bar[-1]: - # add coloring to prompt based on selection - prompts_tobe_colored = self.promptBook[self.promptBook['prompt_id'].isin(st.session_state.selected_dict.keys())]['prompt'].unique() - colored_prompts = self.text_coloring_add(prompts_tobe_colored, prompts, color_name='✅') - - selected_prompt = selectbox('Select prompt', colored_prompts, key=f'prompt_{tag}', no_selection_label='---', label_visibility='collapsed', index=prompt_focus_idx) - - # remove coloring from prompt - selected_prompt = self.text_coloring_remove(selected_prompt) - # print('selected_prompt: ', selected_prompt) - st.session_state.prompt_idx_last_time = prompts.index(selected_prompt) 
if selected_prompt else 0 - - if selected_prompt is None: - # st.markdown(':orange[Please select a prompt above👆]') - st.caption('Feel free to **navigate among tags and pages**! Your selection will be saved within one log-in session.') - - with subset_selector[-1]: - st.button(':orange[👈 **Please select a prompt**]', disabled=True, use_container_width=True) - - else: - items = items[items['prompt'] == selected_prompt].reset_index(drop=True) - prompt_id = items['prompt_id'].unique()[0] - note = items['note'].unique()[0] - - # add safety check for some prompts - safety_check = True - - # load unsafe prompts - unsafe_prompts = json.load(open('./data/unsafe_prompts.json', 'r')) - for prompt_tag in prompt_tags: - if prompt_tag not in unsafe_prompts: - unsafe_prompts[prompt_tag] = [] - # # manually add unsafe prompts - # unsafe_prompts['world knowledge'] = [83] - # unsafe_prompts['abstract'] = [1, 3] - - if int(prompt_id.item()) in unsafe_prompts[tag]: - st.warning('This prompt may contain unsafe content. They might be offensive, depressing, or sexual.') - safety_check = st.checkbox('I understand that this prompt may contain unsafe content. Show these images anyway.', key=f'safety_{prompt_id}') - - # print('current state: ', st.session_state.gallery_state) - # - # if st.session_state.gallery_state == 'graph': - if safety_check: - self.graph_mode(prompt_id, items) - with subset_selector[-1]: - has_selection = False - try: - if len(st.session_state.selected_dict.get(prompt_id, [])) > 0: - has_selection = True - except: - pass - - if has_selection: - checkout = st.button('Check out selections ➡️', use_container_width=True, type='primary', on_click=self.switch_to_checkout, args=(tag, selected_prompt)) - else: - st.button(':orange[👇 **Select images below**]', disabled=True, use_container_width=True) - try: - self.sidebar(items, prompt_id, note) - except: - pass - - elif st.session_state.gallery_state == 'check out': - # select items under the current tag, while model_id in selected_dict keys with corresponding modelVersion_ids - items = self.promptBook[self.promptBook['tag'] == tag].reset_index(drop=True) - temp_items = pd.DataFrame() - for prompt_id, selected_models in st.session_state.selected_dict.items(): - temp_items = pd.concat([temp_items, items[items['modelVersion_id'].isin(selected_models) & (items['prompt_id'] == prompt_id)]], axis=0) - items = temp_items.reset_index(drop=True) - - self.checkout_mode(tag, items) - - def switch_to_checkout(self, tag, selected_prompt): - # add focus to session state - st.session_state.gallery_focus['tag'] = tag - st.session_state.gallery_focus['prompt'] = selected_prompt - - st.session_state.gallery_state = 'check out' - - def random_gallery_focus(self, tags): - st.session_state.gallery_focus['tag'] = random.choice(tags) - # st.session_state.gallery_focus['prompt'] = random.choice(prompts) - prompts = self.promptBook[self.promptBook['tag'] == st.session_state.gallery_focus['tag']]['prompt'].unique() - st.session_state.gallery_focus['prompt'] = random.choice(prompts) - - def graph_mode(self, prompt_id, items): - graph_cols = st.columns([3, 1]) - - with graph_cols[0]: - st.caption( - 'Please **:red[click on and select]** as many images as you like! 
You will be able to compare them later in ranking stage.') - graph_space = st.empty() - - with graph_space.container(): - return_value = self.gallery_graph(items) - - with graph_cols[1]: - if return_value: - with st.form(key=f'{prompt_id}'): - image_url = f"https://modelcofferbucket.s3-accelerate.amazonaws.com/{return_value}.png" - - st.image(image_url) - - item = items[items['image_id'] == return_value].reset_index(drop=True).iloc[0] - modelVersion_id = item['modelVersion_id'] - - # handle selection - # get the latest record in database - cursor = GALLERY_CONN.cursor() - query = "SELECT * FROM gallery_clicks WHERE username = '{}' AND timestamp = '{}' AND prompt_id = '{}' AND modelVersion_id = {} ORDER BY clicktime DESC LIMIT 1".format( - st.session_state.user_id[0], st.session_state.user_id[1], prompt_id, modelVersion_id) - cursor.execute(query) - record = cursor.fetchone() - try: - image_status = record['status'] - except: - image_status = None - - print('image_status: ', image_status) - - if 'selected_dict' in st.session_state: - if item['prompt_id'] not in st.session_state.selected_dict: - st.session_state.selected_dict[item['prompt_id']] = [] - - # if 'last_clicked' not in st.session_state or item['image_id'] != st.session_state.last_clicked: - # print('last_clicked not in session state') - # self.image_selection_control(item['tag'], item['prompt'], item['prompt_id'], modelVersion_id, 'select') - # st.toast('Image selected.', icon='👍') - # - # st.session_state.last_clicked = item['image_id'] - - # if modelVersion_id in st.session_state.selected_dict[item['prompt_id']]: - # checked = True - # else: - # checked = False - - if image_status == 'report': - st.warning('You have reported this image') - unreport = st.form_submit_button('Withdraw report', use_container_width=True, type='secondary', on_click=self.image_selection_control, args=(item['tag'], item['prompt'], item['prompt_id'], item['modelVersion_id'], 'deselect')) - - else: - if image_status is None: - self.image_selection_control(item['tag'], item['prompt'], item['prompt_id'], - modelVersion_id, - 'select') - - if image_status == 'select' or image_status == 'reselect' or image_status is None: - # deselect = st.button('Deselect', key=f'select_{item["prompt_id"]}_{item["modelVersion_id"]}', use_container_width=True) - deselect = st.form_submit_button('Deselect', use_container_width=True, on_click=self.image_selection_control, args=(item['tag'], item['prompt'], item['prompt_id'], item['modelVersion_id'], 'deselect')) - - - elif image_status =='deselect': - # select = st.button('Select', key=f'select_{item["prompt_id"]}_{item["modelVersion_id"]}', use_container_width=True, type='primary') - reselect = st.form_submit_button('Reselect', use_container_width=True, type='primary', on_click=self.image_selection_control, args=(item['tag'], item['prompt'], item['prompt_id'], item['modelVersion_id'], 'reselect')) - - report = st.form_submit_button('⚠️Report', use_container_width=True, type='secondary', - on_click=self.image_selection_control, args=( - item['tag'], item['prompt'], item['prompt_id'], item['modelVersion_id'], 'report'), - help='Report this image if it contains offensive, depressing, or sexual content.') - - if image_status == 'select' or image_status == 'reselect' or image_status is None: - st.info( - "Image selected. 
**Click 'Check out selections ➡️' on top to see all selected images**.") - - # st.write(item) - # infos = ['model_name', 'modelVersion_name', 'model_download_count', 'clip_score', 'mcos_score', - # 'nsfw_score'] - # - # infos_df = item[infos] - # # rename columns - # infos_df = infos_df.rename(index={'model_name': 'Model', 'modelVersion_name': 'Version', 'model_download_count': 'Downloads', 'clip_score': 'Clip Score', 'mcos_score': 'mcos Score', 'nsfw_score': 'NSFW Score'}) - # st.table(infos_df) - - else: - st.info('You can click on and select an image.') - - def image_selection_control(self, tag, prompt, prompt_id, modelVersion_id, operation:['select', 'reselect', 'deselect','report']): - # self.remove_ranking_states(prompt_id) - - if operation == 'select' or operation == 'reselect': - st.session_state.selected_dict[prompt_id].append(modelVersion_id) - # add focus to session state - st.session_state.gallery_focus['tag'] = tag - st.session_state.gallery_focus['prompt'] = prompt - - elif operation == 'deselect': - if modelVersion_id in st.session_state.selected_dict[prompt_id]: - st.session_state.selected_dict[prompt_id].remove(modelVersion_id) - elif operation == 'report': - pass - - cursor = GALLERY_CONN.cursor() - clicktime = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") - query = "INSERT INTO gallery_clicks (username, timestamp, tag, prompt_id, modelVersion_id, clicktime, status) VALUES ('{}', '{}', '{}', '{}', {}, '{}', '{}')".format( - st.session_state.user_id[0], st.session_state.user_id[1], tag, prompt_id, modelVersion_id, clicktime, - operation) - - cursor.execute(query) - GALLERY_CONN.commit() - cursor.close() - - def checkout_mode(self, tag, items): - # st.write(items) - if len(items) > 0: - prompt_ids = items['prompt_id'].unique() - for i in range(len(prompt_ids)): - prompt_id = prompt_ids[i] - prompt = items[items['prompt_id'] == prompt_id]['prompt'].unique()[0] - # default_expand = True if st.session_state.gallery_focus['prompt'] == prompt else False - if tag == st.session_state.gallery_focus['tag'] and prompt == st.session_state.gallery_focus['prompt']: - default_expand = True - elif tag != st.session_state.gallery_focus['tag'] and i==0: - default_expand = True - else: - default_expand = False - - with st.expander(f'**{prompt}**', expanded=default_expand): - # st.caption('select info to show') - checkout_panel = st.columns([5, 3]) - with checkout_panel[0]: - info = st.multiselect('Show Info', - ['model_name', 'model_id', 'modelVersion_name', 'modelVersion_id', - 'total_score', 'model_download_count', 'clip_score', 'mcos_score', - 'norm_nsfw'], - label_visibility='collapsed', key=f'info_{prompt_id}', placeholder='Select what info to show') - - with checkout_panel[-1]: - checkout_buttons = st.columns([1, 1, 1]) - with checkout_buttons[0]: - back = st.button('Back to 🖼️', key=f'checkout_back_{prompt_id}', use_container_width=True) - if back: - st.session_state.gallery_focus['tag'] = tag - st.session_state.gallery_focus['prompt'] = prompt - print(st.session_state.gallery_focus) - st.session_state.gallery_state = 'graph' - st.rerun() - - with checkout_buttons[1]: - # init edit state - if 'edit_state' not in st.session_state: - st.session_state.edit_state = False - - if not st.session_state.edit_state: - edit = st.button('Edit', key=f'checkout_edit_{prompt_id}', use_container_width=True) - if edit: - st.session_state.edit_state = True - st.rerun() - else: - done = st.button('Done', key=f'checkout_done_{prompt_id}', use_container_width=True) - if done: - 
st.session_state.selected_dict[prompt_id] = [] - for key in st.session_state: - - # update selected_dict with edited selection - keys = key.split('_') - if keys[0] == 'select' and keys[1] == str(prompt_id): - if st.session_state[key]: - st.session_state.selected_dict[prompt_id].append(int(keys[2])) - self.image_selection_control(tag, prompt, prompt_id, int(keys[2]), 'select') # update database - st.session_state.edit_state = False - st.rerun() - - with checkout_buttons[-1]: - proceed = st.button('Proceed ➡️', key=f'checkout_proceed_{prompt_id}', use_container_width=True, - type='primary', disabled=st.session_state.edit_state) - if proceed: - self.remove_ranking_states(prompt_id) - st.session_state.gallery_focus['tag'] = tag - st.session_state.gallery_focus['prompt'] = prompt - st.session_state.gallery_state = 'graph' - - print('selected_dict: ', st.session_state.selected_dict) - - # # save the user selection to database - # cursor = GALLERY_CONN.cursor() - # st.session_state.epoch['gallery'] += 1 - # checkouttime = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") - # # for modelVersion_id in st.session_state.selected_dict[prompt_id]: - # for key, values in st.session_state.selected_dict.items(): - # # print('key: ', key, 'values: ', values) - # key_tag = self.promptBook[self.promptBook['prompt_id'] == key]['tag'].unique()[0] - # for value in values: - # query = "INSERT INTO gallery_selections (username, timestamp, tag, prompt_id, modelVersion_id, checkouttime, epoch) VALUES ('{}', '{}', '{}', '{}', {}, '{}', {})".format(st.session_state.user_id[0], st.session_state.user_id[1], key_tag, key, value, checkouttime, st.session_state.epoch['gallery']) - # print(query) - # cursor.execute(query) - # GALLERY_CONN.commit() - # cursor.close() - - # get the largest epoch number of this user and prompt - cursor = GALLERY_CONN.cursor() - db_table = 'battle_results' if st.session_state.assigned_rank_mode=='Battle' else 'sort_results' - query = "SELECT MAX(epoch) FROM {} WHERE username = '{}' AND timestamp = '{}' AND prompt_id = {}".format(db_table, st.session_state.user_id[0], st.session_state.user_id[1], prompt_id) - cursor.execute(query) - max_epoch = cursor.fetchone()['MAX(epoch)'], - # print('max epoch: ', max_epoch, type(max_epoch)) - cursor.close() - - try: - st.session_state.epoch['ranking'][prompt_id] = max_epoch[0] + 1 - except TypeError: - st.session_state.epoch['ranking'][prompt_id] = 1 - # st.session_state.epoch['summary'][tag] = st.session_state.epoch['summary'].get(tag, 0) + 1 - # st.session_state.epoch['summary']['overall'] += 1 - print('epoch: ', st.session_state.epoch) - switch_page('ranking') - - self.gallery_standard(items[items['prompt_id'] == prompt_id].reset_index(drop=True), 4, info, show_checkbox=st.session_state.edit_state) - else: - # with st.form(key=f'checkout_{tag}'): - st.info('No selection under this tag') - back = st.button('🖼️ Back to gallery and select something you like', key=f'checkout_{tag}', type='primary') - if back: - st.session_state.gallery_focus['tag'] = tag - st.session_state.gallery_focus['prompt'] = None - st.session_state.gallery_state = 'graph' - st.rerun() - - def remove_ranking_states(self, prompt_id): - # for drag sort - try: - st.session_state.counter[prompt_id] = 0 - st.session_state.ranking[prompt_id] = {} - print('remove ranking states') - except: - print('no sort ranking states to remove') - - # for battles - try: - st.session_state.pointer[prompt_id] = {'left': 0, 'right': 1} - print('remove battles states') - except: - print('no battles states to 
remove') - - # for page progress - try: - st.session_state.progress[prompt_id] = 'ranking' - print('reset page progress states') - except: - print('no page progress states to be reset') - -@st.cache_data -def load_hf_dataset(show_NSFW=False): - # login to huggingface - login(token=os.environ.get("HF_TOKEN")) - - # load from huggingface - roster = pd.DataFrame(load_dataset('MAPS-research/GEMRec-Roster', split='train')) - promptBook = pd.DataFrame(load_dataset('MAPS-research/GEMRec-Metadata', split='train')) - # images_ds = load_from_disk(os.path.join(os.getcwd(), 'data', 'promptbook')) - images_ds = None # set to None for now since we use s3 bucket to store images - - # # process dataset - # roster = roster[['model_id', 'model_name', 'modelVersion_id', 'modelVersion_name', - # 'model_download_count']].drop_duplicates().reset_index(drop=True) - - # add 'custom_score_weights' column to promptBook if not exist - if 'weighted_score_sum' not in promptBook.columns: - promptBook.loc[:, 'weighted_score_sum'] = 0 - - # merge roster and promptbook - promptBook = promptBook.merge(roster[['model_id', 'model_name', 'modelVersion_id', 'modelVersion_name', 'model_download_count']], - on=['model_id', 'modelVersion_id'], how='left') - - # add column to record current row index - promptBook.loc[:, 'row_idx'] = promptBook.index - - # apply curation filter - prompt_to_hide = json.load(open('./data/curation.json', 'r')) - prompt_to_hide = list(itertools.chain.from_iterable(prompt_to_hide.values())) - print('prompt to hide: ', prompt_to_hide) - promptBook = promptBook[~promptBook['prompt_id'].isin(prompt_to_hide)].reset_index(drop=True) - - # apply a nsfw filter - if not show_NSFW: - promptBook = promptBook[promptBook['norm_nsfw'] <= 0.8].reset_index(drop=True) - print('nsfw filter applied', len(promptBook)) - - # add a column that adds up 'norm_clip', 'norm_mcos', and 'norm_pop' - score_weights = [1.0, 0.8, 0.2] - promptBook.loc[:, 'total_score'] = round(promptBook['norm_clip'] * score_weights[0] + promptBook['norm_mcos'] * score_weights[1] + promptBook['norm_pop'] * score_weights[2], 4) - - return roster, promptBook, images_ds - -@st.cache_data -def load_tsne_coordinates(items): - # load tsne coordinates - tsne_df = pd.read_parquet('./data/feats_tsne.parquet') - - items = items.merge(tsne_df, on=['modelVersion_id', 'prompt_id'], how='left') - return items - - -if __name__ == "__main__": - st.set_page_config(page_title="Model Coffer Gallery", page_icon="🖼️", layout="wide") - - if 'user_id' not in st.session_state: - st.warning('Please log in first.') - home_btn = st.button('Go to Home Page') - if home_btn: - switch_page("home") - else: - GALLERY_CONN = connect_to_db() - roster, promptBook, images_ds = load_hf_dataset(st.session_state.show_NSFW) - - app = GalleryApp(promptBook=promptBook, images_ds=images_ds) - app.app() - - with open('./css/style.css') as f: - st.markdown(f'', unsafe_allow_html=True) - diff --git a/spaces/Marshalls/testmtd/analysis/aistplusplus_api/__MACOSX/smpl/smpl_webuser/._posemapper.py b/spaces/Marshalls/testmtd/analysis/aistplusplus_api/__MACOSX/smpl/smpl_webuser/._posemapper.py deleted file mode 100644 index f4a067064fdd6a84bd9f7a6042579d744c501927..0000000000000000000000000000000000000000 Binary files a/spaces/Marshalls/testmtd/analysis/aistplusplus_api/__MACOSX/smpl/smpl_webuser/._posemapper.py and /dev/null differ diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/cnn/bricks/transformer.py 
b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/cnn/bricks/transformer.py deleted file mode 100644 index e61ae0dd941a7be00b3e41a3de833ec50470a45f..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/cnn/bricks/transformer.py +++ /dev/null @@ -1,595 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import warnings - -import torch -import torch.nn as nn - -from annotator.uniformer.mmcv import ConfigDict, deprecated_api_warning -from annotator.uniformer.mmcv.cnn import Linear, build_activation_layer, build_norm_layer -from annotator.uniformer.mmcv.runner.base_module import BaseModule, ModuleList, Sequential -from annotator.uniformer.mmcv.utils import build_from_cfg -from .drop import build_dropout -from .registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING, - TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE) - -# Avoid BC-breaking of importing MultiScaleDeformableAttention from this file -try: - from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention # noqa F401 - warnings.warn( - ImportWarning( - '``MultiScaleDeformableAttention`` has been moved to ' - '``mmcv.ops.multi_scale_deform_attn``, please change original path ' # noqa E501 - '``from annotator.uniformer.mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention`` ' # noqa E501 - 'to ``from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention`` ' # noqa E501 - )) - -except ImportError: - warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from ' - '``mmcv.ops.multi_scale_deform_attn``, ' - 'You should install ``mmcv-full`` if you need this module. ') - - -def build_positional_encoding(cfg, default_args=None): - """Builder for Position Encoding.""" - return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args) - - -def build_attention(cfg, default_args=None): - """Builder for attention.""" - return build_from_cfg(cfg, ATTENTION, default_args) - - -def build_feedforward_network(cfg, default_args=None): - """Builder for feed-forward network (FFN).""" - return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args) - - -def build_transformer_layer(cfg, default_args=None): - """Builder for transformer layer.""" - return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args) - - -def build_transformer_layer_sequence(cfg, default_args=None): - """Builder for transformer encoder and transformer decoder.""" - return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args) - - -@ATTENTION.register_module() -class MultiheadAttention(BaseModule): - """A wrapper for ``torch.nn.MultiheadAttention``. - - This module implements MultiheadAttention with identity connection, - and positional encoding is also passed as input. - - Args: - embed_dims (int): The embedding dimension. - num_heads (int): Parallel attention heads. - attn_drop (float): A Dropout layer on attn_output_weights. - Default: 0.0. - proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. - Default: 0.0. - dropout_layer (obj:`ConfigDict`): The dropout_layer used - when adding the shortcut. - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. - batch_first (bool): When it is True, Key, Query and Value are shape of - (batch, n, embed_dim), otherwise (n, batch, embed_dim). - Default to False. 
- """ - - def __init__(self, - embed_dims, - num_heads, - attn_drop=0., - proj_drop=0., - dropout_layer=dict(type='Dropout', drop_prob=0.), - init_cfg=None, - batch_first=False, - **kwargs): - super(MultiheadAttention, self).__init__(init_cfg) - if 'dropout' in kwargs: - warnings.warn('The arguments `dropout` in MultiheadAttention ' - 'has been deprecated, now you can separately ' - 'set `attn_drop`(float), proj_drop(float), ' - 'and `dropout_layer`(dict) ') - attn_drop = kwargs['dropout'] - dropout_layer['drop_prob'] = kwargs.pop('dropout') - - self.embed_dims = embed_dims - self.num_heads = num_heads - self.batch_first = batch_first - - self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop, - **kwargs) - - self.proj_drop = nn.Dropout(proj_drop) - self.dropout_layer = build_dropout( - dropout_layer) if dropout_layer else nn.Identity() - - @deprecated_api_warning({'residual': 'identity'}, - cls_name='MultiheadAttention') - def forward(self, - query, - key=None, - value=None, - identity=None, - query_pos=None, - key_pos=None, - attn_mask=None, - key_padding_mask=None, - **kwargs): - """Forward function for `MultiheadAttention`. - - **kwargs allow passing a more general data flow when combining - with other operations in `transformerlayer`. - - Args: - query (Tensor): The input query with shape [num_queries, bs, - embed_dims] if self.batch_first is False, else - [bs, num_queries embed_dims]. - key (Tensor): The key tensor with shape [num_keys, bs, - embed_dims] if self.batch_first is False, else - [bs, num_keys, embed_dims] . - If None, the ``query`` will be used. Defaults to None. - value (Tensor): The value tensor with same shape as `key`. - Same in `nn.MultiheadAttention.forward`. Defaults to None. - If None, the `key` will be used. - identity (Tensor): This tensor, with the same shape as x, - will be used for the identity link. - If None, `x` will be used. Defaults to None. - query_pos (Tensor): The positional encoding for query, with - the same shape as `x`. If not None, it will - be added to `x` before forward function. Defaults to None. - key_pos (Tensor): The positional encoding for `key`, with the - same shape as `key`. Defaults to None. If not None, it will - be added to `key` before forward function. If None, and - `query_pos` has the same shape as `key`, then `query_pos` - will be used for `key_pos`. Defaults to None. - attn_mask (Tensor): ByteTensor mask with shape [num_queries, - num_keys]. Same in `nn.MultiheadAttention.forward`. - Defaults to None. - key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. - Defaults to None. - - Returns: - Tensor: forwarded results with shape - [num_queries, bs, embed_dims] - if self.batch_first is False, else - [bs, num_queries embed_dims]. 
- """ - - if key is None: - key = query - if value is None: - value = key - if identity is None: - identity = query - if key_pos is None: - if query_pos is not None: - # use query_pos if key_pos is not available - if query_pos.shape == key.shape: - key_pos = query_pos - else: - warnings.warn(f'position encoding of key is' - f'missing in {self.__class__.__name__}.') - if query_pos is not None: - query = query + query_pos - if key_pos is not None: - key = key + key_pos - - # Because the dataflow('key', 'query', 'value') of - # ``torch.nn.MultiheadAttention`` is (num_query, batch, - # embed_dims), We should adjust the shape of dataflow from - # batch_first (batch, num_query, embed_dims) to num_query_first - # (num_query ,batch, embed_dims), and recover ``attn_output`` - # from num_query_first to batch_first. - if self.batch_first: - query = query.transpose(0, 1) - key = key.transpose(0, 1) - value = value.transpose(0, 1) - - out = self.attn( - query=query, - key=key, - value=value, - attn_mask=attn_mask, - key_padding_mask=key_padding_mask)[0] - - if self.batch_first: - out = out.transpose(0, 1) - - return identity + self.dropout_layer(self.proj_drop(out)) - - -@FEEDFORWARD_NETWORK.register_module() -class FFN(BaseModule): - """Implements feed-forward networks (FFNs) with identity connection. - - Args: - embed_dims (int): The feature dimension. Same as - `MultiheadAttention`. Defaults: 256. - feedforward_channels (int): The hidden dimension of FFNs. - Defaults: 1024. - num_fcs (int, optional): The number of fully-connected layers in - FFNs. Default: 2. - act_cfg (dict, optional): The activation config for FFNs. - Default: dict(type='ReLU') - ffn_drop (float, optional): Probability of an element to be - zeroed in FFN. Default 0.0. - add_identity (bool, optional): Whether to add the - identity connection. Default: `True`. - dropout_layer (obj:`ConfigDict`): The dropout_layer used - when adding the shortcut. - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. - """ - - @deprecated_api_warning( - { - 'dropout': 'ffn_drop', - 'add_residual': 'add_identity' - }, - cls_name='FFN') - def __init__(self, - embed_dims=256, - feedforward_channels=1024, - num_fcs=2, - act_cfg=dict(type='ReLU', inplace=True), - ffn_drop=0., - dropout_layer=None, - add_identity=True, - init_cfg=None, - **kwargs): - super(FFN, self).__init__(init_cfg) - assert num_fcs >= 2, 'num_fcs should be no less ' \ - f'than 2. got {num_fcs}.' - self.embed_dims = embed_dims - self.feedforward_channels = feedforward_channels - self.num_fcs = num_fcs - self.act_cfg = act_cfg - self.activate = build_activation_layer(act_cfg) - - layers = [] - in_channels = embed_dims - for _ in range(num_fcs - 1): - layers.append( - Sequential( - Linear(in_channels, feedforward_channels), self.activate, - nn.Dropout(ffn_drop))) - in_channels = feedforward_channels - layers.append(Linear(feedforward_channels, embed_dims)) - layers.append(nn.Dropout(ffn_drop)) - self.layers = Sequential(*layers) - self.dropout_layer = build_dropout( - dropout_layer) if dropout_layer else torch.nn.Identity() - self.add_identity = add_identity - - @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN') - def forward(self, x, identity=None): - """Forward function for `FFN`. - - The function would add x to the output tensor if residue is None. 
- """ - out = self.layers(x) - if not self.add_identity: - return self.dropout_layer(out) - if identity is None: - identity = x - return identity + self.dropout_layer(out) - - -@TRANSFORMER_LAYER.register_module() -class BaseTransformerLayer(BaseModule): - """Base `TransformerLayer` for vision transformer. - - It can be built from `mmcv.ConfigDict` and support more flexible - customization, for example, using any number of `FFN or LN ` and - use different kinds of `attention` by specifying a list of `ConfigDict` - named `attn_cfgs`. It is worth mentioning that it supports `prenorm` - when you specifying `norm` as the first element of `operation_order`. - More details about the `prenorm`: `On Layer Normalization in the - Transformer Architecture `_ . - - Args: - attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )): - Configs for `self_attention` or `cross_attention` modules, - The order of the configs in the list should be consistent with - corresponding attentions in operation_order. - If it is a dict, all of the attention modules in operation_order - will be built with this config. Default: None. - ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )): - Configs for FFN, The order of the configs in the list should be - consistent with corresponding ffn in operation_order. - If it is a dict, all of the attention modules in operation_order - will be built with this config. - operation_order (tuple[str]): The execution order of operation - in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). - Support `prenorm` when you specifying first element as `norm`. - Default:None. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN'). - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. - batch_first (bool): Key, Query and Value are shape - of (batch, n, embed_dim) - or (n, batch, embed_dim). Default to False. - """ - - def __init__(self, - attn_cfgs=None, - ffn_cfgs=dict( - type='FFN', - embed_dims=256, - feedforward_channels=1024, - num_fcs=2, - ffn_drop=0., - act_cfg=dict(type='ReLU', inplace=True), - ), - operation_order=None, - norm_cfg=dict(type='LN'), - init_cfg=None, - batch_first=False, - **kwargs): - - deprecated_args = dict( - feedforward_channels='feedforward_channels', - ffn_dropout='ffn_drop', - ffn_num_fcs='num_fcs') - for ori_name, new_name in deprecated_args.items(): - if ori_name in kwargs: - warnings.warn( - f'The arguments `{ori_name}` in BaseTransformerLayer ' - f'has been deprecated, now you should set `{new_name}` ' - f'and other FFN related arguments ' - f'to a dict named `ffn_cfgs`. ') - ffn_cfgs[new_name] = kwargs[ori_name] - - super(BaseTransformerLayer, self).__init__(init_cfg) - - self.batch_first = batch_first - - assert set(operation_order) & set( - ['self_attn', 'norm', 'ffn', 'cross_attn']) == \ - set(operation_order), f'The operation_order of' \ - f' {self.__class__.__name__} should ' \ - f'contains all four operation type ' \ - f"{['self_attn', 'norm', 'ffn', 'cross_attn']}" - - num_attn = operation_order.count('self_attn') + operation_order.count( - 'cross_attn') - if isinstance(attn_cfgs, dict): - attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)] - else: - assert num_attn == len(attn_cfgs), f'The length ' \ - f'of attn_cfg {num_attn} is ' \ - f'not consistent with the number of attention' \ - f'in operation_order {operation_order}.' 
- - self.num_attn = num_attn - self.operation_order = operation_order - self.norm_cfg = norm_cfg - self.pre_norm = operation_order[0] == 'norm' - self.attentions = ModuleList() - - index = 0 - for operation_name in operation_order: - if operation_name in ['self_attn', 'cross_attn']: - if 'batch_first' in attn_cfgs[index]: - assert self.batch_first == attn_cfgs[index]['batch_first'] - else: - attn_cfgs[index]['batch_first'] = self.batch_first - attention = build_attention(attn_cfgs[index]) - # Some custom attentions used as `self_attn` - # or `cross_attn` can have different behavior. - attention.operation_name = operation_name - self.attentions.append(attention) - index += 1 - - self.embed_dims = self.attentions[0].embed_dims - - self.ffns = ModuleList() - num_ffns = operation_order.count('ffn') - if isinstance(ffn_cfgs, dict): - ffn_cfgs = ConfigDict(ffn_cfgs) - if isinstance(ffn_cfgs, dict): - ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)] - assert len(ffn_cfgs) == num_ffns - for ffn_index in range(num_ffns): - if 'embed_dims' not in ffn_cfgs[ffn_index]: - ffn_cfgs['embed_dims'] = self.embed_dims - else: - assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims - self.ffns.append( - build_feedforward_network(ffn_cfgs[ffn_index], - dict(type='FFN'))) - - self.norms = ModuleList() - num_norms = operation_order.count('norm') - for _ in range(num_norms): - self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1]) - - def forward(self, - query, - key=None, - value=None, - query_pos=None, - key_pos=None, - attn_masks=None, - query_key_padding_mask=None, - key_padding_mask=None, - **kwargs): - """Forward function for `TransformerDecoderLayer`. - - **kwargs contains some specific arguments of attentions. - - Args: - query (Tensor): The input query with shape - [num_queries, bs, embed_dims] if - self.batch_first is False, else - [bs, num_queries embed_dims]. - key (Tensor): The key tensor with shape [num_keys, bs, - embed_dims] if self.batch_first is False, else - [bs, num_keys, embed_dims] . - value (Tensor): The value tensor with same shape as `key`. - query_pos (Tensor): The positional encoding for `query`. - Default: None. - key_pos (Tensor): The positional encoding for `key`. - Default: None. - attn_masks (List[Tensor] | None): 2D Tensor used in - calculation of corresponding attention. The length of - it should equal to the number of `attention` in - `operation_order`. Default: None. - query_key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_queries]. Only used in `self_attn` layer. - Defaults to None. - key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_keys]. Default: None. - - Returns: - Tensor: forwarded results with shape [num_queries, bs, embed_dims]. 
- """ - - norm_index = 0 - attn_index = 0 - ffn_index = 0 - identity = query - if attn_masks is None: - attn_masks = [None for _ in range(self.num_attn)] - elif isinstance(attn_masks, torch.Tensor): - attn_masks = [ - copy.deepcopy(attn_masks) for _ in range(self.num_attn) - ] - warnings.warn(f'Use same attn_mask in all attentions in ' - f'{self.__class__.__name__} ') - else: - assert len(attn_masks) == self.num_attn, f'The length of ' \ - f'attn_masks {len(attn_masks)} must be equal ' \ - f'to the number of attention in ' \ - f'operation_order {self.num_attn}' - - for layer in self.operation_order: - if layer == 'self_attn': - temp_key = temp_value = query - query = self.attentions[attn_index]( - query, - temp_key, - temp_value, - identity if self.pre_norm else None, - query_pos=query_pos, - key_pos=query_pos, - attn_mask=attn_masks[attn_index], - key_padding_mask=query_key_padding_mask, - **kwargs) - attn_index += 1 - identity = query - - elif layer == 'norm': - query = self.norms[norm_index](query) - norm_index += 1 - - elif layer == 'cross_attn': - query = self.attentions[attn_index]( - query, - key, - value, - identity if self.pre_norm else None, - query_pos=query_pos, - key_pos=key_pos, - attn_mask=attn_masks[attn_index], - key_padding_mask=key_padding_mask, - **kwargs) - attn_index += 1 - identity = query - - elif layer == 'ffn': - query = self.ffns[ffn_index]( - query, identity if self.pre_norm else None) - ffn_index += 1 - - return query - - -@TRANSFORMER_LAYER_SEQUENCE.register_module() -class TransformerLayerSequence(BaseModule): - """Base class for TransformerEncoder and TransformerDecoder in vision - transformer. - - As base-class of Encoder and Decoder in vision transformer. - Support customization such as specifying different kind - of `transformer_layer` in `transformer_coder`. - - Args: - transformerlayer (list[obj:`mmcv.ConfigDict`] | - obj:`mmcv.ConfigDict`): Config of transformerlayer - in TransformerCoder. If it is obj:`mmcv.ConfigDict`, - it would be repeated `num_layer` times to a - list[`mmcv.ConfigDict`]. Default: None. - num_layers (int): The number of `TransformerLayer`. Default: None. - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. - """ - - def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None): - super(TransformerLayerSequence, self).__init__(init_cfg) - if isinstance(transformerlayers, dict): - transformerlayers = [ - copy.deepcopy(transformerlayers) for _ in range(num_layers) - ] - else: - assert isinstance(transformerlayers, list) and \ - len(transformerlayers) == num_layers - self.num_layers = num_layers - self.layers = ModuleList() - for i in range(num_layers): - self.layers.append(build_transformer_layer(transformerlayers[i])) - self.embed_dims = self.layers[0].embed_dims - self.pre_norm = self.layers[0].pre_norm - - def forward(self, - query, - key, - value, - query_pos=None, - key_pos=None, - attn_masks=None, - query_key_padding_mask=None, - key_padding_mask=None, - **kwargs): - """Forward function for `TransformerCoder`. - - Args: - query (Tensor): Input query with shape - `(num_queries, bs, embed_dims)`. - key (Tensor): The key tensor with shape - `(num_keys, bs, embed_dims)`. - value (Tensor): The value tensor with shape - `(num_keys, bs, embed_dims)`. - query_pos (Tensor): The positional encoding for `query`. - Default: None. - key_pos (Tensor): The positional encoding for `key`. - Default: None. 
- attn_masks (List[Tensor], optional): Each element is 2D Tensor - which is used in calculation of corresponding attention in - operation_order. Default: None. - query_key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_queries]. Only used in self-attention - Default: None. - key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_keys]. Default: None. - - Returns: - Tensor: results with shape [num_queries, bs, embed_dims]. - """ - for layer in self.layers: - query = layer( - query, - key, - value, - query_pos=query_pos, - key_pos=key_pos, - attn_masks=attn_masks, - query_key_padding_mask=query_key_padding_mask, - key_padding_mask=key_padding_mask, - **kwargs) - return query diff --git a/spaces/Melyoooo/test/Dockerfile b/spaces/Melyoooo/test/Dockerfile deleted file mode 100644 index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000 --- a/spaces/Melyoooo/test/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ - -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/MikeyAulin/stabilityai-stable-diffusion-2-1/README.md b/spaces/MikeyAulin/stabilityai-stable-diffusion-2-1/README.md deleted file mode 100644 index 424df09813e543e821bbd973e3b8c25f67a62793..0000000000000000000000000000000000000000 --- a/spaces/MikeyAulin/stabilityai-stable-diffusion-2-1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stabilityai Stable Diffusion 2 1 -emoji: 🚀 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Miuzarte/SUI-svc-4.0/inference/infer_tool_grad.py b/spaces/Miuzarte/SUI-svc-4.0/inference/infer_tool_grad.py deleted file mode 100644 index b75af49c08e2e724839828bc419792ed580809bb..0000000000000000000000000000000000000000 --- a/spaces/Miuzarte/SUI-svc-4.0/inference/infer_tool_grad.py +++ /dev/null @@ -1,160 +0,0 @@ -import hashlib -import json -import logging -import os -import time -from pathlib import Path -import io -import librosa -import maad -import numpy as np -from inference import slicer -import parselmouth -import soundfile -import torch -import torchaudio - -from hubert import hubert_model -import utils -from models import SynthesizerTrn -logging.getLogger('numba').setLevel(logging.WARNING) -logging.getLogger('matplotlib').setLevel(logging.WARNING) - -def resize2d_f0(x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)), - source) - res = np.nan_to_num(target) - return res - -def get_f0(x, p_len,f0_up_key=0): - - time_step = 160 / 16000 * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - f0 = parselmouth.Sound(x, 16000).to_pitch_ac( - time_step=time_step / 1000, voicing_threshold=0.6, - pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency'] - - pad_size=(p_len - len(f0) + 1) // 2 - if(pad_size>0 or p_len - len(f0) - pad_size>0): - f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant') - - f0 *= pow(2, f0_up_key / 
12) - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0 - -def clean_pitch(input_pitch): - num_nan = np.sum(input_pitch == 1) - if num_nan / len(input_pitch) > 0.9: - input_pitch[input_pitch != 1] = 1 - return input_pitch - - -def plt_pitch(input_pitch): - input_pitch = input_pitch.astype(float) - input_pitch[input_pitch == 1] = np.nan - return input_pitch - - -def f0_to_pitch(ff): - f0_pitch = 69 + 12 * np.log2(ff / 440) - return f0_pitch - - -def fill_a_to_b(a, b): - if len(a) < len(b): - for _ in range(0, len(b) - len(a)): - a.append(a[0]) - - -def mkdir(paths: list): - for path in paths: - if not os.path.exists(path): - os.mkdir(path) - - -class VitsSvc(object): - def __init__(self): - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.SVCVITS = None - self.hps = None - self.speakers = None - self.hubert_soft = utils.get_hubert_model() - - def set_device(self, device): - self.device = torch.device(device) - self.hubert_soft.to(self.device) - if self.SVCVITS != None: - self.SVCVITS.to(self.device) - - def loadCheckpoint(self, path): - self.hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json") - self.SVCVITS = SynthesizerTrn( - self.hps.data.filter_length // 2 + 1, - self.hps.train.segment_size // self.hps.data.hop_length, - **self.hps.model) - _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", self.SVCVITS, None) - _ = self.SVCVITS.eval().to(self.device) - self.speakers = self.hps.spk - - def get_units(self, source, sr): - source = source.unsqueeze(0).to(self.device) - with torch.inference_mode(): - units = self.hubert_soft.units(source) - return units - - - def get_unit_pitch(self, in_path, tran): - source, sr = torchaudio.load(in_path) - source = torchaudio.functional.resample(source, sr, 16000) - if len(source.shape) == 2 and source.shape[1] >= 2: - source = torch.mean(source, dim=0).unsqueeze(0) - soft = self.get_units(source, sr).squeeze(0).cpu().numpy() - f0_coarse, f0 = get_f0(source.cpu().numpy()[0], soft.shape[0]*2, tran) - return soft, f0 - - def infer(self, speaker_id, tran, raw_path): - speaker_id = self.speakers[speaker_id] - sid = torch.LongTensor([int(speaker_id)]).to(self.device).unsqueeze(0) - soft, pitch = self.get_unit_pitch(raw_path, tran) - f0 = torch.FloatTensor(clean_pitch(pitch)).unsqueeze(0).to(self.device) - stn_tst = torch.FloatTensor(soft) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0).to(self.device) - x_tst = torch.repeat_interleave(x_tst, repeats=2, dim=1).transpose(1, 2) - audio = self.SVCVITS.infer(x_tst, f0=f0, g=sid)[0,0].data.float() - return audio, audio.shape[-1] - - def inference(self,srcaudio,chara,tran,slice_db): - sampling_rate, audio = srcaudio - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - soundfile.write("tmpwav.wav", audio, 16000, format="wav") - chunks = slicer.cut("tmpwav.wav", db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio("tmpwav.wav", chunks) - audio = [] - for (slice_tag, data) in audio_data: - length = int(np.ceil(len(data) / audio_sr * self.hps.data.sampling_rate)) - raw_path = io.BytesIO() - soundfile.write(raw_path, data, audio_sr, format="wav") - 
raw_path.seek(0) - if slice_tag: - _audio = np.zeros(length) - else: - out_audio, out_sr = self.infer(chara, tran, raw_path) - _audio = out_audio.cpu().numpy() - audio.extend(list(_audio)) - audio = (np.array(audio) * 32768.0).astype('int16') - return (self.hps.data.sampling_rate,audio) diff --git a/spaces/MoonQiu/LongerCrafter/lvdm/modules/x_transformer.py b/spaces/MoonQiu/LongerCrafter/lvdm/modules/x_transformer.py deleted file mode 100644 index f252ab4032a78407ed487495807940c4ba802ffa..0000000000000000000000000000000000000000 --- a/spaces/MoonQiu/LongerCrafter/lvdm/modules/x_transformer.py +++ /dev/null @@ -1,640 +0,0 @@ -"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" -from functools import partial -from inspect import isfunction -from collections import namedtuple -from einops import rearrange, repeat -import torch -from torch import nn, einsum -import torch.nn.functional as F - -# constants -DEFAULT_DIM_HEAD = 64 - -Intermediates = namedtuple('Intermediates', [ - 'pre_softmax_attn', - 'post_softmax_attn' -]) - -LayerIntermediates = namedtuple('Intermediates', [ - 'hiddens', - 'attn_intermediates' -]) - - -class AbsolutePositionalEmbedding(nn.Module): - def __init__(self, dim, max_seq_len): - super().__init__() - self.emb = nn.Embedding(max_seq_len, dim) - self.init_() - - def init_(self): - nn.init.normal_(self.emb.weight, std=0.02) - - def forward(self, x): - n = torch.arange(x.shape[1], device=x.device) - return self.emb(n)[None, :, :] - - -class FixedPositionalEmbedding(nn.Module): - def __init__(self, dim): - super().__init__() - inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) - - def forward(self, x, seq_dim=1, offset=0): - t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset - sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) - emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) - return emb[None, :, :] - - -# helpers - -def exists(val): - return val is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def always(val): - def inner(*args, **kwargs): - return val - return inner - - -def not_equals(val): - def inner(x): - return x != val - return inner - - -def equals(val): - def inner(x): - return x == val - return inner - - -def max_neg_value(tensor): - return -torch.finfo(tensor.dtype).max - - -# keyword argument helpers - -def pick_and_pop(keys, d): - values = list(map(lambda key: d.pop(key), keys)) - return dict(zip(keys, values)) - - -def group_dict_by_key(cond, d): - return_val = [dict(), dict()] - for key in d.keys(): - match = bool(cond(key)) - ind = int(not match) - return_val[ind][key] = d[key] - return (*return_val,) - - -def string_begins_with(prefix, str): - return str.startswith(prefix) - - -def group_by_key_prefix(prefix, d): - return group_dict_by_key(partial(string_begins_with, prefix), d) - - -def groupby_prefix_and_trim(prefix, d): - kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) - kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) - return kwargs_without_prefix, kwargs - - -# classes -class Scale(nn.Module): - def __init__(self, value, fn): - super().__init__() - self.value = value - self.fn = fn - - def forward(self, x, **kwargs): - x, *rest = self.fn(x, **kwargs) - return (x * self.value, *rest) - - -class Rezero(nn.Module): - def __init__(self, 
fn): - super().__init__() - self.fn = fn - self.g = nn.Parameter(torch.zeros(1)) - - def forward(self, x, **kwargs): - x, *rest = self.fn(x, **kwargs) - return (x * self.g, *rest) - - -class ScaleNorm(nn.Module): - def __init__(self, dim, eps=1e-5): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(1)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RMSNorm(nn.Module): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(dim)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class Residual(nn.Module): - def forward(self, x, residual): - return x + residual - - -class GRUGating(nn.Module): - def __init__(self, dim): - super().__init__() - self.gru = nn.GRUCell(dim, dim) - - def forward(self, x, residual): - gated_output = self.gru( - rearrange(x, 'b n d -> (b n) d'), - rearrange(residual, 'b n d -> (b n) d') - ) - - return gated_output.reshape_as(x) - - -# feedforward - -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -# attention. 
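As a quick illustration of the feed-forward block above: with `glu=True`, a single linear projection produces twice the inner width and one half gates the other through GELU (the GEGLU variant). A minimal, self-contained sketch of that gating in plain PyTorch (the sizes and names below are illustrative only, not taken from this file):

```python
import torch
import torch.nn.functional as F

dim, inner_dim = 64, 256
proj = torch.nn.Linear(dim, inner_dim * 2)   # one projection yields both value and gate

x = torch.randn(2, 10, dim)                  # (batch, seq_len, dim)
value, gate = proj(x).chunk(2, dim=-1)       # split the doubled projection in two
out = value * F.gelu(gate)                   # GEGLU: value gated by GELU(gate)
print(out.shape)                             # torch.Size([2, 10, 256])
```

The `Attention` class that follows implements the multi-head attention used alongside this feed-forward block.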
-class Attention(nn.Module): - def __init__( - self, - dim, - dim_head=DEFAULT_DIM_HEAD, - heads=8, - causal=False, - mask=None, - talking_heads=False, - sparse_topk=None, - use_entmax15=False, - num_mem_kv=0, - dropout=0., - on_attn=False - ): - super().__init__() - if use_entmax15: - raise NotImplementedError("Check out entmax activation instead of softmax activation!") - self.scale = dim_head ** -0.5 - self.heads = heads - self.causal = causal - self.mask = mask - - inner_dim = dim_head * heads - - self.to_q = nn.Linear(dim, inner_dim, bias=False) - self.to_k = nn.Linear(dim, inner_dim, bias=False) - self.to_v = nn.Linear(dim, inner_dim, bias=False) - self.dropout = nn.Dropout(dropout) - - # talking heads - self.talking_heads = talking_heads - if talking_heads: - self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - - # explicit topk sparse attention - self.sparse_topk = sparse_topk - - # entmax - #self.attn_fn = entmax15 if use_entmax15 else F.softmax - self.attn_fn = F.softmax - - # add memory key / values - self.num_mem_kv = num_mem_kv - if num_mem_kv > 0: - self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - - # attention on attention - self.attn_on_attn = on_attn - self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - rel_pos=None, - sinusoidal_emb=None, - prev_attn=None, - mem=None - ): - b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device - kv_input = default(context, x) - - q_input = x - k_input = kv_input - v_input = kv_input - - if exists(mem): - k_input = torch.cat((mem, k_input), dim=-2) - v_input = torch.cat((mem, v_input), dim=-2) - - if exists(sinusoidal_emb): - # in shortformer, the query would start at a position offset depending on the past cached memory - offset = k_input.shape[-2] - q_input.shape[-2] - q_input = q_input + sinusoidal_emb(q_input, offset=offset) - k_input = k_input + sinusoidal_emb(k_input) - - q = self.to_q(q_input) - k = self.to_k(k_input) - v = self.to_v(v_input) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) - - input_mask = None - if any(map(exists, (mask, context_mask))): - q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) - k_mask = q_mask if not exists(context) else context_mask - k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) - q_mask = rearrange(q_mask, 'b i -> b () i ()') - k_mask = rearrange(k_mask, 'b j -> b () () j') - input_mask = q_mask * k_mask - - if self.num_mem_kv > 0: - mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) - k = torch.cat((mem_k, k), dim=-2) - v = torch.cat((mem_v, v), dim=-2) - if exists(input_mask): - input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) - - dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale - mask_value = max_neg_value(dots) - - if exists(prev_attn): - dots = dots + prev_attn - - pre_softmax_attn = dots - - if talking_heads: - dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() - - if exists(rel_pos): - dots = rel_pos(dots) - - if exists(input_mask): - dots.masked_fill_(~input_mask, mask_value) - del input_mask - - if self.causal: - i, j = dots.shape[-2:] - r = torch.arange(i, 
device=device) - mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') - mask = F.pad(mask, (j - i, 0), value=False) - dots.masked_fill_(mask, mask_value) - del mask - - if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: - top, _ = dots.topk(self.sparse_topk, dim=-1) - vk = top[..., -1].unsqueeze(-1).expand_as(dots) - mask = dots < vk - dots.masked_fill_(mask, mask_value) - del mask - - attn = self.attn_fn(dots, dim=-1) - post_softmax_attn = attn - - attn = self.dropout(attn) - - if talking_heads: - attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() - - out = einsum('b h i j, b h j d -> b h i d', attn, v) - out = rearrange(out, 'b h n d -> b n (h d)') - - intermediates = Intermediates( - pre_softmax_attn=pre_softmax_attn, - post_softmax_attn=post_softmax_attn - ) - - return self.to_out(out), intermediates - - -class AttentionLayers(nn.Module): - def __init__( - self, - dim, - depth, - heads=8, - causal=False, - cross_attend=False, - only_cross=False, - use_scalenorm=False, - use_rmsnorm=False, - use_rezero=False, - rel_pos_num_buckets=32, - rel_pos_max_distance=128, - position_infused_attn=False, - custom_layers=None, - sandwich_coef=None, - par_ratio=None, - residual_attn=False, - cross_residual_attn=False, - macaron=False, - pre_norm=True, - gate_residual=False, - **kwargs - ): - super().__init__() - ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) - attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) - - dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) - - self.dim = dim - self.depth = depth - self.layers = nn.ModuleList([]) - - self.has_pos_emb = position_infused_attn - self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None - self.rotary_pos_emb = always(None) - - assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' - self.rel_pos = None - - self.pre_norm = pre_norm - - self.residual_attn = residual_attn - self.cross_residual_attn = cross_residual_attn - - norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm - norm_class = RMSNorm if use_rmsnorm else norm_class - norm_fn = partial(norm_class, dim) - - norm_fn = nn.Identity if use_rezero else norm_fn - branch_fn = Rezero if use_rezero else None - - if cross_attend and not only_cross: - default_block = ('a', 'c', 'f') - elif cross_attend and only_cross: - default_block = ('c', 'f') - else: - default_block = ('a', 'f') - - if macaron: - default_block = ('f',) + default_block - - if exists(custom_layers): - layer_types = custom_layers - elif exists(par_ratio): - par_depth = depth * len(default_block) - assert 1 < par_ratio <= par_depth, 'par ratio out of range' - default_block = tuple(filter(not_equals('f'), default_block)) - par_attn = par_depth // par_ratio - depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper - par_width = (depth_cut + depth_cut // par_attn) // par_attn - assert len(default_block) <= par_width, 'default block is too large for par_ratio' - par_block = default_block + ('f',) * (par_width - len(default_block)) - par_head = par_block * par_attn - layer_types = par_head + ('f',) * (par_depth - len(par_head)) - elif exists(sandwich_coef): - assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' - layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef - else: - layer_types = 
default_block * depth - - self.layer_types = layer_types - self.num_attn_layers = len(list(filter(equals('a'), layer_types))) - - for layer_type in self.layer_types: - if layer_type == 'a': - layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) - elif layer_type == 'c': - layer = Attention(dim, heads=heads, **attn_kwargs) - elif layer_type == 'f': - layer = FeedForward(dim, **ff_kwargs) - layer = layer if not macaron else Scale(0.5, layer) - else: - raise Exception(f'invalid layer type {layer_type}') - - if isinstance(layer, Attention) and exists(branch_fn): - layer = branch_fn(layer) - - if gate_residual: - residual_fn = GRUGating(dim) - else: - residual_fn = Residual() - - self.layers.append(nn.ModuleList([ - norm_fn(), - layer, - residual_fn - ])) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - mems=None, - return_hiddens=False - ): - hiddens = [] - intermediates = [] - prev_attn = None - prev_cross_attn = None - - mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers - - for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): - is_last = ind == (len(self.layers) - 1) - - if layer_type == 'a': - hiddens.append(x) - layer_mem = mems.pop(0) - - residual = x - - if self.pre_norm: - x = norm(x) - - if layer_type == 'a': - out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos, - prev_attn=prev_attn, mem=layer_mem) - elif layer_type == 'c': - out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn) - elif layer_type == 'f': - out = block(x) - - x = residual_fn(out, residual) - - if layer_type in ('a', 'c'): - intermediates.append(inter) - - if layer_type == 'a' and self.residual_attn: - prev_attn = inter.pre_softmax_attn - elif layer_type == 'c' and self.cross_residual_attn: - prev_cross_attn = inter.pre_softmax_attn - - if not self.pre_norm and not is_last: - x = norm(x) - - if return_hiddens: - intermediates = LayerIntermediates( - hiddens=hiddens, - attn_intermediates=intermediates - ) - - return x, intermediates - - return x - - -class Encoder(AttentionLayers): - def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on encoder' - super().__init__(causal=False, **kwargs) - - - -class TransformerWrapper(nn.Module): - def __init__( - self, - *, - num_tokens, - max_seq_len, - attn_layers, - emb_dim=None, - max_mem_len=0., - emb_dropout=0., - num_memory_tokens=None, - tie_embedding=False, - use_pos_emb=True - ): - super().__init__() - assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' - - dim = attn_layers.dim - emb_dim = default(emb_dim, dim) - - self.max_seq_len = max_seq_len - self.max_mem_len = max_mem_len - self.num_tokens = num_tokens - - self.token_emb = nn.Embedding(num_tokens, emb_dim) - self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( - use_pos_emb and not attn_layers.has_pos_emb) else always(0) - self.emb_dropout = nn.Dropout(emb_dropout) - - self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - - self.init_() - - self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() - - # memory tokens (like [cls]) from Memory Transformers paper - num_memory_tokens = default(num_memory_tokens, 0) - self.num_memory_tokens = num_memory_tokens - if num_memory_tokens > 0: 
- self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) - - # let funnel encoder know number of memory tokens, if specified - if hasattr(attn_layers, 'num_memory_tokens'): - attn_layers.num_memory_tokens = num_memory_tokens - - def init_(self): - nn.init.normal_(self.token_emb.weight, std=0.02) - - def forward( - self, - x, - return_embeddings=False, - mask=None, - return_mems=False, - return_attn=False, - mems=None, - **kwargs - ): - b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens - x = self.token_emb(x) - x += self.pos_emb(x) - x = self.emb_dropout(x) - - x = self.project_emb(x) - - if num_mem > 0: - mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) - x = torch.cat((mem, x), dim=1) - - # auto-handle masking after appending memory tokens - if exists(mask): - mask = F.pad(mask, (num_mem, 0), value=True) - - x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) - x = self.norm(x) - - mem, x = x[:, :num_mem], x[:, num_mem:] - - out = self.to_logits(x) if not return_embeddings else x - - if return_mems: - hiddens = intermediates.hiddens - new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens - new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) - return out, new_mems - - if return_attn: - attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) - return out, attn_maps - - return out - diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/gatherers/pair_gatherer.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/gatherers/pair_gatherer.py deleted file mode 100644 index 63c11e0c121a6608a7a39769f8a9f09bdf3ba076..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/gatherers/pair_gatherer.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -import re -from typing import List, Optional, Tuple - -from mmocr.registry import DATA_GATHERERS -from mmocr.utils import list_files -from .base import BaseGatherer - - -@DATA_GATHERERS.register_module() -class PairGatherer(BaseGatherer): - """Gather the dataset files. Specifically for the paired annotations. That - is to say, each image has a corresponding annotation file. For example, - - img_1.jpg <---> gt_img_1.txt - img_2.jpg <---> gt_img_2.txt - img_3.jpg <---> gt_img_3.txt - - Args: - img_suffixes (List[str]): File suffixes that used for searching. - rule (Sequence): The rule for pairing the files. The first element is - the matching pattern for the file, and the second element is the - replacement pattern, which should be a regular expression. For - example, to map the image name img_1.jpg to the annotation name - gt_img_1.txt, the rule is - [r'img_(\d+)\.([jJ][pP][gG])', r'gt_img_\1.txt'] # noqa: W605 E501 - - Note: PairGatherer assumes that each split annotation file is in the - correspond split directory. For example, all the train annotation files are - in {ann_dir}/train. 
- """ - - def __init__(self, - img_suffixes: Optional[List[str]] = None, - rule: Optional[List[str]] = None, - **kwargs) -> None: - super().__init__(**kwargs) - self.rule = rule - self.img_suffixes = img_suffixes - # ann_dir = {ann_root}/{ann_dir}/{split} - self.ann_dir = osp.join(self.ann_dir, self.split) - - def __call__(self) -> Tuple[List[str], List[str]]: - """tuple(list, list): The list of image paths and the list of - annotation paths.""" - - img_list = list() - ann_list = list() - for img_path in list_files(self.img_dir, self.img_suffixes): - if not re.match(self.rule[0], osp.basename(img_path)): - continue - ann_name = re.sub(self.rule[0], self.rule[1], - osp.basename(img_path)) - ann_path = osp.join(self.ann_dir, ann_name) - img_list.append(img_path) - ann_list.append(ann_path) - - return img_list, ann_list diff --git a/spaces/MrVicente/RA-BART/custom_bart/encoder.py b/spaces/MrVicente/RA-BART/custom_bart/encoder.py deleted file mode 100644 index 409da4c6d57316d056dff33da3f76b81a94508d7..0000000000000000000000000000000000000000 --- a/spaces/MrVicente/RA-BART/custom_bart/encoder.py +++ /dev/null @@ -1,216 +0,0 @@ -############################# -# Imports -############################# - -# Python modules -from typing import ( - Optional, - Tuple, - Union, -) -import math -import random - -# Remote modules -import torch -from torch import nn -from transformers import ( - BartConfig, - BartPretrainedModel, -) -from transformers.modeling_outputs import BaseModelOutput -from transformers.models.bart.modeling_bart import ( - BartLearnedPositionalEmbedding, - _expand_mask -) - -# Local modules -from .config import BartCustomConfig -from .encoder_layer import BartCustomEncoderLayer - - -class BartCustomEncoder(BartPretrainedModel): - """ - Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a - [`BartEncoderLayer`]. 
- - Args: - config: BartConfig - embed_tokens (nn.Embedding): output embedding - """ - - def __init__(self, config: BartCustomConfig, embed_tokens: Optional[nn.Embedding] = None): - super().__init__(config) - - self.dropout = config.dropout - self.layerdrop = config.encoder_layerdrop - - embed_dim = config.d_model - self.padding_idx = config.pad_token_id - self.max_source_positions = config.max_position_embeddings - self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 - - if embed_tokens is not None: - self.embed_tokens = embed_tokens - else: - self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) - - if not config.should_embed_positions: - self.embed_positions = None - else: - self.embed_positions = BartLearnedPositionalEmbedding( - config.max_position_embeddings, - embed_dim, - ) - device = self.device - self.layers = nn.ModuleList([BartCustomEncoderLayer(config, heads_mask=torch.Tensor(config.heads_mask[i]).to(device)) - for i in range(config.encoder_layers)]) - self.layernorm_embedding = nn.LayerNorm(embed_dim) - - self.gradient_checkpointing = False - # Initialize weights and apply final processing - self.post_init() - self.run_config = config - - def get_input_embeddings(self): - return self.embed_tokens - - def set_input_embeddings(self, value): - self.embed_tokens = value - - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - relation_inputs: Optional[torch.Tensor] = None, - ) -> Union[Tuple, BaseModelOutput]: - r""" - Args: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you - provide it. - - Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. 
- return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # retrieve input_ids and inputs_embeds - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - if inputs_embeds is None: - inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale - - # Important for datasets which the order of words deoes not matter(eg: commongen) - if self.run_config.should_embed_positions: - embed_pos = self.embed_positions(input_shape) - hidden_states = inputs_embeds + embed_pos - else: - hidden_states = inputs_embeds - - hidden_states = self.layernorm_embedding(hidden_states) - hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) - - # expand attention_mask - if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - # check if head_mask has a correct number of layers specified if desired - if head_mask is not None: - if head_mask.size()[0] != (len(self.layers)): - raise ValueError( - f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." 
- ) - - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) - dropout_probability = random.uniform(0, 1) - if self.training and (dropout_probability < self.layerdrop): # skip the layer - layer_outputs = (None, None) - else: - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, output_attentions, relation_inputs=relation_inputs) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(encoder_layer), - hidden_states, - attention_mask, - (head_mask[idx] if head_mask is not None else None), - ) - else: - layer_outputs = encoder_layer( - hidden_states, - attention_mask, - layer_head_mask=(head_mask[idx] if head_mask is not None else None), - output_attentions=output_attentions, - relation_inputs=relation_inputs, - ) - - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) diff --git a/spaces/Munderstand/sd-img-variations/README.md b/spaces/Munderstand/sd-img-variations/README.md deleted file mode 100644 index c73c17dd86c5f3b19b69ca0e5ff09288717878a9..0000000000000000000000000000000000000000 --- a/spaces/Munderstand/sd-img-variations/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stable Diffusion Img Variations CPU -emoji: 🐓🐣🐣🐣🐣 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -duplicated_from: fffiloni/sd-img-variations ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NATSpeech/DiffSpeech/modules/vocoder/hifigan/mel_utils.py b/spaces/NATSpeech/DiffSpeech/modules/vocoder/hifigan/mel_utils.py deleted file mode 100644 index a75fce72db54812320bc60aedfdd378ccecb3374..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/modules/vocoder/hifigan/mel_utils.py +++ /dev/null @@ -1,80 +0,0 @@ -import numpy as np -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn -from scipy.io.wavfile import read - -MAX_WAV_VALUE = 32768.0 - - -def load_wav(full_path): - sampling_rate, data = read(full_path) - return data, sampling_rate - - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def mel_spectrogram(y, hparams, center=False, complex=False): - # hop_size: 512 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate) - # win_size: 2048 # For 22050Hz, 1100 ~= 50 ms (If 
None, win_size: fft_size) (0.05 * sample_rate) - # fmin: 55 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) - # fmax: 10000 # To be increased/reduced depending on data. - # fft_size: 2048 # Extra window size is filled with 0 paddings to match this parameter - # n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, - n_fft = hparams['fft_size'] - num_mels = hparams['audio_num_mel_bins'] - sampling_rate = hparams['audio_sample_rate'] - hop_size = hparams['hop_size'] - win_size = hparams['win_size'] - fmin = hparams['fmin'] - fmax = hparams['fmax'] - y = y.clamp(min=-1., max=1.) - global mel_basis, hann_window - if fmax not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[str(fmax) + '_' + str(y.device)] = torch.from_numpy(mel).float().to(y.device) - hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), [int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)], - mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - if not complex: - spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) - spec = torch.matmul(mel_basis[str(fmax) + '_' + str(y.device)], spec) - spec = spectral_normalize_torch(spec) - else: - B, C, T, _ = spec.shape - spec = spec.transpose(1, 2) # [B, T, n_fft, 2] - return spec diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/ops/__init__.py b/spaces/NCTCMumbai/NCTC/models/official/vision/detection/ops/__init__.py deleted file mode 100644 index 931c2ef11db4a949e6c2e95bca44e36bac1241e9..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/ops/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== diff --git a/spaces/NCTCMumbai/NCTC/models/research/audioset/vggish/README.md b/spaces/NCTCMumbai/NCTC/models/research/audioset/vggish/README.md deleted file mode 100644 index 0be0ae86687b81f2074d9011f4c16b5402a138bf..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/audioset/vggish/README.md +++ /dev/null @@ -1,184 +0,0 @@ -# VGGish - -The initial AudioSet release included 128-dimensional embeddings of each -AudioSet segment produced from a VGG-like audio classification model that was -trained on a large YouTube dataset (a preliminary version of what later became -[YouTube-8M](https://research.google.com/youtube8m)). 
- -We provide a TensorFlow definition of this model, which we call __*VGGish*__, as -well as supporting code to extract input features for the model from audio -waveforms and to post-process the model embedding output into the same format as -the released embedding features. - -## Installation - -VGGish depends on the following Python packages: - -* [`numpy`](http://www.numpy.org/) -* [`resampy`](http://resampy.readthedocs.io/en/latest/) -* [`tensorflow`](http://www.tensorflow.org/) (currently, only TF v1.x) -* [`tf_slim`](https://github.com/google-research/tf-slim) -* [`six`](https://pythonhosted.org/six/) -* [`soundfile`](https://pysoundfile.readthedocs.io/) - -These are all easily installable via, e.g., `pip install numpy` (as in the -sample installation session below). - -Any reasonably recent version of these packages shold work. Note that we currently only support -TensorFlow v1.x due to a [`tf_slim` limitation](https://github.com/google-research/tf-slim/pull/1). -TensorFlow v1.15 (the latest version as of Jan 2020) has been tested to work. - -VGGish also requires downloading two data files: - -* [VGGish model checkpoint](https://storage.googleapis.com/audioset/vggish_model.ckpt), - in TensorFlow checkpoint format. -* [Embedding PCA parameters](https://storage.googleapis.com/audioset/vggish_pca_params.npz), - in NumPy compressed archive format. - -After downloading these files into the same directory as this README, the -installation can be tested by running `python vggish_smoke_test.py` which -runs a known signal through the model and checks the output. - -Here's a sample installation and test session: - -```shell -# You can optionally install and test VGGish within a Python virtualenv, which -# is useful for isolating changes from the rest of your system. For example, you -# may have an existing version of some packages that you do not want to upgrade, -# or you want to try Python 3 instead of Python 2. If you decide to use a -# virtualenv, you can create one by running -# $ virtualenv vggish # For Python 2 -# or -# $ python3 -m venv vggish # For Python 3 -# and then enter the virtual environment by running -# $ source vggish/bin/activate # Assuming you use bash -# Leave the virtual environment at the end of the session by running -# $ deactivate -# Within the virtual environment, do not use 'sudo'. - -# Upgrade pip first. Also make sure wheel is installed. -$ sudo python -m pip install --upgrade pip wheel - -# Install all dependences. -$ sudo pip install numpy resampy tensorflow==1.15 tf_slim six soundfile - -# Clone TensorFlow models repo into a 'models' directory. -$ git clone https://github.com/tensorflow/models.git -$ cd models/research/audioset/vggish -# Download data files into same directory as code. -$ curl -O https://storage.googleapis.com/audioset/vggish_model.ckpt -$ curl -O https://storage.googleapis.com/audioset/vggish_pca_params.npz - -# Installation ready, let's test it. -$ python vggish_smoke_test.py -# If we see "Looks Good To Me", then we're all set. -``` - -## Usage - -VGGish can be used in two ways: - -* *As a feature extractor*: VGGish converts audio input features into a - semantically meaningful, high-level 128-D embedding which can be fed as input - to a downstream classification model. The downstream model can be shallower - than usual because the VGGish embedding is more semantically compact than raw - audio features. - - So, for example, you could train a classifier for 10 of the AudioSet classes - by using the released embeddings as features. 
Then, you could use that - trained classifier with any arbitrary audio input by running the audio through - the audio feature extractor and VGGish model provided here, passing the - resulting embedding features as input to your trained model. - `vggish_inference_demo.py` shows how to produce VGGish embeddings from - arbitrary audio. - -* *As part of a larger model*: Here, we treat VGGish as a "warm start" for the - lower layers of a model that takes audio features as input and adds more - layers on top of the VGGish embedding. This can be used to fine-tune VGGish - (or parts thereof) if you have large datasets that might be very different - from the typical YouTube video clip. `vggish_train_demo.py` shows how to add - layers on top of VGGish and train the whole model. - -## About the Model - -The VGGish code layout is as follows: - -* `vggish_slim.py`: Model definition in TensorFlow Slim notation. -* `vggish_params.py`: Hyperparameters. -* `vggish_input.py`: Converter from audio waveform into input examples. -* `mel_features.py`: Audio feature extraction helpers. -* `vggish_postprocess.py`: Embedding postprocessing. -* `vggish_inference_demo.py`: Demo of VGGish in inference mode. -* `vggish_train_demo.py`: Demo of VGGish in training mode. -* `vggish_smoke_test.py`: Simple test of a VGGish installation - -### Architecture - -See `vggish_slim.py` and `vggish_params.py`. - -VGGish is a variant of the [VGG](https://arxiv.org/abs/1409.1556) model, in -particular Configuration A with 11 weight layers. Specifically, here are the -changes we made: - -* The input size was changed to 96x64 for log mel spectrogram audio inputs. - -* We drop the last group of convolutional and maxpool layers, so we now have - only four groups of convolution/maxpool layers instead of five. - -* Instead of a 1000-wide fully connected layer at the end, we use a 128-wide - fully connected layer. This acts as a compact embedding layer. - -The model definition provided here defines layers up to and including the -128-wide embedding layer. - -### Input: Audio Features - -See `vggish_input.py` and `mel_features.py`. - -VGGish was trained with audio features computed as follows: - -* All audio is resampled to 16 kHz mono. -* A spectrogram is computed using magnitudes of the Short-Time Fourier Transform - with a window size of 25 ms, a window hop of 10 ms, and a periodic Hann - window. -* A mel spectrogram is computed by mapping the spectrogram to 64 mel bins - covering the range 125-7500 Hz. -* A stabilized log mel spectrogram is computed by applying - log(mel-spectrum + 0.01) where the offset is used to avoid taking a logarithm - of zero. -* These features are then framed into non-overlapping examples of 0.96 seconds, - where each example covers 64 mel bands and 96 frames of 10 ms each. - -We provide our own NumPy implementation that produces features that are very -similar to those produced by our internal production code. This results in -embedding outputs that are closely match the embeddings that we have already -released. Note that these embeddings will *not* be bit-for-bit identical to the -released embeddings due to small differences between the feature computation -code paths, and even between two different installations of VGGish with -different underlying libraries and hardware. However, we expect that the -embeddings will be equivalent in the context of a downstream classification -task. - -### Output: Embeddings - -See `vggish_postprocess.py`. 
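Before the postprocessing details below, here is a rough sketch of the input framing described in the "Input: Audio Features" section above: audio is analysed with 25 ms windows and 10 ms hops at 16 kHz, and the resulting frames are grouped into non-overlapping examples of 96 frames (0.96 s) by 64 mel bands. The arithmetic below is illustrative only and does not call into `mel_features.py`:

```python
SAMPLE_RATE = 16000
WINDOW_S, HOP_S = 0.025, 0.010      # 25 ms STFT window, 10 ms hop
FRAMES_PER_EXAMPLE = 96             # each example covers 0.96 s and 64 mel bands

def approx_num_examples(num_samples: int) -> int:
    """Roughly how many non-overlapping VGGish input examples a waveform yields."""
    window = int(WINDOW_S * SAMPLE_RATE)
    hop = int(HOP_S * SAMPLE_RATE)
    n_frames = 1 + (num_samples - window) // hop
    return max(n_frames, 0) // FRAMES_PER_EXAMPLE

print(approx_num_examples(10 * SAMPLE_RATE))  # ~10 examples for 10 s of audio
```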
- -The released AudioSet embeddings were postprocessed before release by applying a -PCA transformation (which performs both PCA and whitening) as well as -quantization to 8 bits per embedding element. This was done to be compatible -with the [YouTube-8M](https://research.google.com/youtube8m) project which has -released visual and audio embeddings for millions of YouTube videos in the same -PCA/whitened/quantized format. - -We provide a Python implementation of the postprocessing which can be applied to -batches of embeddings produced by VGGish. `vggish_inference_demo.py` shows how -the postprocessor can be run after inference. - -If you don't need to use the released embeddings or YouTube-8M, then you could -skip postprocessing and use raw embeddings. - -A [Colab](https://colab.research.google.com/) -showing how to download the model and calculate the embeddings on your -own sound data is available here: -[AudioSet Embedding Colab](https://colab.research.google.com/drive/1TbX92UL9sYWbdwdGE0rJ9owmezB-Rl1C). - diff --git a/spaces/Nee001/bing0/src/lib/isomorphic/browser.ts b/spaces/Nee001/bing0/src/lib/isomorphic/browser.ts deleted file mode 100644 index de125b1f1786d1618cb1ff47f403d76c6784f4ce..0000000000000000000000000000000000000000 --- a/spaces/Nee001/bing0/src/lib/isomorphic/browser.ts +++ /dev/null @@ -1,11 +0,0 @@ -'use client' - -const debug = console.info.bind(console) - -class WebSocketAlias extends WebSocket { - constructor(address: string | URL, ...args: any) { - super(address) - } -} - -export default { fetch, WebSocket: WebSocketAlias, debug } diff --git a/spaces/Neo-Salvatore/GPTBase/README.md b/spaces/Neo-Salvatore/GPTBase/README.md deleted file mode 100644 index a9df0bebf10a896f135fbfc36e6cece55246339e..0000000000000000000000000000000000000000 --- a/spaces/Neo-Salvatore/GPTBase/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: GBPTBase -emoji: 🏢 -colorFrom: purple -colorTo: indigo -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NicolasGaudemet/LongDocumentSummarizer/README.md b/spaces/NicolasGaudemet/LongDocumentSummarizer/README.md deleted file mode 100644 index cd305acc0d9e6b8511e3d285af34faa7cb7e4569..0000000000000000000000000000000000000000 --- a/spaces/NicolasGaudemet/LongDocumentSummarizer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: LongDocumentSummarizer -emoji: 🐠 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.28.0 -app_file: summarizer_app.py -pinned: true ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NingKanae/anime-voice-generator/commons.py b/spaces/NingKanae/anime-voice-generator/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/NingKanae/anime-voice-generator/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - 
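A quick aside on `get_padding` just above: for a stride-1 dilated 1-D convolution, a padding of `(kernel_size - 1) * dilation / 2` keeps the output length equal to the input length. A minimal standalone check (illustrative only; it redefines the helper rather than importing the module being listed here):

```python
import torch

def get_padding(kernel_size, dilation=1):
    # same formula as the helper above: (kernel_size - 1) * dilation // 2
    return int((kernel_size * dilation - dilation) / 2)

conv = torch.nn.Conv1d(1, 1, kernel_size=5, dilation=2,
                       padding=get_padding(5, dilation=2))
x = torch.zeros(1, 1, 100)
print(conv(x).shape)  # torch.Size([1, 1, 100]) -- sequence length preserved
```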
-def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = 
torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/Norod78/ComicsHeroU2Net/app.py b/spaces/Norod78/ComicsHeroU2Net/app.py deleted file mode 100644 index 41ba8fd394eaf4025e5f44d3e31a635b8ad864d3..0000000000000000000000000000000000000000 --- a/spaces/Norod78/ComicsHeroU2Net/app.py +++ /dev/null @@ -1,135 +0,0 @@ -import os -os.system("pip install dlib") -import sys -import face_detection -import PIL -from PIL import Image, ImageOps, ImageFile -import numpy as np -import cv2 as cv -import torch - -torch.set_grad_enabled(False) -model = torch.jit.load('u2net_bce_itr_16000_train_3.835149_tar_0.542587-400x_360x.jit.pt') -model.eval() - -# https://en.wikipedia.org/wiki/Unsharp_masking -# https://stackoverflow.com/a/55590133/1495606 -def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=2.0, threshold=0): - """Return a sharpened version of the image, using an unsharp mask.""" - blurred = cv.GaussianBlur(image, kernel_size, sigma) - sharpened = float(amount + 1) * image - float(amount) * blurred - sharpened = np.maximum(sharpened, np.zeros(sharpened.shape)) - sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape)) - sharpened = sharpened.round().astype(np.uint8) - if threshold > 0: - low_contrast_mask = np.absolute(image - blurred) < threshold - np.copyto(sharpened, image, where=low_contrast_mask) - return sharpened - -def normPRED(d): - ma = np.max(d) - mi = np.min(d) - - dn = (d-mi)/(ma-mi) - - return dn - -def array_to_np(array_in): - array_in = normPRED(array_in) - array_in = np.squeeze(255.0*(array_in)) - array_in = np.transpose(array_in, (1, 2, 0)) - return array_in - -def array_to_image(array_in): - array_in = normPRED(array_in) - array_in = np.squeeze(255.0*(array_in)) - array_in = np.transpose(array_in, (1, 2, 0)) - im = Image.fromarray(array_in.astype(np.uint8)) - return im - - -def image_as_array(image_in): - image_in = np.array(image_in, np.float32) - tmpImg = np.zeros((image_in.shape[0],image_in.shape[1],3)) - image_in = image_in/np.max(image_in) - if image_in.shape[2]==1: - tmpImg[:,:,0] = (image_in[:,:,0]-0.485)/0.229 - tmpImg[:,:,1] = (image_in[:,:,0]-0.485)/0.229 - tmpImg[:,:,2] = (image_in[:,:,0]-0.485)/0.229 - else: - tmpImg[:,:,0] = (image_in[:,:,0]-0.485)/0.229 - tmpImg[:,:,1] = (image_in[:,:,1]-0.456)/0.224 - tmpImg[:,:,2] = (image_in[:,:,2]-0.406)/0.225 - - tmpImg = tmpImg.transpose((2, 0, 1)) - image_out = np.expand_dims(tmpImg, 0) - return image_out - -def find_aligned_face(image_in, size=400): - aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size) - return aligned_image, n_faces, quad - -def align_first_face(image_in, size=400): - aligned_image, n_faces, quad = 
find_aligned_face(image_in,size=size) - if n_faces == 0: - try: - image_in = ImageOps.exif_transpose(image_in) - except: - print("exif problem, not rotating") - image_in = image_in.resize((size, size)) - im_array = image_as_array(image_in) - else: - im_array = image_as_array(aligned_image) - - return im_array - -def img_concat_h(im1, im2): - dst = Image.new('RGB', (im1.width + im2.width, im1.height)) - dst.paste(im1, (0, 0)) - dst.paste(im2, (im1.width, 0)) - return dst - -import gradio as gr - -def face2hero( - img: Image.Image, - size: int -) -> Image.Image: - - aligned_img = align_first_face(img) - if aligned_img is None: - output=None - else: - input = torch.Tensor(aligned_img) - results = model(input) - hero_np_image = array_to_np(results[1].detach().numpy()) - hero_image = unsharp_mask(hero_np_image) - hero_image = Image.fromarray(hero_image) - - output = img_concat_h(array_to_image(aligned_img), hero_image) - del results - - return output - -def inference(img): - out = face2hero(img, 400) - return out - - -title = "Comics hero U2Net" -description = "Style transfer a face into one of a \"Comics hero\". Upload an image with a face, or click on one of the examples below. If a face could not be detected, an image will still be created." -article = "

    See the Github Repo
    samples: Sample00001 Sample00002 Sample00003 Sample00004 Sample00005
    The \"Comics Hero (U2Net)\" model was trained by Doron Adler

    " - -examples=[['Example00001.jpg'],['Example00002.jpg'],['Example00003.jpg'],['Example00004.jpg'],['Example00005.jpg'], ['Example00006.jpg']] - -gr.Interface( - inference, - gr.inputs.Image(type="pil", label="Input"), - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=examples, - enable_queue=True, - allow_flagging=False - ).launch() diff --git a/spaces/Nyari/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/train.py b/spaces/Nyari/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/train.py deleted file mode 100644 index 5097ff26a590abd8fb47b4875c8ea71db79f7a02..0000000000000000000000000000000000000000 --- a/spaces/Nyari/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/train.py +++ /dev/null @@ -1,11 +0,0 @@ -# flake8: noqa -import os.path as osp -from basicsr.train import train_pipeline - -import realesrgan.archs -import realesrgan.data -import realesrgan.models - -if __name__ == "__main__": - root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir)) - train_pipeline(root_path) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/audio/feature_transforms/utterance_cmvn.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/audio/feature_transforms/utterance_cmvn.py deleted file mode 100644 index 6bbd0ae821b42ab693f4141e7c161d6d7cb0b15a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/audio/feature_transforms/utterance_cmvn.py +++ /dev/null @@ -1,40 +0,0 @@ -import numpy as np -from fairseq.data.audio.feature_transforms import ( - AudioFeatureTransform, - register_audio_feature_transform, -) - - -@register_audio_feature_transform("utterance_cmvn") -class UtteranceCMVN(AudioFeatureTransform): - """Utterance-level CMVN (cepstral mean and variance normalization)""" - - @classmethod - def from_config_dict(cls, config=None): - _config = {} if config is None else config - return UtteranceCMVN( - _config.get("norm_means", True), - _config.get("norm_vars", True), - ) - - def __init__(self, norm_means=True, norm_vars=True): - self.norm_means, self.norm_vars = norm_means, norm_vars - - def __repr__(self): - return ( - self.__class__.__name__ - + f"(norm_means={self.norm_means}, norm_vars={self.norm_vars})" - ) - - def __call__(self, x): - mean = x.mean(axis=0) - square_sums = (x ** 2).sum(axis=0) - - if self.norm_means: - x = np.subtract(x, mean) - if self.norm_vars: - var = square_sums / x.shape[0] - mean ** 2 - std = np.sqrt(np.maximum(var, 1e-10)) - x = np.divide(x, std) - - return x diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/encoders/bytes.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/encoders/bytes.py deleted file mode 100644 index f88f8f6929f5b6bdb0db470be9ebedf8fe1f752d..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/encoders/bytes.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -from fairseq.data.encoders import register_bpe -from fairseq.data.encoders.byte_utils import ( - SPACE, - SPACE_ESCAPE, - byte_encode, - smart_byte_decode, -) - - -@register_bpe("bytes") -class Bytes(object): - def __init__(self, *unused): - pass - - @staticmethod - def add_args(parser): - pass - - @staticmethod - def encode(x: str) -> str: - encoded = byte_encode(x) - escaped = encoded.replace(SPACE, SPACE_ESCAPE) - return SPACE.join(list(escaped)) - - @staticmethod - def decode(x: str) -> str: - unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE) - return smart_byte_decode(unescaped) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/cmd.sh b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/cmd.sh deleted file mode 100644 index e74953194d41f0d93855d41b2acef08556d92477..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/cmd.sh +++ /dev/null @@ -1,15 +0,0 @@ -# you can change cmd.sh depending on what type of queue you are using. -# If you have no queueing system and want to run on a local machine, you -# can change all instances 'queue.pl' to run.pl (but be careful and run -# commands one by one: most recipes will exhaust the memory on your -# machine). queue.pl works with GridEngine (qsub). slurm.pl works -# with slurm. Different queues are configured differently, with different -# queue names and different ways of specifying things like memory; -# to account for these differences you can create and edit the file -# conf/queue.conf to match your queue's configuration. Search for -# conf/queue.conf in http://kaldi-asr.org/doc/queue.html for more information, -# or search for the string 'default_config' in utils/queue.pl or utils/slurm.pl. - -export train_cmd="run.pl --mem 2G" -export decode_cmd="run.pl --mem 4G" -export mkgraph_cmd="run.pl --mem 8G" diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang_word.sh b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang_word.sh deleted file mode 100644 index a7ea3877beefe1d4d53f9f7e32b004d8ce01e22a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang_word.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -num_sil_states=3 -num_nonsil_states=1 - -. ./cmd.sh -. ./path.sh -. 
parse_options.sh - -set -eux - -dict=$1 -data_dir=$2 -lexicon=$3 - -dict_dir=$data_dir/local/dict_word -tmplm_dir=$data_dir/local/lang_tmp_word -lm_dir=$data_dir/lang_word - -mkdir -p $dict_dir $tmplm_dir $lm_dir - -# prepare dict -echo "SIL" > $dict_dir/silence_phones.txt -echo "SIL" > $dict_dir/optional_silence.txt -awk '{print $1}' $dict > $dict_dir/nonsilence_phones.txt - -(echo "!SIL SIL"; echo " SIL";) | cat - $lexicon > $dict_dir/lexicon.txt - -echo "SIL" > $dict_dir/extra_questions.txt -awk '{printf $1" "} END {printf "\n"}' $dict >> $dict_dir/extra_questions.txt - -# prepare lang -utils/prepare_lang.sh --position-dependent-phones false \ - --num_sil_states $num_sil_states --num_nonsil_states $num_nonsil_states \ - $dict_dir "" $tmplm_dir $lm_dir diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/inpainting_src/ldm_inpainting/ldm/models/diffusion/ddim.py b/spaces/OpenGVLab/InternGPT/iGPT/models/inpainting_src/ldm_inpainting/ldm/models/diffusion/ddim.py deleted file mode 100644 index 7a0429759fae780f515770bc5e58e8d0852d4347..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/inpainting_src/ldm_inpainting/ldm/models/diffusion/ddim.py +++ /dev/null @@ -1,203 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm -from functools import partial - -from ...modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like - - -class DDIMSampler(object): - def __init__(self, model, schedule="linear", **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. 
/ alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for DDIM sampling is {size}, eta {eta}') - - samples, intermediates = self.ddim_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - ) - return samples, intermediates - - @torch.no_grad() - def ddim_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None,): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='DDIM Sampler', 
total=total_steps) - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. - mask) * img - - outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning) - img, pred_x0 = outs - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None): - b, *_, device = *x.shape, x.device - - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - c_in = torch.cat([unconditional_conditioning, c]) - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - # direction pointing to x_t - dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/evaluation/utils.py b/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/evaluation/utils.py deleted file mode 100644 index 6d7c15c9242ed8a9bc59fbb3b450cca394720bb8..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/evaluation/utils.py +++ /dev/null @@ -1,28 +0,0 @@ -from enum import Enum - -import yaml -from easydict import EasyDict as edict -import torch.nn as nn -import torch - - -def load_yaml(path): - with open(path, 'r') as f: - return edict(yaml.safe_load(f)) - - -def move_to_device(obj, device): - if isinstance(obj, nn.Module): - return obj.to(device) - if torch.is_tensor(obj): - return obj.to(device) - if isinstance(obj, (tuple, list)): - return [move_to_device(el, device) for el in obj] - if isinstance(obj, dict): - return {name: move_to_device(val, device) for name, val in obj.items()} - raise ValueError(f'Unexpected type {type(obj)}') - - -class SmallMode(Enum): - DROP = "drop" - UPSCALE = "upscale" diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/schedules/schedule_20k.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/schedules/schedule_20k.py deleted file mode 100644 index bf780a1b6f6521833c6a5859675147824efa599d..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/schedules/schedule_20k.py +++ /dev/null @@ -1,9 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -runner = dict(type='IterBasedRunner', max_iters=20000) -checkpoint_config = dict(by_epoch=False, interval=2000) -evaluation = dict(interval=2000, metric='mIoU') diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/wrappers.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/wrappers.py deleted file mode 100644 index 8aebf67bf52355a513f21756ee74fe510902d075..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/wrappers.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -r"""Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/wrappers.py # noqa: E501 - -Wrap some nn modules to support empty tensor input. Currently, these wrappers -are mainly used in mask heads like fcn_mask_head and maskiou_heads since mask -heads are trained on only positive RoIs. 
-""" -import math - -import torch -import torch.nn as nn -from torch.nn.modules.utils import _pair, _triple - -from .registry import CONV_LAYERS, UPSAMPLE_LAYERS - -if torch.__version__ == 'parrots': - TORCH_VERSION = torch.__version__ -else: - # torch.__version__ could be 1.3.1+cu92, we only need the first two - # for comparison - TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2]) - - -def obsolete_torch_version(torch_version, version_threshold): - return torch_version == 'parrots' or torch_version <= version_threshold - - -class NewEmptyTensorOp(torch.autograd.Function): - - @staticmethod - def forward(ctx, x, new_shape): - ctx.shape = x.shape - return x.new_empty(new_shape) - - @staticmethod - def backward(ctx, grad): - shape = ctx.shape - return NewEmptyTensorOp.apply(grad, shape), None - - -@CONV_LAYERS.register_module('Conv', force=True) -class Conv2d(nn.Conv2d): - - def forward(self, x): - if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): - out_shape = [x.shape[0], self.out_channels] - for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size, - self.padding, self.stride, self.dilation): - o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1 - out_shape.append(o) - empty = NewEmptyTensorOp.apply(x, out_shape) - if self.training: - # produce dummy gradient to avoid DDP warning. - dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 - return empty + dummy - else: - return empty - - return super().forward(x) - - -@CONV_LAYERS.register_module('Conv3d', force=True) -class Conv3d(nn.Conv3d): - - def forward(self, x): - if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): - out_shape = [x.shape[0], self.out_channels] - for i, k, p, s, d in zip(x.shape[-3:], self.kernel_size, - self.padding, self.stride, self.dilation): - o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1 - out_shape.append(o) - empty = NewEmptyTensorOp.apply(x, out_shape) - if self.training: - # produce dummy gradient to avoid DDP warning. - dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 - return empty + dummy - else: - return empty - - return super().forward(x) - - -@CONV_LAYERS.register_module() -@CONV_LAYERS.register_module('deconv') -@UPSAMPLE_LAYERS.register_module('deconv', force=True) -class ConvTranspose2d(nn.ConvTranspose2d): - - def forward(self, x): - if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): - out_shape = [x.shape[0], self.out_channels] - for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size, - self.padding, self.stride, - self.dilation, self.output_padding): - out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op) - empty = NewEmptyTensorOp.apply(x, out_shape) - if self.training: - # produce dummy gradient to avoid DDP warning. - dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 - return empty + dummy - else: - return empty - - return super().forward(x) - - -@CONV_LAYERS.register_module() -@CONV_LAYERS.register_module('deconv3d') -@UPSAMPLE_LAYERS.register_module('deconv3d', force=True) -class ConvTranspose3d(nn.ConvTranspose3d): - - def forward(self, x): - if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): - out_shape = [x.shape[0], self.out_channels] - for i, k, p, s, d, op in zip(x.shape[-3:], self.kernel_size, - self.padding, self.stride, - self.dilation, self.output_padding): - out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op) - empty = NewEmptyTensorOp.apply(x, out_shape) - if self.training: - # produce dummy gradient to avoid DDP warning. 
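                # The dummy term below takes the first element of every parameter,
                # sums them and multiplies by 0.0, so the returned empty tensor still
                # depends on all weights in the autograd graph; this keeps
                # DistributedDataParallel from flagging the parameters as unused when
                # a step only sees empty inputs.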
- dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 - return empty + dummy - else: - return empty - - return super().forward(x) - - -class MaxPool2d(nn.MaxPool2d): - - def forward(self, x): - # PyTorch 1.9 does not support empty tensor inference yet - if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)): - out_shape = list(x.shape[:2]) - for i, k, p, s, d in zip(x.shape[-2:], _pair(self.kernel_size), - _pair(self.padding), _pair(self.stride), - _pair(self.dilation)): - o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1 - o = math.ceil(o) if self.ceil_mode else math.floor(o) - out_shape.append(o) - empty = NewEmptyTensorOp.apply(x, out_shape) - return empty - - return super().forward(x) - - -class MaxPool3d(nn.MaxPool3d): - - def forward(self, x): - # PyTorch 1.9 does not support empty tensor inference yet - if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)): - out_shape = list(x.shape[:2]) - for i, k, p, s, d in zip(x.shape[-3:], _triple(self.kernel_size), - _triple(self.padding), - _triple(self.stride), - _triple(self.dilation)): - o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1 - o = math.ceil(o) if self.ceil_mode else math.floor(o) - out_shape.append(o) - empty = NewEmptyTensorOp.apply(x, out_shape) - return empty - - return super().forward(x) - - -class Linear(torch.nn.Linear): - - def forward(self, x): - # empty tensor forward of Linear layer is supported in Pytorch 1.6 - if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 5)): - out_shape = [x.shape[0], self.out_features] - empty = NewEmptyTensorOp.apply(x, out_shape) - if self.training: - # produce dummy gradient to avoid DDP warning. - dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 - return empty + dummy - else: - return empty - - return super().forward(x) diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/utils/pretrain_model_loading.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/utils/pretrain_model_loading.py deleted file mode 100644 index 2a497b24a5e775c288587732e0e7328bf75264a4..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/utils/pretrain_model_loading.py +++ /dev/null @@ -1,49 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn - -from collections import OrderedDict - -def _remove_bn_statics(state_dict): - layer_keys = sorted(state_dict.keys()) - remove_list = [] - for key in layer_keys: - if 'running_mean' in key or 'running_var' in key or 'num_batches_tracked' in key: - remove_list.append(key) - for key in remove_list: - del state_dict[key] - return state_dict - -def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg): - import re - layer_keys = sorted(state_dict.keys()) - for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1): - if not stage_with_dcn: - continue - for old_key in layer_keys: - pattern = ".*layer{}.*conv2.*".format(ix) - r = re.match(pattern, old_key) - if r is None: - continue - for param in ["weight", "bias"]: - if old_key.find(param) is -1: - continue - if 'unit01' in old_key: - continue - new_key = old_key.replace( - "conv2.{}".format(param), "conv2.conv.{}".format(param) - ) - print("pattern: {}, old_key: {}, new_key: {}".format( - pattern, old_key, new_key - )) - state_dict[new_key] = state_dict[old_key] - del state_dict[old_key] - return state_dict - - -def load_pretrain_format(cfg, f): - model = torch.load(f) - model = _remove_bn_statics(model) - model = 
_rename_conv_weights_for_deformable_conv_layers(model, cfg) - - return dict(model=model) diff --git a/spaces/PockiBoi7/PockiGEN/app.py b/spaces/PockiBoi7/PockiGEN/app.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/PurplePanda00/plant-leaf-detection/README.md b/spaces/PurplePanda00/plant-leaf-detection/README.md deleted file mode 100644 index 04845d681ba9241b1f4ebeba47923cf20bda1ed2..0000000000000000000000000000000000000000 --- a/spaces/PurplePanda00/plant-leaf-detection/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Plant Leaf Detection -emoji: 🦀 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Qiukai/gpt/crazy_functions/test_project/latex/attention/introduction.tex b/spaces/Qiukai/gpt/crazy_functions/test_project/latex/attention/introduction.tex deleted file mode 100644 index 1baa8915f4cf7aec2520894a87470fc9436d954b..0000000000000000000000000000000000000000 --- a/spaces/Qiukai/gpt/crazy_functions/test_project/latex/attention/introduction.tex +++ /dev/null @@ -1,18 +0,0 @@ -Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. - -Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples. -%\marginpar{not sure if the memory constraints are understandable here} -Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in case of the latter. The fundamental constraint of sequential computation, however, remains. - -%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away} - -Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network. 
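
A minimal sketch of the sequential factorization of recurrent models described above, assuming the text's notation with $x_t$ the input at position $t$, $f$ a generic recurrent cell (e.g.\ an LSTM or GRU), and $n$ the sequence length:
\[
    h_t = f(h_{t-1}, x_t), \qquad t = 1, \dots, n,
\]
so producing $h_n$ takes $n$ sequentially dependent steps, which is exactly what precludes parallelization within a training example.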
- -%\marginpar{not sure if "cross-positional communication" is understandable without explanation} -%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?} - -In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs. -%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.} - -% Just a standard paragraph with citations, rewrite. -%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. Recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state encumbers recurrnet models to process multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously and haven't been used on datasets that are the of the scale of the web. What's the largest dataset we have ? . Talk about Nividia and possibly other's effors to speed up things, and possibly other efforts that alleviate this, but are still limited by it's comptuational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet,facenet (Also talk about quasi rnn here). Now we talk about attention!! Along with cell architectures such as long short-term meory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self attention! Then talk about what we do. 
\ No newline at end of file
diff --git a/spaces/RTL/videomatch/README.md b/spaces/RTL/videomatch/README.md
deleted file mode 100644
index 1c6c95d7d8d12c60bc85bc3f2374606cb457ba4c..0000000000000000000000000000000000000000
--- a/spaces/RTL/videomatch/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-title: Videomatch
-emoji: 🎥
-colorFrom: yellow
-colorTo: orange
-sdk: gradio
-sdk_version: 3.4
-app_file: app.py
-pinned: false
----
-
-# Videomatch
-Huggingface space where you can check whether a video that came from the [Algemene Politieke Beschouwingen](https://www.tweedekamer.nl/APB) (2022) has been edited in terms of timing. This space serves as an API for the [video factchecker](www.google.com).
-
-# Usage
-To use this space, paste the URL of a video that you think came from the Algemene Politieke Beschouwingen into the search bar. Depending on the tab you use, you will get different outputs for this video.
-
-There are three tabs you can use in this space:
-1. *Index*: Mostly there for historical purposes, but you can use this to index a video for the matching engine.
-2. *PlotAutoCompare*: This compares the video to the database containing full-length videos of the [Algemene Politieke Beschouwingen](https://www.tweedekamer.nl/APB) (2022) and will visualize how the video matches all the other videos in the database.
-3. *AutoCompare*: This compares the video to the database containing full-length videos of the [Algemene Politieke Beschouwingen](https://www.tweedekamer.nl/APB) (2022) and will return .json-style information about how the video matches all the other videos in the database.
-
-# Supported URL types
-The video has to be provided as a URL, and we support the following video sources:
-- Twitter
-- YouTube
-- Direct .mp4 download link
-
-# Example of the work-in-progress video factchecker site
-![examplefactchecker.png](https://s3.amazonaws.com/moonup/production/uploads/1668527555344-61dfe6a07313f246ad804cd1.png)
-
-# Example plot
-![exampleplot.png](https://s3.amazonaws.com/moonup/production/uploads/1668527600178-61dfe6a07313f246ad804cd1.png)
-
diff --git a/spaces/Rajagopal/ImageBind_zeroshot_demo2/CONTRIBUTING.md b/spaces/Rajagopal/ImageBind_zeroshot_demo2/CONTRIBUTING.md
deleted file mode 100644
index 63d0b751e8a00b606ddff92e2524faa3c90a63b0..0000000000000000000000000000000000000000
--- a/spaces/Rajagopal/ImageBind_zeroshot_demo2/CONTRIBUTING.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Contributing to ImageBind
-We want to make contributing to this project as easy and transparent as
-possible.
-
-## Pull Requests
-We actively welcome your pull requests.
-
-1. Fork the repo and create your branch from `main`.
-2. If you've added code that should be tested, add tests.
-3. If you've changed APIs, update the documentation.
-4. Ensure the test suite passes.
-5. Make sure your code lints.
-6. If you haven't already, complete the Contributor License Agreement ("CLA").
-
-## Contributor License Agreement ("CLA")
-In order to accept your pull request, we need you to submit a CLA. You only need
-to do this once to work on any of Meta's open source projects.
-
-Complete your CLA here: 
-
-## Issues
-We use GitHub issues to track public bugs. Please ensure your description is
-clear and has sufficient instructions to be able to reproduce the issue.
-
-Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe
-disclosure of security bugs. In those cases, please go through the process
-outlined on that page and do not file a public issue. 
- -## License -By contributing to Omnivore, you agree that your contributions will be licensed -under the [LICENSE](LICENSE) file in the root directory of this source tree. diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py deleted file mode 100644 index ec7f4de32cfdf58f2bf54cc9fec089ac78b2a276..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py +++ /dev/null @@ -1,882 +0,0 @@ -""" - pygments.lexer - ~~~~~~~~~~~~~~ - - Base lexer classes. - - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re -import sys -import time - -from pip._vendor.pygments.filter import apply_filters, Filter -from pip._vendor.pygments.filters import get_filter_by_name -from pip._vendor.pygments.token import Error, Text, Other, _TokenType -from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ - make_analysator, Future, guess_decode -from pip._vendor.pygments.regexopt import regex_opt - -__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', - 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this', - 'default', 'words'] - - -_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'), - (b'\xff\xfe\0\0', 'utf-32'), - (b'\0\0\xfe\xff', 'utf-32be'), - (b'\xff\xfe', 'utf-16'), - (b'\xfe\xff', 'utf-16be')] - -_default_analyse = staticmethod(lambda x: 0.0) - - -class LexerMeta(type): - """ - This metaclass automagically converts ``analyse_text`` methods into - static methods which always return float values. - """ - - def __new__(mcs, name, bases, d): - if 'analyse_text' in d: - d['analyse_text'] = make_analysator(d['analyse_text']) - return type.__new__(mcs, name, bases, d) - - -class Lexer(metaclass=LexerMeta): - """ - Lexer for a specific language. - - Basic options recognized: - ``stripnl`` - Strip leading and trailing newlines from the input (default: True). - ``stripall`` - Strip all leading and trailing whitespace from the input - (default: False). - ``ensurenl`` - Make sure that the input ends with a newline (default: True). This - is required for some lexers that consume input linewise. - - .. versionadded:: 1.3 - - ``tabsize`` - If given and greater than 0, expand tabs in the input (default: 0). - ``encoding`` - If given, must be an encoding name. This encoding will be used to - convert the input string to Unicode, if it is not already a Unicode - string (default: ``'guess'``, which uses a simple UTF-8 / Locale / - Latin1 detection. Can also be ``'chardet'`` to use the chardet - library, if it is installed. - ``inencoding`` - Overrides the ``encoding`` if given. 
- """ - - #: Name of the lexer - name = None - - #: URL of the language specification/definition - url = None - - #: Shortcuts for the lexer - aliases = [] - - #: File name globs - filenames = [] - - #: Secondary file name globs - alias_filenames = [] - - #: MIME types - mimetypes = [] - - #: Priority, should multiple lexers match and no content is provided - priority = 0 - - def __init__(self, **options): - self.options = options - self.stripnl = get_bool_opt(options, 'stripnl', True) - self.stripall = get_bool_opt(options, 'stripall', False) - self.ensurenl = get_bool_opt(options, 'ensurenl', True) - self.tabsize = get_int_opt(options, 'tabsize', 0) - self.encoding = options.get('encoding', 'guess') - self.encoding = options.get('inencoding') or self.encoding - self.filters = [] - for filter_ in get_list_opt(options, 'filters', ()): - self.add_filter(filter_) - - def __repr__(self): - if self.options: - return '' % (self.__class__.__name__, - self.options) - else: - return '' % self.__class__.__name__ - - def add_filter(self, filter_, **options): - """ - Add a new stream filter to this lexer. - """ - if not isinstance(filter_, Filter): - filter_ = get_filter_by_name(filter_, **options) - self.filters.append(filter_) - - def analyse_text(text): - """ - Has to return a float between ``0`` and ``1`` that indicates - if a lexer wants to highlight this text. Used by ``guess_lexer``. - If this method returns ``0`` it won't highlight it in any case, if - it returns ``1`` highlighting with this lexer is guaranteed. - - The `LexerMeta` metaclass automatically wraps this function so - that it works like a static method (no ``self`` or ``cls`` - parameter) and the return value is automatically converted to - `float`. If the return value is an object that is boolean `False` - it's the same as if the return values was ``0.0``. - """ - - def get_tokens(self, text, unfiltered=False): - """ - Return an iterable of (tokentype, value) pairs generated from - `text`. If `unfiltered` is set to `True`, the filtering mechanism - is bypassed even if filters are defined. - - Also preprocess the text, i.e. expand tabs and strip it if - wanted and applies registered filters. 
- """ - if not isinstance(text, str): - if self.encoding == 'guess': - text, _ = guess_decode(text) - elif self.encoding == 'chardet': - try: - from pip._vendor import chardet - except ImportError as e: - raise ImportError('To enable chardet encoding guessing, ' - 'please install the chardet library ' - 'from http://chardet.feedparser.org/') from e - # check for BOM first - decoded = None - for bom, encoding in _encoding_map: - if text.startswith(bom): - decoded = text[len(bom):].decode(encoding, 'replace') - break - # no BOM found, so use chardet - if decoded is None: - enc = chardet.detect(text[:1024]) # Guess using first 1KB - decoded = text.decode(enc.get('encoding') or 'utf-8', - 'replace') - text = decoded - else: - text = text.decode(self.encoding) - if text.startswith('\ufeff'): - text = text[len('\ufeff'):] - else: - if text.startswith('\ufeff'): - text = text[len('\ufeff'):] - - # text now *is* a unicode string - text = text.replace('\r\n', '\n') - text = text.replace('\r', '\n') - if self.stripall: - text = text.strip() - elif self.stripnl: - text = text.strip('\n') - if self.tabsize > 0: - text = text.expandtabs(self.tabsize) - if self.ensurenl and not text.endswith('\n'): - text += '\n' - - def streamer(): - for _, t, v in self.get_tokens_unprocessed(text): - yield t, v - stream = streamer() - if not unfiltered: - stream = apply_filters(stream, self.filters, self) - return stream - - def get_tokens_unprocessed(self, text): - """ - Return an iterable of (index, tokentype, value) pairs where "index" - is the starting position of the token within the input text. - - In subclasses, implement this method as a generator to - maximize effectiveness. - """ - raise NotImplementedError - - -class DelegatingLexer(Lexer): - """ - This lexer takes two lexer as arguments. A root lexer and - a language lexer. First everything is scanned using the language - lexer, afterwards all ``Other`` tokens are lexed using the root - lexer. - - The lexers from the ``template`` lexer package use this base lexer. - """ - - def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): - self.root_lexer = _root_lexer(**options) - self.language_lexer = _language_lexer(**options) - self.needle = _needle - Lexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - buffered = '' - insertions = [] - lng_buffer = [] - for i, t, v in self.language_lexer.get_tokens_unprocessed(text): - if t is self.needle: - if lng_buffer: - insertions.append((len(buffered), lng_buffer)) - lng_buffer = [] - buffered += v - else: - lng_buffer.append((i, t, v)) - if lng_buffer: - insertions.append((len(buffered), lng_buffer)) - return do_insertions(insertions, - self.root_lexer.get_tokens_unprocessed(buffered)) - - -# ------------------------------------------------------------------------------ -# RegexLexer and ExtendedRegexLexer -# - - -class include(str): # pylint: disable=invalid-name - """ - Indicates that a state should include rules from another state. - """ - pass - - -class _inherit: - """ - Indicates the a state should inherit from its superclass. - """ - def __repr__(self): - return 'inherit' - -inherit = _inherit() # pylint: disable=invalid-name - - -class combined(tuple): # pylint: disable=invalid-name - """ - Indicates a state combined from multiple states. - """ - - def __new__(cls, *args): - return tuple.__new__(cls, args) - - def __init__(self, *args): - # tuple.__init__ doesn't do anything - pass - - -class _PseudoMatch: - """ - A pseudo match object constructed from a string. 
- """ - - def __init__(self, start, text): - self._text = text - self._start = start - - def start(self, arg=None): - return self._start - - def end(self, arg=None): - return self._start + len(self._text) - - def group(self, arg=None): - if arg: - raise IndexError('No such group') - return self._text - - def groups(self): - return (self._text,) - - def groupdict(self): - return {} - - -def bygroups(*args): - """ - Callback that yields multiple actions for each group in the match. - """ - def callback(lexer, match, ctx=None): - for i, action in enumerate(args): - if action is None: - continue - elif type(action) is _TokenType: - data = match.group(i + 1) - if data: - yield match.start(i + 1), action, data - else: - data = match.group(i + 1) - if data is not None: - if ctx: - ctx.pos = match.start(i + 1) - for item in action(lexer, - _PseudoMatch(match.start(i + 1), data), ctx): - if item: - yield item - if ctx: - ctx.pos = match.end() - return callback - - -class _This: - """ - Special singleton used for indicating the caller class. - Used by ``using``. - """ - -this = _This() - - -def using(_other, **kwargs): - """ - Callback that processes the match with a different lexer. - - The keyword arguments are forwarded to the lexer, except `state` which - is handled separately. - - `state` specifies the state that the new lexer will start in, and can - be an enumerable such as ('root', 'inline', 'string') or a simple - string which is assumed to be on top of the root state. - - Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. - """ - gt_kwargs = {} - if 'state' in kwargs: - s = kwargs.pop('state') - if isinstance(s, (list, tuple)): - gt_kwargs['stack'] = s - else: - gt_kwargs['stack'] = ('root', s) - - if _other is this: - def callback(lexer, match, ctx=None): - # if keyword arguments are given the callback - # function has to create a new lexer instance - if kwargs: - # XXX: cache that somehow - kwargs.update(lexer.options) - lx = lexer.__class__(**kwargs) - else: - lx = lexer - s = match.start() - for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): - yield i + s, t, v - if ctx: - ctx.pos = match.end() - else: - def callback(lexer, match, ctx=None): - # XXX: cache that somehow - kwargs.update(lexer.options) - lx = _other(**kwargs) - - s = match.start() - for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): - yield i + s, t, v - if ctx: - ctx.pos = match.end() - return callback - - -class default: - """ - Indicates a state or state action (e.g. #pop) to apply. - For example default('#pop') is equivalent to ('', Token, '#pop') - Note that state tuples may be used as well. - - .. versionadded:: 2.0 - """ - def __init__(self, state): - self.state = state - - -class words(Future): - """ - Indicates a list of literal words that is transformed into an optimized - regex that matches any of the words. - - .. versionadded:: 2.0 - """ - def __init__(self, words, prefix='', suffix=''): - self.words = words - self.prefix = prefix - self.suffix = suffix - - def get(self): - return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix) - - -class RegexLexerMeta(LexerMeta): - """ - Metaclass for RegexLexer, creates the self._tokens attribute from - self.tokens on the first instantiation. 
- """ - - def _process_regex(cls, regex, rflags, state): - """Preprocess the regular expression component of a token definition.""" - if isinstance(regex, Future): - regex = regex.get() - return re.compile(regex, rflags).match - - def _process_token(cls, token): - """Preprocess the token component of a token definition.""" - assert type(token) is _TokenType or callable(token), \ - 'token type must be simple type or callable, not %r' % (token,) - return token - - def _process_new_state(cls, new_state, unprocessed, processed): - """Preprocess the state transition action of a token definition.""" - if isinstance(new_state, str): - # an existing state - if new_state == '#pop': - return -1 - elif new_state in unprocessed: - return (new_state,) - elif new_state == '#push': - return new_state - elif new_state[:5] == '#pop:': - return -int(new_state[5:]) - else: - assert False, 'unknown new state %r' % new_state - elif isinstance(new_state, combined): - # combine a new state from existing ones - tmp_state = '_tmp_%d' % cls._tmpname - cls._tmpname += 1 - itokens = [] - for istate in new_state: - assert istate != new_state, 'circular state ref %r' % istate - itokens.extend(cls._process_state(unprocessed, - processed, istate)) - processed[tmp_state] = itokens - return (tmp_state,) - elif isinstance(new_state, tuple): - # push more than one state - for istate in new_state: - assert (istate in unprocessed or - istate in ('#pop', '#push')), \ - 'unknown new state ' + istate - return new_state - else: - assert False, 'unknown new state def %r' % new_state - - def _process_state(cls, unprocessed, processed, state): - """Preprocess a single state definition.""" - assert type(state) is str, "wrong state name %r" % state - assert state[0] != '#', "invalid state name %r" % state - if state in processed: - return processed[state] - tokens = processed[state] = [] - rflags = cls.flags - for tdef in unprocessed[state]: - if isinstance(tdef, include): - # it's a state reference - assert tdef != state, "circular state reference %r" % state - tokens.extend(cls._process_state(unprocessed, processed, - str(tdef))) - continue - if isinstance(tdef, _inherit): - # should be processed already, but may not in the case of: - # 1. the state has no counterpart in any parent - # 2. the state includes more than one 'inherit' - continue - if isinstance(tdef, default): - new_state = cls._process_new_state(tdef.state, unprocessed, processed) - tokens.append((re.compile('').match, None, new_state)) - continue - - assert type(tdef) is tuple, "wrong rule def %r" % tdef - - try: - rex = cls._process_regex(tdef[0], rflags, state) - except Exception as err: - raise ValueError("uncompilable regex %r in state %r of %r: %s" % - (tdef[0], state, cls, err)) from err - - token = cls._process_token(tdef[1]) - - if len(tdef) == 2: - new_state = None - else: - new_state = cls._process_new_state(tdef[2], - unprocessed, processed) - - tokens.append((rex, token, new_state)) - return tokens - - def process_tokendef(cls, name, tokendefs=None): - """Preprocess a dictionary of token definitions.""" - processed = cls._all_tokens[name] = {} - tokendefs = tokendefs or cls.tokens[name] - for state in list(tokendefs): - cls._process_state(tokendefs, processed, state) - return processed - - def get_tokendefs(cls): - """ - Merge tokens from superclasses in MRO order, returning a single tokendef - dictionary. - - Any state that is not defined by a subclass will be inherited - automatically. 
States that *are* defined by subclasses will, by - default, override that state in the superclass. If a subclass wishes to - inherit definitions from a superclass, it can use the special value - "inherit", which will cause the superclass' state definition to be - included at that point in the state. - """ - tokens = {} - inheritable = {} - for c in cls.__mro__: - toks = c.__dict__.get('tokens', {}) - - for state, items in toks.items(): - curitems = tokens.get(state) - if curitems is None: - # N.b. because this is assigned by reference, sufficiently - # deep hierarchies are processed incrementally (e.g. for - # A(B), B(C), C(RegexLexer), B will be premodified so X(B) - # will not see any inherits in B). - tokens[state] = items - try: - inherit_ndx = items.index(inherit) - except ValueError: - continue - inheritable[state] = inherit_ndx - continue - - inherit_ndx = inheritable.pop(state, None) - if inherit_ndx is None: - continue - - # Replace the "inherit" value with the items - curitems[inherit_ndx:inherit_ndx+1] = items - try: - # N.b. this is the index in items (that is, the superclass - # copy), so offset required when storing below. - new_inh_ndx = items.index(inherit) - except ValueError: - pass - else: - inheritable[state] = inherit_ndx + new_inh_ndx - - return tokens - - def __call__(cls, *args, **kwds): - """Instantiate cls after preprocessing its token definitions.""" - if '_tokens' not in cls.__dict__: - cls._all_tokens = {} - cls._tmpname = 0 - if hasattr(cls, 'token_variants') and cls.token_variants: - # don't process yet - pass - else: - cls._tokens = cls.process_tokendef('', cls.get_tokendefs()) - - return type.__call__(cls, *args, **kwds) - - -class RegexLexer(Lexer, metaclass=RegexLexerMeta): - """ - Base for simple stateful regular expression-based lexers. - Simplifies the lexing process so that you need only - provide a list of states and regular expressions. - """ - - #: Flags for compiling the regular expressions. - #: Defaults to MULTILINE. - flags = re.MULTILINE - - #: At all time there is a stack of states. Initially, the stack contains - #: a single state 'root'. The top of the stack is called "the current state". - #: - #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}`` - #: - #: ``new_state`` can be omitted to signify no state transition. - #: If ``new_state`` is a string, it is pushed on the stack. This ensure - #: the new current state is ``new_state``. - #: If ``new_state`` is a tuple of strings, all of those strings are pushed - #: on the stack and the current state will be the last element of the list. - #: ``new_state`` can also be ``combined('state1', 'state2', ...)`` - #: to signify a new, anonymous state combined from the rules of two - #: or more existing ones. - #: Furthermore, it can be '#pop' to signify going back one step in - #: the state stack, or '#push' to push the current state on the stack - #: again. Note that if you push while in a combined state, the combined - #: state itself is pushed, and not only the state in which the rule is - #: defined. - #: - #: The tuple can also be replaced with ``include('state')``, in which - #: case the rules from the state named by the string are included in the - #: current one. - tokens = {} - - def get_tokens_unprocessed(self, text, stack=('root',)): - """ - Split ``text`` into (tokentype, text) pairs. 
- - ``stack`` is the initial stack (default: ``['root']``) - """ - pos = 0 - tokendefs = self._tokens - statestack = list(stack) - statetokens = tokendefs[statestack[-1]] - while 1: - for rexmatch, action, new_state in statetokens: - m = rexmatch(text, pos) - if m: - if action is not None: - if type(action) is _TokenType: - yield pos, action, m.group() - else: - yield from action(self, m) - pos = m.end() - if new_state is not None: - # state transition - if isinstance(new_state, tuple): - for state in new_state: - if state == '#pop': - if len(statestack) > 1: - statestack.pop() - elif state == '#push': - statestack.append(statestack[-1]) - else: - statestack.append(state) - elif isinstance(new_state, int): - # pop, but keep at least one state on the stack - # (random code leading to unexpected pops should - # not allow exceptions) - if abs(new_state) >= len(statestack): - del statestack[1:] - else: - del statestack[new_state:] - elif new_state == '#push': - statestack.append(statestack[-1]) - else: - assert False, "wrong state def: %r" % new_state - statetokens = tokendefs[statestack[-1]] - break - else: - # We are here only if all state tokens have been considered - # and there was not a match on any of them. - try: - if text[pos] == '\n': - # at EOL, reset state to "root" - statestack = ['root'] - statetokens = tokendefs['root'] - yield pos, Text, '\n' - pos += 1 - continue - yield pos, Error, text[pos] - pos += 1 - except IndexError: - break - - -class LexerContext: - """ - A helper object that holds lexer position data. - """ - - def __init__(self, text, pos, stack=None, end=None): - self.text = text - self.pos = pos - self.end = end or len(text) # end=0 not supported ;-) - self.stack = stack or ['root'] - - def __repr__(self): - return 'LexerContext(%r, %r, %r)' % ( - self.text, self.pos, self.stack) - - -class ExtendedRegexLexer(RegexLexer): - """ - A RegexLexer that uses a context object to store its state. - """ - - def get_tokens_unprocessed(self, text=None, context=None): - """ - Split ``text`` into (tokentype, text) pairs. - If ``context`` is given, use this lexer context instead. - """ - tokendefs = self._tokens - if not context: - ctx = LexerContext(text, 0) - statetokens = tokendefs['root'] - else: - ctx = context - statetokens = tokendefs[ctx.stack[-1]] - text = ctx.text - while 1: - for rexmatch, action, new_state in statetokens: - m = rexmatch(text, ctx.pos, ctx.end) - if m: - if action is not None: - if type(action) is _TokenType: - yield ctx.pos, action, m.group() - ctx.pos = m.end() - else: - yield from action(self, m, ctx) - if not new_state: - # altered the state stack? - statetokens = tokendefs[ctx.stack[-1]] - # CAUTION: callback must set ctx.pos! 
- if new_state is not None: - # state transition - if isinstance(new_state, tuple): - for state in new_state: - if state == '#pop': - if len(ctx.stack) > 1: - ctx.stack.pop() - elif state == '#push': - ctx.stack.append(ctx.stack[-1]) - else: - ctx.stack.append(state) - elif isinstance(new_state, int): - # see RegexLexer for why this check is made - if abs(new_state) >= len(ctx.stack): - del ctx.stack[1:] - else: - del ctx.stack[new_state:] - elif new_state == '#push': - ctx.stack.append(ctx.stack[-1]) - else: - assert False, "wrong state def: %r" % new_state - statetokens = tokendefs[ctx.stack[-1]] - break - else: - try: - if ctx.pos >= ctx.end: - break - if text[ctx.pos] == '\n': - # at EOL, reset state to "root" - ctx.stack = ['root'] - statetokens = tokendefs['root'] - yield ctx.pos, Text, '\n' - ctx.pos += 1 - continue - yield ctx.pos, Error, text[ctx.pos] - ctx.pos += 1 - except IndexError: - break - - -def do_insertions(insertions, tokens): - """ - Helper for lexers which must combine the results of several - sublexers. - - ``insertions`` is a list of ``(index, itokens)`` pairs. - Each ``itokens`` iterable should be inserted at position - ``index`` into the token stream given by the ``tokens`` - argument. - - The result is a combined token stream. - - TODO: clean up the code here. - """ - insertions = iter(insertions) - try: - index, itokens = next(insertions) - except StopIteration: - # no insertions - yield from tokens - return - - realpos = None - insleft = True - - # iterate over the token stream where we want to insert - # the tokens from the insertion list. - for i, t, v in tokens: - # first iteration. store the position of first item - if realpos is None: - realpos = i - oldi = 0 - while insleft and i + len(v) >= index: - tmpval = v[oldi:index - i] - if tmpval: - yield realpos, t, tmpval - realpos += len(tmpval) - for it_index, it_token, it_value in itokens: - yield realpos, it_token, it_value - realpos += len(it_value) - oldi = index - i - try: - index, itokens = next(insertions) - except StopIteration: - insleft = False - break # not strictly necessary - if oldi < len(v): - yield realpos, t, v[oldi:] - realpos += len(v) - oldi - - # leftover tokens - while insleft: - # no normal tokens, set realpos to zero - realpos = realpos or 0 - for p, t, v in itokens: - yield realpos, t, v - realpos += len(v) - try: - index, itokens = next(insertions) - except StopIteration: - insleft = False - break # not strictly necessary - - -class ProfilingRegexLexerMeta(RegexLexerMeta): - """Metaclass for ProfilingRegexLexer, collects regex timing info.""" - - def _process_regex(cls, regex, rflags, state): - if isinstance(regex, words): - rex = regex_opt(regex.words, prefix=regex.prefix, - suffix=regex.suffix) - else: - rex = regex - compiled = re.compile(rex, rflags) - - def match_func(text, pos, endpos=sys.maxsize): - info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0]) - t0 = time.time() - res = compiled.match(text, pos, endpos) - t1 = time.time() - info[0] += 1 - info[1] += t1 - t0 - return res - return match_func - - -class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta): - """Drop-in replacement for RegexLexer that does profiling of its regexes.""" - - _prof_data = [] - _prof_sort_index = 4 # defaults to time per call - - def get_tokens_unprocessed(self, text, stack=('root',)): - # this needs to be a stack, since using(this) will produce nested calls - self.__class__._prof_data.append({}) - yield from RegexLexer.get_tokens_unprocessed(self, text, stack) - rawdata = 
self.__class__._prof_data.pop() - data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65], - n, 1000 * t, 1000 * t / n) - for ((s, r), (n, t)) in rawdata.items()), - key=lambda x: x[self._prof_sort_index], - reverse=True) - sum_total = sum(x[3] for x in data) - - print() - print('Profiling result for %s lexing %d chars in %.3f ms' % - (self.__class__.__name__, len(text), sum_total)) - print('=' * 110) - print('%-20s %-64s ncalls tottime percall' % ('state', 'regex')) - print('-' * 110) - for d in data: - print('%-20s %-65s %5d %8.4f %8.4f' % d) - print('=' * 110) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/util/timeout.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/util/timeout.py deleted file mode 100644 index ff69593b05b5eb5fcd336b4bd16193c44dc48ef5..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/util/timeout.py +++ /dev/null @@ -1,268 +0,0 @@ -from __future__ import absolute_import - -import time - -# The default socket timeout, used by httplib to indicate that no timeout was -# specified by the user -from socket import _GLOBAL_DEFAULT_TIMEOUT - -from ..exceptions import TimeoutStateError - -# A sentinel value to indicate that no timeout was specified by the user in -# urllib3 -_Default = object() - - -# Use time.monotonic if available. -current_time = getattr(time, "monotonic", time.time) - - -class Timeout(object): - """Timeout configuration. - - Timeouts can be defined as a default for a pool: - - .. code-block:: python - - timeout = Timeout(connect=2.0, read=7.0) - http = PoolManager(timeout=timeout) - response = http.request('GET', 'http://example.com/') - - Or per-request (which overrides the default for the pool): - - .. code-block:: python - - response = http.request('GET', 'http://example.com/', timeout=Timeout(10)) - - Timeouts can be disabled by setting all the parameters to ``None``: - - .. code-block:: python - - no_timeout = Timeout(connect=None, read=None) - response = http.request('GET', 'http://example.com/, timeout=no_timeout) - - - :param total: - This combines the connect and read timeouts into one; the read timeout - will be set to the time leftover from the connect attempt. In the - event that both a connect timeout and a total are specified, or a read - timeout and a total are specified, the shorter timeout will be applied. - - Defaults to None. - - :type total: int, float, or None - - :param connect: - The maximum amount of time (in seconds) to wait for a connection - attempt to a server to succeed. Omitting the parameter will default the - connect timeout to the system default, probably `the global default - timeout in socket.py - `_. - None will set an infinite timeout for connection attempts. - - :type connect: int, float, or None - - :param read: - The maximum amount of time (in seconds) to wait between consecutive - read operations for a response from the server. Omitting the parameter - will default the read timeout to the system default, probably `the - global default timeout in socket.py - `_. - None will set an infinite timeout. - - :type read: int, float, or None - - .. note:: - - Many factors can affect the total amount of time for urllib3 to return - an HTTP response. - - For example, Python's DNS resolver does not obey the timeout specified - on the socket. 
Other factors that can affect total request time include - high CPU load, high swap, the program running at a low priority level, - or other behaviors. - - In addition, the read and total timeouts only measure the time between - read operations on the socket connecting the client and the server, - not the total amount of time for the request to return a complete - response. For most requests, the timeout is raised because the server - has not sent the first byte in the specified time. This is not always - the case; if a server streams one byte every fifteen seconds, a timeout - of 20 seconds will not trigger, even though the request will take - several minutes to complete. - - If your goal is to cut off any request after a set amount of wall clock - time, consider having a second "watcher" thread to cut off a slow - request. - """ - - #: A sentinel object representing the default timeout value - DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT - - def __init__(self, total=None, connect=_Default, read=_Default): - self._connect = self._validate_timeout(connect, "connect") - self._read = self._validate_timeout(read, "read") - self.total = self._validate_timeout(total, "total") - self._start_connect = None - - def __repr__(self): - return "%s(connect=%r, read=%r, total=%r)" % ( - type(self).__name__, - self._connect, - self._read, - self.total, - ) - - # __str__ provided for backwards compatibility - __str__ = __repr__ - - @classmethod - def _validate_timeout(cls, value, name): - """Check that a timeout attribute is valid. - - :param value: The timeout value to validate - :param name: The name of the timeout attribute to validate. This is - used to specify in error messages. - :return: The validated and casted version of the given value. - :raises ValueError: If it is a numeric value less than or equal to - zero, or the type is not an integer, float, or None. - """ - if value is _Default: - return cls.DEFAULT_TIMEOUT - - if value is None or value is cls.DEFAULT_TIMEOUT: - return value - - if isinstance(value, bool): - raise ValueError( - "Timeout cannot be a boolean value. It must " - "be an int, float or None." - ) - try: - float(value) - except (TypeError, ValueError): - raise ValueError( - "Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value) - ) - - try: - if value <= 0: - raise ValueError( - "Attempted to set %s timeout to %s, but the " - "timeout cannot be set to a value less " - "than or equal to 0." % (name, value) - ) - except TypeError: - # Python 3 - raise ValueError( - "Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value) - ) - - return value - - @classmethod - def from_float(cls, timeout): - """Create a new Timeout from a legacy timeout value. - - The timeout value used by httplib.py sets the same timeout on the - connect(), and recv() socket requests. This creates a :class:`Timeout` - object that sets the individual timeouts to the ``timeout`` value - passed to this function. - - :param timeout: The legacy timeout value. - :type timeout: integer, float, sentinel default object, or None - :return: Timeout object - :rtype: :class:`Timeout` - """ - return Timeout(read=timeout, connect=timeout) - - def clone(self): - """Create a copy of the timeout object - - Timeout properties are stored per-pool but each request needs a fresh - Timeout object to ensure each one has its own start/stop configured. 
- - :return: a copy of the timeout object - :rtype: :class:`Timeout` - """ - # We can't use copy.deepcopy because that will also create a new object - # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to - # detect the user default. - return Timeout(connect=self._connect, read=self._read, total=self.total) - - def start_connect(self): - """Start the timeout clock, used during a connect() attempt - - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to start a timer that has been started already. - """ - if self._start_connect is not None: - raise TimeoutStateError("Timeout timer has already been started.") - self._start_connect = current_time() - return self._start_connect - - def get_connect_duration(self): - """Gets the time elapsed since the call to :meth:`start_connect`. - - :return: Elapsed time in seconds. - :rtype: float - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to get duration for a timer that hasn't been started. - """ - if self._start_connect is None: - raise TimeoutStateError( - "Can't get connect duration for timer that has not started." - ) - return current_time() - self._start_connect - - @property - def connect_timeout(self): - """Get the value to use when setting a connection timeout. - - This will be a positive float or integer, the value None - (never timeout), or the default system timeout. - - :return: Connect timeout. - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - """ - if self.total is None: - return self._connect - - if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: - return self.total - - return min(self._connect, self.total) - - @property - def read_timeout(self): - """Get the value for the read timeout. - - This assumes some time has elapsed in the connection timeout and - computes the read timeout appropriately. - - If self.total is set, the read timeout is dependent on the amount of - time taken by the connect timeout. If the connection time has not been - established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be - raised. - - :return: Value to use for the read timeout. - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` - has not yet been called on this object. - """ - if ( - self.total is not None - and self.total is not self.DEFAULT_TIMEOUT - and self._read is not None - and self._read is not self.DEFAULT_TIMEOUT - ): - # In case the connect timeout has not yet been established. 
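- # If start_connect() was never called there is no connect duration to
- # subtract, so the configured read timeout is returned as-is; otherwise
- # the read timeout is capped by the time left in the total budget
- # (clamped at zero).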
- if self._start_connect is None: - return self._read - return max(0, min(self.total - self.get_connect_duration(), self._read)) - elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: - return max(0, self.total - self.get_connect_duration()) - else: - return self._read diff --git a/spaces/Rayzggz/illi-Bert-VITS2/mel_processing.py b/spaces/Rayzggz/illi-Bert-VITS2/mel_processing.py deleted file mode 100644 index aab5bd926a194610b7ce3da29c553bd877341aa4..0000000000000000000000000000000000000000 --- a/spaces/Rayzggz/illi-Bert-VITS2/mel_processing.py +++ /dev/null @@ -1,139 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.0: - print("min value is ", torch.min(y)) - if torch.max(y) > 1.0: - print("max value is ", torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + "_" + str(y.device) - wnsize_dtype_device = str(win_size) + "_" + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to( - dtype=y.dtype, device=y.device - ) - - y = torch.nn.functional.pad( - y.unsqueeze(1), - (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), - mode="reflect", - ) - y = y.squeeze(1) - - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window[wnsize_dtype_device], - center=center, - pad_mode="reflect", - normalized=False, - onesided=True, - return_complex=False, - ) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + "_" + str(spec.device) - fmax_dtype_device = str(fmax) + "_" + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to( - dtype=spec.dtype, device=spec.device - ) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch( - y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False -): - if torch.min(y) < -1.0: - print("min value is ", torch.min(y)) - if torch.max(y) > 1.0: - print("max value is ", torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + "_" + str(y.device) - fmax_dtype_device = str(fmax) + "_" + dtype_device - wnsize_dtype_device = str(win_size) + "_" + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to( - dtype=y.dtype, device=y.device - ) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = 
torch.hann_window(win_size).to( - dtype=y.dtype, device=y.device - ) - - y = torch.nn.functional.pad( - y.unsqueeze(1), - (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), - mode="reflect", - ) - y = y.squeeze(1) - - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window[wnsize_dtype_device], - center=center, - pad_mode="reflect", - normalized=False, - onesided=True, - return_complex=False, - ) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/Reeve/Ohayou_Face/models/mtcnn/mtcnn_pytorch/src/__init__.py b/spaces/Reeve/Ohayou_Face/models/mtcnn/mtcnn_pytorch/src/__init__.py deleted file mode 100644 index 617ba38c34b1801b2db2e0209b4e886c9d24c490..0000000000000000000000000000000000000000 --- a/spaces/Reeve/Ohayou_Face/models/mtcnn/mtcnn_pytorch/src/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .visualization_utils import show_bboxes -from .detector import detect_faces diff --git a/spaces/Ridwanz/sdrv1_4/README.md b/spaces/Ridwanz/sdrv1_4/README.md deleted file mode 100644 index 832aaccac2f5c6781fd8de3fa0c56158ff7a1032..0000000000000000000000000000000000000000 --- a/spaces/Ridwanz/sdrv1_4/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: SD Realistic Vision v1.4 -emoji: ⚡ -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: true -duplicated_from: Thafx/sdrv1_4 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/RobLi/ControlNet-v1-1/style.css b/spaces/RobLi/ControlNet-v1-1/style.css deleted file mode 100644 index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000 --- a/spaces/RobLi/ControlNet-v1-1/style.css +++ /dev/null @@ -1,3 +0,0 @@ -h1 { - text-align: center; -} diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmseg/apis/train.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmseg/apis/train.py deleted file mode 100644 index 63f319a919ff023931a6a663e668f27dd1a07a2e..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmseg/apis/train.py +++ /dev/null @@ -1,116 +0,0 @@ -import random -import warnings - -import numpy as np -import torch -from annotator.uniformer.mmcv.parallel import MMDataParallel, MMDistributedDataParallel -from annotator.uniformer.mmcv.runner import build_optimizer, build_runner - -from annotator.uniformer.mmseg.core import DistEvalHook, EvalHook -from annotator.uniformer.mmseg.datasets import build_dataloader, build_dataset -from annotator.uniformer.mmseg.utils import get_root_logger - - -def set_random_seed(seed, deterministic=False): - """Set random seed. - - Args: - seed (int): Seed to be used. - deterministic (bool): Whether to set the deterministic option for - CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` - to True and `torch.backends.cudnn.benchmark` to False. - Default: False. 
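- Example (illustrative):
- >>> set_random_seed(0, deterministic=True)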
- """ - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - if deterministic: - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - -def train_segmentor(model, - dataset, - cfg, - distributed=False, - validate=False, - timestamp=None, - meta=None): - """Launch segmentor training.""" - logger = get_root_logger(cfg.log_level) - - # prepare data loaders - dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] - data_loaders = [ - build_dataloader( - ds, - cfg.data.samples_per_gpu, - cfg.data.workers_per_gpu, - # cfg.gpus will be ignored if distributed - len(cfg.gpu_ids), - dist=distributed, - seed=cfg.seed, - drop_last=True) for ds in dataset - ] - - # put model on gpus - if distributed: - find_unused_parameters = cfg.get('find_unused_parameters', False) - # Sets the `find_unused_parameters` parameter in - # torch.nn.parallel.DistributedDataParallel - model = MMDistributedDataParallel( - model.cuda(), - device_ids=[torch.cuda.current_device()], - broadcast_buffers=False, - find_unused_parameters=find_unused_parameters) - else: - model = MMDataParallel( - model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids) - - # build runner - optimizer = build_optimizer(model, cfg.optimizer) - - if cfg.get('runner') is None: - cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters} - warnings.warn( - 'config is now expected to have a `runner` section, ' - 'please set `runner` in your config.', UserWarning) - - runner = build_runner( - cfg.runner, - default_args=dict( - model=model, - batch_processor=None, - optimizer=optimizer, - work_dir=cfg.work_dir, - logger=logger, - meta=meta)) - - # register hooks - runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config, - cfg.checkpoint_config, cfg.log_config, - cfg.get('momentum_config', None)) - - # an ugly walkaround to make the .log and .log.json filenames the same - runner.timestamp = timestamp - - # register eval hooks - if validate: - val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) - val_dataloader = build_dataloader( - val_dataset, - samples_per_gpu=1, - workers_per_gpu=cfg.data.workers_per_gpu, - dist=distributed, - shuffle=False) - eval_cfg = cfg.get('evaluation', {}) - eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' - eval_hook = DistEvalHook if distributed else EvalHook - runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW') - - if cfg.resume_from: - runner.resume(cfg.resume_from) - elif cfg.load_from: - runner.load_checkpoint(cfg.load_from) - runner.run(data_loaders, cfg.workflow) diff --git a/spaces/Rongjiehuang/GenerSpeech/tasks/run.py b/spaces/Rongjiehuang/GenerSpeech/tasks/run.py deleted file mode 100644 index 82c7559cec873eebf7c2c0ab6554895e21de7e7c..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/GenerSpeech/tasks/run.py +++ /dev/null @@ -1,15 +0,0 @@ -import importlib -from utils.hparams import set_hparams, hparams - - -def run_task(): - assert hparams['task_cls'] != '' - pkg = ".".join(hparams["task_cls"].split(".")[:-1]) - cls_name = hparams["task_cls"].split(".")[-1] - task_cls = getattr(importlib.import_module(pkg), cls_name) - task_cls.start() - - -if __name__ == '__main__': - set_hparams() - run_task() diff --git a/spaces/Rubens/semantic_similarity/app.py b/spaces/Rubens/semantic_similarity/app.py deleted file mode 100644 index 8dbe2988c329fd98cf51397a083ce0908b262f87..0000000000000000000000000000000000000000 --- 
a/spaces/Rubens/semantic_similarity/app.py +++ /dev/null @@ -1,25 +0,0 @@ -import openai -import gradio as gr - - -def metadata_generator(api, text): - openai.api_key = api - response = openai.Completion.create( - engine="text-davinci-003", - prompt="{}\n\nPlease provide 6 semantically similar phrases for the phrase above mentioned".format( - text), - temperature=0.9, - max_tokens=800, - top_p=1.0, - frequency_penalty=0.0, - presence_penalty=0.0 - ) - resumo = response["choices"][0].text - return resumo - -demo = gr.Interface(fn=metadata_generator, inputs=[gr.inputs.Textbox(label='ENTER OPEN AI API KEY'), - gr.inputs.Textbox(label='ENTER A PHRASE FOR SEMANTIC SIMILARITY - Click *Clear* before adding new input')], - outputs=gr.outputs.Textbox( - label='SEMANTIC SIMILAR PHRASES'), - css='div {margin-left: auto; margin-right: auto; width: 100%;\ - background-image: url("https://drive.google.com/uc?export=view&id=1KNnISAUcvh2Pt08f-EJZJYCIgkrKw3PI"); repeat 0 0;}').launch(share=False) diff --git a/spaces/RyanX/BookSearch/bert/module.py b/spaces/RyanX/BookSearch/bert/module.py deleted file mode 100644 index 70abbf864e26337e781851eab5853f4efa4e3e02..0000000000000000000000000000000000000000 --- a/spaces/RyanX/BookSearch/bert/module.py +++ /dev/null @@ -1,23 +0,0 @@ -import torch.nn as nn - - -class IntentClassifier(nn.Module): - def __init__(self, input_dim, num_intent_labels, dropout_rate=0.): - super(IntentClassifier, self).__init__() - self.dropout = nn.Dropout(dropout_rate) - self.linear = nn.Linear(input_dim, num_intent_labels) - - def forward(self, x): - x = self.dropout(x) - return self.linear(x) - - -class SlotClassifier(nn.Module): - def __init__(self, input_dim, num_slot_labels, dropout_rate=0.): - super(SlotClassifier, self).__init__() - self.dropout = nn.Dropout(dropout_rate) - self.linear = nn.Linear(input_dim, num_slot_labels) - - def forward(self, x): - x = self.dropout(x) - return self.linear(x) diff --git a/spaces/Ryzal/rvc-models-new/vc_infer_pipeline.py b/spaces/Ryzal/rvc-models-new/vc_infer_pipeline.py deleted file mode 100644 index 82c15f59a8072e1b317fa1d750ccc1b814a6989d..0000000000000000000000000000000000000000 --- a/spaces/Ryzal/rvc-models-new/vc_infer_pipeline.py +++ /dev/null @@ -1,443 +0,0 @@ -import numpy as np, parselmouth, torch, pdb, sys, os -from time import time as ttime -import torch.nn.functional as F -import scipy.signal as signal -import pyworld, os, traceback, faiss, librosa, torchcrepe -from scipy import signal -from functools import lru_cache - -now_dir = os.getcwd() -sys.path.append(now_dir) - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - -input_audio_path2wav = {} - - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, - fs=fs, - f0_ceil=f0max, - f0_floor=f0min, - frame_period=frame_period, - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - - -def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比 - # print(data1.max(),data2.max()) - rms1 = librosa.feature.rms( - y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 - ) # 每半秒一个点 - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.from_numpy(rms2) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.max(rms2, 
torch.zeros_like(rms2) + 1e-6) - data2 *= ( - torch.pow(rms1, torch.tensor(1 - rate)) - * torch.pow(rms2, torch.tensor(rate - 1)) - ).numpy() - return data2 - - -class VC(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * self.x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # 查询切点前后查询时间 - self.t_center = self.sr * self.x_center # 查询切点位置 - self.t_max = self.sr * self.x_max # 免查询时长阈值 - self.device = config.device - - def get_f0( - self, - input_audio_path, - x, - p_len, - f0_up_key, - f0_method, - filter_radius, - inp_f0=None, - ): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = signal.medfilt(f0, 3) - elif f0_method == "crepe": - model = "full" - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - elif f0_method == "rmvpe": - if hasattr(self, "model_rmvpe") == False: - from rmvpe import RMVPE - - print("loading rmvpe model") - self.model_rmvpe = RMVPE( - "rmvpe.pt", is_half=self.is_half, device=self.device - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - ): # ,file_index,file_big_npy - feats = 
torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = feats.clone() - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch != None and pitchf != None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]) - .data.cpu() - .float() - .numpy() - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy() - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - f0_file=None, - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - 
np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0( - input_audio_path, - audio_pad, - p_len, - f0_up_key, - f0_method, - filter_radius, - inp_f0, - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - if self.device == "mps": - pitchf = pitchf.astype(np.float32) - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if resample_sr >= 16000 and tgt_sr != resample_sr: - audio_opt = librosa.resample( - audio_opt, orig_sr=tgt_sr, target_sr=resample_sr - ) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/Sarst/VITS-Umamusume-voice-synthesizer2/text/english.py b/spaces/Sarst/VITS-Umamusume-voice-synthesizer2/text/english.py deleted file mode 100644 index 6817392ba8a9eb830351de89fb7afc5ad72f5e42..0000000000000000000000000000000000000000 --- a/spaces/Sarst/VITS-Umamusume-voice-synthesizer2/text/english.py +++ /dev/null @@ -1,188 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. 
You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - - -# Regular expression matching whitespace: - - -import re -import inflect -from unidecode import unidecode -import eng_to_ipa as ipa -_inflect = inflect.engine() -_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') -_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') -_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') -_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') -_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') -_number_re = re.compile(r'[0-9]+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -# List of (ipa, lazy ipa) pairs: -_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('æ', 'e'), - ('ɑ', 'a'), - ('ɔ', 'o'), - ('ð', 'z'), - ('θ', 's'), - ('ɛ', 'e'), - ('ɪ', 'i'), - ('ʊ', 'u'), - ('ʒ', 'ʥ'), - ('ʤ', 'ʥ'), - ('ˈ', '↓'), -]] - -# List of (ipa, lazy ipa2) pairs: -_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ð', 'z'), - ('θ', 's'), - ('ʒ', 'ʑ'), - ('ʤ', 'dʑ'), - ('ˈ', '↓'), -]] - -# List of (ipa, ipa2) pairs -_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ʤ', 'dʒ'), - ('ʧ', 'tʃ') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def collapse_whitespace(text): - return re.sub(r'\s+', ' ', text) - - -def _remove_commas(m): - return m.group(1).replace(',', '') - - -def _expand_decimal_point(m): - return m.group(1).replace('.', ' point ') - - -def _expand_dollars(m): - match = m.group(1) - parts = match.split('.') - if len(parts) > 2: - return match + ' dollars' # Unexpected format - dollars = int(parts[0]) if parts[0] else 0 - cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 - if dollars and cents: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) - elif dollars: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - return '%s %s' % (dollars, dollar_unit) - elif cents: - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s' % (cents, cent_unit) - else: - return 'zero dollars' - - -def _expand_ordinal(m): - return _inflect.number_to_words(m.group(0)) - - -def _expand_number(m): - num = int(m.group(0)) - if num > 1000 and num < 3000: - if num == 2000: - return 'two thousand' - elif num > 2000 and num < 2010: - return 'two thousand ' + _inflect.number_to_words(num % 100) - elif num % 100 == 0: - return _inflect.number_to_words(num // 100) + ' hundred' - else: - return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') - else: - return _inflect.number_to_words(num, 
andword='') - - -def normalize_numbers(text): - text = re.sub(_comma_number_re, _remove_commas, text) - text = re.sub(_pounds_re, r'\1 pounds', text) - text = re.sub(_dollars_re, _expand_dollars, text) - text = re.sub(_decimal_number_re, _expand_decimal_point, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, _expand_number, text) - return text - - -def mark_dark_l(text): - return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) - - -def english_to_ipa(text): - text = unidecode(text).lower() - text = expand_abbreviations(text) - text = normalize_numbers(text) - phonemes = ipa.convert(text) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_to_lazy_ipa(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def english_to_ipa2(text): - text = english_to_ipa(text) - text = mark_dark_l(text) - for regex, replacement in _ipa_to_ipa2: - text = re.sub(regex, replacement, text) - return text.replace('...', '…') - - -def english_to_lazy_ipa2(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa2: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/SarthakSidhant/Go-Cattle/diseases/yersiniosis.md b/spaces/SarthakSidhant/Go-Cattle/diseases/yersiniosis.md deleted file mode 100644 index c7b9c7b1c920929a23768f938ff1cf55a5774c08..0000000000000000000000000000000000000000 --- a/spaces/SarthakSidhant/Go-Cattle/diseases/yersiniosis.md +++ /dev/null @@ -1,41 +0,0 @@ -## Yersiniosis - -**Information** : Yersiniosis is a bacterial infection caused by a bacterium called Yersinia enterocolitica. Yersinia enterocolitica is a Gram-negative, rod-shaped bacterium that can be found in the environment, in animals, and in humans. - - -**Symptoms** - -The symptoms of yersiniosis in cattle can vary depending on the strain of Yersinia and the animal's individual immune response. Some infected cattle may show no symptoms at all, while others may develop a range of symptoms, including: - -* Fever -* Diarrhea -* Vomiting -* Depression -* Weight loss -* Death - -**Remedies** - -There is no specific treatment for yersiniosis. Treatment is usually supportive and may include: - -* Administering fluids and electrolytes -* Treating secondary bacterial infections -* Administering antibiotics - -**Causes** - -Yersiniosis is caused by a bacterium called Yersinia enterocolitica. Yersinia enterocolitica is a Gram-negative, rod-shaped bacterium that can be found in the environment, in animals, and in humans. Cattle can become infected with Yersinia enterocolitica through contact with infected animals or their feces, contaminated feed or water, or through contact with contaminated surfaces. - -**Prevention** - -There is no vaccine available for yersiniosis in cattle. However, there are some preventive measures that can be taken to reduce the risk of infection, such as: - -* Practicing good hygiene and biosecurity measures -* Vaccinating cattle against other diseases that can weaken the immune system, such as bovine viral diarrhea virus (BVDV) and rotavirus -* Testing cattle for Yersinia infection -* Isolating infected animals from healthy animals -* Treating contaminated feed and water - -**Differential diagnosis** - -Yersiniosis can be difficult to distinguish from other diseases that cause diarrhea, such as salmonella, campylobacter, and coliform bacteria. 
A veterinarian can diagnose yersiniosis by testing a sample of the feces for the presence of Yersinia enterocolitica. diff --git a/spaces/Satyam-Singh/garage-bAInd-Platypus2-70B/app.py b/spaces/Satyam-Singh/garage-bAInd-Platypus2-70B/app.py deleted file mode 100644 index 2beba810e6b4eca862f7cc881f95bef0846304fc..0000000000000000000000000000000000000000 --- a/spaces/Satyam-Singh/garage-bAInd-Platypus2-70B/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/garage-bAInd/Platypus2-70B").launch() \ No newline at end of file diff --git a/spaces/SeViLA/SeViLA/lavis/models/blip2_models/modeling_opt.py b/spaces/SeViLA/SeViLA/lavis/models/blip2_models/modeling_opt.py deleted file mode 100644 index 1d4077c83a706825131be82702deba5e344b87e0..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/lavis/models/blip2_models/modeling_opt.py +++ /dev/null @@ -1,1113 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" PyTorch OPT model.""" -import random -from typing import List, Optional, Tuple, Union - -import torch -import torch.utils.checkpoint -from torch import nn -from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss - -from transformers.activations import ACT2FN -from transformers.modeling_outputs import ( - BaseModelOutputWithPast, - CausalLMOutputWithPast, -) -from transformers.modeling_utils import PreTrainedModel -from transformers.utils import ( - add_code_sample_docstrings, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, - replace_return_docstrings, -) -from transformers.models.opt.configuration_opt import OPTConfig - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "facebook/opt-350m" -_CONFIG_FOR_DOC = "OPTConfig" -_TOKENIZER_FOR_DOC = "GPT2Tokenizer" - -# Base model docstring -_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024] - -# SequenceClassification docstring -_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ArthurZ/opt-350m-dummy-sc" -_SEQ_CLASS_EXPECTED_LOSS = 1.71 -_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'" - -# QuestionAnswering docstring -_QA_EXPECTED_OUTPUT = "'a nice puppet'" -_QA_EXPECTED_LOSS = 7.41 -_QA_TARGET_START_INDEX = 14 -_QA_TARGET_END_INDEX = 15 - -OPT_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "facebook/opt-125m", - "facebook/opt-350m", - "facebook/opt-1.3b", - "facebook/opt-2.7b", - "facebook/opt-6.7b", - "facebook/opt-13b", - "facebook/opt-30b", - # See all OPT models at https://huggingface.co/models?filter=opt -] - - -def _make_causal_mask( - input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0 -): - """ - Make causal mask used for bi-directional self-attention. 
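- The returned mask has shape (bsz, 1, tgt_len, tgt_len + past_key_values_length);
- positions that may be attended hold 0 and masked positions hold the dtype's
- minimum value.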
- """ - bsz, tgt_len = input_ids_shape - mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min)) - mask_cond = torch.arange(mask.size(-1)) - mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) - mask = mask.to(dtype) - - if past_key_values_length > 0: - mask = torch.cat( - [torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1 - ) - return mask[None, None, :, :].expand( - bsz, 1, tgt_len, tgt_len + past_key_values_length - ) - - -def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): - """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. - """ - bsz, src_len = mask.size() - tgt_len = tgt_len if tgt_len is not None else src_len - - expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) - - inverted_mask = 1.0 - expanded_mask - - return inverted_mask.masked_fill( - inverted_mask.to(torch.bool), torch.finfo(dtype).min - ) - - -class OPTLearnedPositionalEmbedding(nn.Embedding): - """ - This module learns positional embeddings up to a fixed maximum size. - """ - - def __init__(self, num_embeddings: int, embedding_dim: int): - # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2 - # and adjust num_embeddings appropriately. Other models don't have this hack - self.offset = 2 - super().__init__(num_embeddings + self.offset, embedding_dim) - - def forward( - self, attention_mask: torch.LongTensor, past_key_values_length: int = 0 - ): - """`input_ids_shape` is expected to be [bsz x seqlen].""" - attention_mask = attention_mask.long() - - # create positions depending on attention_mask - positions = ( - torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask - ).long() - 1 - - # cut positions if `past_key_values_length` is > 0 - positions = positions[:, past_key_values_length:] - - return super().forward(positions + self.offset) - - -class OPTAttention(nn.Module): - """Multi-headed attention from 'Attention Is All You Need' paper""" - - def __init__( - self, - embed_dim: int, - num_heads: int, - dropout: float = 0.0, - is_decoder: bool = False, - bias: bool = True, - ): - super().__init__() - self.embed_dim = embed_dim - self.num_heads = num_heads - self.dropout = dropout - self.head_dim = embed_dim // num_heads - - if (self.head_dim * num_heads) != self.embed_dim: - raise ValueError( - f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" - f" and `num_heads`: {num_heads})." 
- ) - self.scaling = self.head_dim**-0.5 - self.is_decoder = is_decoder - - self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return ( - tensor.view(bsz, seq_len, self.num_heads, self.head_dim) - .transpose(1, 2) - .contiguous() - ) - - def forward( - self, - hidden_states: torch.Tensor, - key_value_states: Optional[torch.Tensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - attention_mask: Optional[torch.Tensor] = None, - layer_head_mask: Optional[torch.Tensor] = None, - output_attentions: bool = False, - ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - """Input shape: Batch x Time x Channel""" - - # if key_value_states are provided this layer is used as a cross-attention layer - # for the decoder - is_cross_attention = key_value_states is not None - - bsz, tgt_len, _ = hidden_states.size() - - # get query proj - query_states = self.q_proj(hidden_states) * self.scaling - # get key, value proj - if is_cross_attention and past_key_value is not None: - # reuse k,v, cross_attentions - key_states = past_key_value[0] - value_states = past_key_value[1] - elif is_cross_attention: - # cross_attentions - key_states = self._shape(self.k_proj(key_value_states), -1, bsz) - value_states = self._shape(self.v_proj(key_value_states), -1, bsz) - elif past_key_value is not None: - # reuse k, v, self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - key_states = torch.cat([past_key_value[0], key_states], dim=2) - value_states = torch.cat([past_key_value[1], value_states], dim=2) - else: - # self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - - if self.is_decoder: - # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. - # Further calls to cross_attention layer can then reuse all cross-attention - # key/value_states (first "if" case) - # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of - # all previous decoder key/value_states. 
Further calls to uni-directional self-attention - # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) - # if encoder bi-directional self-attention `past_key_value` is always `None` - past_key_value = (key_states, value_states) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) - key_states = key_states.view(*proj_shape) - value_states = value_states.view(*proj_shape) - - src_len = key_states.size(1) - attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) - - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {attn_weights.size()}" - ) - - if attention_mask is not None: - if attention_mask.size() != (bsz, 1, tgt_len, src_len): - raise ValueError( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" - ) - attn_weights = ( - attn_weights.view(bsz, self.num_heads, tgt_len, src_len) - + attention_mask - ) - attn_weights = torch.max( - attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min) - ) - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - - # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437 - if attn_weights.dtype == torch.float16: - attn_weights = nn.functional.softmax( - attn_weights, dim=-1, dtype=torch.float32 - ).to(torch.float16) - else: - attn_weights = nn.functional.softmax(attn_weights, dim=-1) - - if layer_head_mask is not None: - if layer_head_mask.size() != (self.num_heads,): - raise ValueError( - f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" - f" {layer_head_mask.size()}" - ) - attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view( - bsz, self.num_heads, tgt_len, src_len - ) - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - - if output_attentions: - # this operation is a bit awkward, but it's required to - # make sure that attn_weights keeps its gradient. - # In order to do so, attn_weights have to be reshaped - # twice and have to be reused in the following - attn_weights_reshaped = attn_weights.view( - bsz, self.num_heads, tgt_len, src_len - ) - attn_weights = attn_weights_reshaped.view( - bsz * self.num_heads, tgt_len, src_len - ) - else: - attn_weights_reshaped = None - - attn_probs = nn.functional.dropout( - attn_weights, p=self.dropout, training=self.training - ) - - attn_output = torch.bmm(attn_probs, value_states) - - if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): - raise ValueError( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {attn_output.size()}" - ) - - attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) - attn_output = attn_output.transpose(1, 2) - - # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be - # partitioned aross GPUs when using tensor-parallelism. 
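- # After the transpose above, attn_output has shape (bsz, tgt_len, num_heads, head_dim);
- # the reshape below merges the heads back into embed_dim before the output projection.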
- attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) - - attn_output = self.out_proj(attn_output) - - return attn_output, attn_weights_reshaped, past_key_value - - -class OPTDecoderLayer(nn.Module): - def __init__(self, config: OPTConfig): - super().__init__() - self.embed_dim = config.hidden_size - self.self_attn = OPTAttention( - embed_dim=self.embed_dim, - num_heads=config.num_attention_heads, - dropout=config.attention_dropout, - is_decoder=True, - ) - self.do_layer_norm_before = config.do_layer_norm_before - self.dropout = config.dropout - self.activation_fn = ACT2FN[config.activation_function] - - self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim) - self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim) - self.final_layer_norm = nn.LayerNorm(self.embed_dim) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - layer_head_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, - use_cache: Optional[bool] = False, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - ) -> Tuple[ - torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] - ]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`, *optional*): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size - `(encoder_attention_heads,)`. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding - (see `past_key_values`). 
- past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states - """ - - residual = hidden_states - - # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention - if self.do_layer_norm_before: - hidden_states = self.self_attn_layer_norm(hidden_states) - - # Self Attention - hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - past_key_value=past_key_value, - attention_mask=attention_mask, - layer_head_mask=layer_head_mask, - output_attentions=output_attentions, - ) - hidden_states = nn.functional.dropout( - hidden_states, p=self.dropout, training=self.training - ) - hidden_states = residual + hidden_states - - # 350m applies layer norm AFTER attention - if not self.do_layer_norm_before: - hidden_states = self.self_attn_layer_norm(hidden_states) - - # Fully Connected - hidden_states_shape = hidden_states.shape - hidden_states = hidden_states.reshape(-1, hidden_states.size(-1)) - residual = hidden_states - - # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention - if self.do_layer_norm_before: - hidden_states = self.final_layer_norm(hidden_states) - - hidden_states = self.fc1(hidden_states) - hidden_states = self.activation_fn(hidden_states) - - hidden_states = self.fc2(hidden_states) - hidden_states = nn.functional.dropout( - hidden_states, p=self.dropout, training=self.training - ) - - hidden_states = (residual + hidden_states).view(hidden_states_shape) - - # 350m applies layer norm AFTER attention - if not self.do_layer_norm_before: - hidden_states = self.final_layer_norm(hidden_states) - - outputs = (hidden_states,) - - if output_attentions: - outputs += (self_attn_weights,) - - if use_cache: - outputs += (present_key_value,) - - return outputs - - -OPT_START_DOCSTRING = r""" - This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the - library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. - Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage - and behavior. - - Parameters: - config ([`OPTConfig`]): - Model configuration class with all the parameters of the model. Initializing with a config file does not - load the weights associated with the model, only the configuration. Check out the - [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
-""" - - -@add_start_docstrings( - "The bare OPT Model outputting raw hidden-states without any specific head on top.", - OPT_START_DOCSTRING, -) -class OPTPreTrainedModel(PreTrainedModel): - - config_class = OPTConfig - base_model_prefix = "model" - supports_gradient_checkpointing = True - _no_split_modules = ["OPTDecoderLayer"] - _keys_to_ignore_on_load_unexpected = [r"decoder\.version"] - - def _init_weights(self, module): - std = self.config.init_std - if isinstance(module, nn.Linear): - module.weight.data.normal_(mean=0.0, std=std) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=std) - if module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, (OPTDecoder)): - module.gradient_checkpointing = value - - -OPT_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide - it. - - Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - - Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see - `past_key_values`). - - If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] - and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more - information on the default strategy. - head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape - `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape - `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. - - Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention - blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. - - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that - don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all - `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
- inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This - is useful if you want more control over how to convert `input_ids` indices into associated vectors than the - model's internal embedding lookup matrix. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see - `past_key_values`). - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - - -class OPTDecoder(OPTPreTrainedModel): - """ - Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OPTDecoderLayer`] - - Args: - config: OPTConfig - """ - - def __init__(self, config: OPTConfig): - super().__init__(config) - self.dropout = config.dropout - self.layerdrop = config.layerdrop - self.padding_idx = config.pad_token_id - self.max_target_positions = config.max_position_embeddings - self.vocab_size = config.vocab_size - - self.embed_tokens = nn.Embedding( - config.vocab_size, config.word_embed_proj_dim, self.padding_idx - ) - self.embed_positions = OPTLearnedPositionalEmbedding( - config.max_position_embeddings, config.hidden_size - ) - - if config.word_embed_proj_dim != config.hidden_size: - self.project_out = nn.Linear( - config.hidden_size, config.word_embed_proj_dim, bias=False - ) - else: - self.project_out = None - - if config.word_embed_proj_dim != config.hidden_size: - self.project_in = nn.Linear( - config.word_embed_proj_dim, config.hidden_size, bias=False - ) - else: - self.project_in = None - - # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility - # with checkpoints that have been fine-tuned before transformers v4.20.1 - # see https://github.com/facebookresearch/metaseq/pull/164 - if config.do_layer_norm_before and not config._remove_final_layer_norm: - self.final_layer_norm = nn.LayerNorm(config.hidden_size) - else: - self.final_layer_norm = None - - self.layers = nn.ModuleList( - [OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)] - ) - - self.gradient_checkpointing = False - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.embed_tokens - - def set_input_embeddings(self, value): - self.embed_tokens = value - - # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask - def _prepare_decoder_attention_mask( - self, attention_mask, input_shape, inputs_embeds, past_key_values_length - ): - # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - combined_attention_mask = None - if input_shape[-1] > 1: - combined_attention_mask = _make_causal_mask( - input_shape, - inputs_embeds.dtype, - past_key_values_length=past_key_values_length, - ).to(inputs_embeds.device) - - if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask( - attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] - 
).to(inputs_embeds.device) - combined_attention_mask = ( - expanded_attn_mask - if combined_attention_mask is None - else expanded_attn_mask + combined_attention_mask - ) - - return combined_attention_mask - - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - query_embeds: Optional[torch.FloatTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutputWithPast]: - r""" - Args: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you - provide it. - - Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of - shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of - - Contains pre-computed hidden-states (key and values in the self-attention blocks and in the - cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. - - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those - that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of - all `decoder_input_ids` of shape `(batch_size, sequence_length)`. - - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
- """ - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - - return_dict = ( - return_dict if return_dict is not None else self.config.use_return_dict - ) - - # retrieve input_ids and inputs_embeds - if input_ids is not None and inputs_embeds is not None: - raise ValueError( - "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time" - ) - elif input_ids is not None: - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - else: - raise ValueError( - "You have to specify either decoder_input_ids or decoder_inputs_embeds" - ) - - past_key_values_length = ( - past_key_values[0][0].shape[2] if past_key_values is not None else 0 - ) - - if inputs_embeds is None: - inputs_embeds = self.embed_tokens(input_ids) - - if query_embeds is not None: - inputs_embeds = torch.cat([query_embeds, inputs_embeds], dim=1) - input_shape = inputs_embeds.size()[:-1] - - # embed positions - if attention_mask is None: - attention_mask = torch.ones( - inputs_embeds.shape[:2], dtype=torch.bool, device=inputs_embeds.device - ) - pos_embeds = self.embed_positions(attention_mask, past_key_values_length) - - attention_mask = self._prepare_decoder_attention_mask( - attention_mask, input_shape, inputs_embeds, past_key_values_length - ) - - if self.project_in is not None: - inputs_embeds = self.project_in(inputs_embeds) - - hidden_states = inputs_embeds + pos_embeds - - # decoder layers - all_hidden_states = () if output_hidden_states else None - all_self_attns = () if output_attentions else None - next_decoder_cache = () if use_cache else None - - # check if head_mask has a correct number of layers specified if desired - for attn_mask, mask_name in zip([head_mask], ["head_mask"]): - if attn_mask is not None: - if attn_mask.size()[0] != (len(self.layers)): - raise ValueError( - f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" - f" {head_mask.size()[0]}." - ) - - for idx, decoder_layer in enumerate(self.layers): - # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) - if output_hidden_states: - all_hidden_states += (hidden_states,) - - dropout_probability = random.uniform(0, 1) - if self.training and (dropout_probability < self.layerdrop): - continue - - past_key_value = ( - past_key_values[idx] if past_key_values is not None else None - ) - - if self.gradient_checkpointing and self.training: - - if use_cache: - logger.warning( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
- ) - use_cache = False - - def create_custom_forward(module): - def custom_forward(*inputs): - # None for past_key_value - return module(*inputs, output_attentions, None) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(decoder_layer), - hidden_states, - attention_mask, - head_mask[idx] if head_mask is not None else None, - None, - ) - else: - - layer_outputs = decoder_layer( - hidden_states, - attention_mask=attention_mask, - layer_head_mask=(head_mask[idx] if head_mask is not None else None), - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - ) - - hidden_states = layer_outputs[0] - - if use_cache: - next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) - - if output_attentions: - all_self_attns += (layer_outputs[1],) - - if self.final_layer_norm is not None: - hidden_states = self.final_layer_norm(hidden_states) - - if self.project_out is not None: - hidden_states = self.project_out(hidden_states) - - # add hidden states from the last decoder layer - if output_hidden_states: - all_hidden_states += (hidden_states,) - - next_cache = next_decoder_cache if use_cache else None - if not return_dict: - return tuple( - v - for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] - if v is not None - ) - return BaseModelOutputWithPast( - last_hidden_state=hidden_states, - past_key_values=next_cache, - hidden_states=all_hidden_states, - attentions=all_self_attns, - ) - - -@add_start_docstrings( - "The bare OPT Model outputting raw hidden-states without any specific head on top.", - OPT_START_DOCSTRING, -) -class OPTModel(OPTPreTrainedModel): - def __init__(self, config: OPTConfig): - super().__init__(config) - self.decoder = OPTDecoder(config) - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.decoder.embed_tokens - - def set_input_embeddings(self, value): - self.decoder.embed_tokens = value - - def get_decoder(self): - return self.decoder - - @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING) - @add_code_sample_docstrings( - processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=BaseModelOutputWithPast, - config_class=_CONFIG_FOR_DOC, - expected_output=_EXPECTED_OUTPUT_SHAPE, - ) - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - query_embeds: Optional[torch.FloatTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutputWithPast]: - - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - return_dict = ( - return_dict if return_dict is not None else self.config.use_return_dict - ) - - # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) - decoder_outputs = self.decoder( - input_ids=input_ids, - attention_mask=attention_mask, - head_mask=head_mask, - past_key_values=past_key_values, - 
inputs_embeds=inputs_embeds, - query_embeds=query_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - if not return_dict: - return decoder_outputs - - return BaseModelOutputWithPast( - last_hidden_state=decoder_outputs.last_hidden_state, - past_key_values=decoder_outputs.past_key_values, - hidden_states=decoder_outputs.hidden_states, - attentions=decoder_outputs.attentions, - ) - - -class OPTForCausalLM(OPTPreTrainedModel): - _keys_to_ignore_on_load_missing = [r"lm_head.weight"] - - def __init__(self, config): - super().__init__(config) - self.model = OPTModel(config) - - # the lm_head weight is automatically tied to the embed tokens weight - self.lm_head = nn.Linear( - config.word_embed_proj_dim, config.vocab_size, bias=False - ) - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.model.decoder.embed_tokens - - def set_input_embeddings(self, value): - self.model.decoder.embed_tokens = value - - def get_output_embeddings(self): - return self.lm_head - - def set_output_embeddings(self, new_embeddings): - self.lm_head = new_embeddings - - def set_decoder(self, decoder): - self.model.decoder = decoder - - def get_decoder(self): - return self.model.decoder - - @replace_return_docstrings( - output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC - ) - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - query_embeds: Optional[torch.FloatTensor] = None, - labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - reduction: Optional[str] = "mean", - ) -> Union[Tuple, CausalLMOutputWithPast]: - r""" - Args: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you - provide it. - - Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of - shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of - shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. 
The two additional - tensors are only required when the model is used as a decoder in a Sequence to Sequence model. - - Contains pre-computed hidden-states (key and values in the self-attention blocks and in the - cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. - - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those - that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of - all `decoder_input_ids` of shape `(batch_size, sequence_length)`. - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., - config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored - (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding - (see `past_key_values`). - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. - - Returns: - - Example: - - ```python - >>> from transformers import GPT2Tokenizer, OPTForCausalLM - - >>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m") - >>> tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m") - - >>> prompt = "Hey, are you consciours? Can you talk to me?" - >>> inputs = tokenizer(prompt, return_tensors="pt") - - >>> # Generate - >>> generate_ids = model.generate(inputs.input_ids, max_length=30) - >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] - "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." 
- ```""" - - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - return_dict = ( - return_dict if return_dict is not None else self.config.use_return_dict - ) - - # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) - outputs = self.model.decoder( - input_ids=input_ids, - attention_mask=attention_mask, - head_mask=head_mask, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - query_embeds=query_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - logits = self.lm_head(outputs[0]).contiguous() - - loss = None - if labels is not None: - logits = logits[:, -labels.size(1) :, :] - - # Shift so that tokens < n predict n - shift_logits = logits[..., :-1, :].contiguous() - shift_labels = labels[..., 1:].contiguous() - # Flatten the tokens - loss_fct = CrossEntropyLoss(reduction=reduction) - loss = loss_fct( - shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1) - ) - if reduction == "none": - loss = loss.view(shift_logits.size(0), -1).sum(1) - - if not return_dict: - output = (logits,) + outputs[1:] - return (loss,) + output if loss is not None else output - - return CausalLMOutputWithPast( - loss=loss, - logits=logits, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - def prepare_inputs_for_generation( - self, - input_ids=None, - query_embeds=None, - past=None, - attention_mask=None, - use_cache=None, - **kwargs, - ): - # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly - if attention_mask is None: - if input_ids is not None: - attention_mask = input_ids.new_ones(input_ids.shape) - if past: - input_ids = input_ids[:, -1:] - query_embeds = None - # first step, decoder_cached_states are empty - return { - "input_ids": input_ids, - "query_embeds": query_embeds, - "attention_mask": attention_mask, - "past_key_values": past, - "use_cache": use_cache, - } - - @staticmethod - def _reorder_cache(past, beam_idx): - reordered_past = () - for layer_past in past: - reordered_past += ( - tuple( - past_state.index_select(0, beam_idx) for past_state in layer_past - ), - ) - return reordered_past diff --git a/spaces/Silentlin/DiffSinger/modules/parallel_wavegan/models/parallel_wavegan.py b/spaces/Silentlin/DiffSinger/modules/parallel_wavegan/models/parallel_wavegan.py deleted file mode 100644 index c63b59f67aa48342179415c1d1beac68574a5498..0000000000000000000000000000000000000000 --- a/spaces/Silentlin/DiffSinger/modules/parallel_wavegan/models/parallel_wavegan.py +++ /dev/null @@ -1,434 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""Parallel WaveGAN Modules.""" - -import logging -import math - -import torch -from torch import nn - -from modules.parallel_wavegan.layers import Conv1d -from modules.parallel_wavegan.layers import Conv1d1x1 -from modules.parallel_wavegan.layers import ResidualBlock -from modules.parallel_wavegan.layers import upsample -from modules.parallel_wavegan import models - - -class ParallelWaveGANGenerator(torch.nn.Module): - """Parallel WaveGAN Generator module.""" - - def __init__(self, - in_channels=1, - out_channels=1, - 
kernel_size=3, - layers=30, - stacks=3, - residual_channels=64, - gate_channels=128, - skip_channels=64, - aux_channels=80, - aux_context_window=2, - dropout=0.0, - bias=True, - use_weight_norm=True, - use_causal_conv=False, - upsample_conditional_features=True, - upsample_net="ConvInUpsampleNetwork", - upsample_params={"upsample_scales": [4, 4, 4, 4]}, - use_pitch_embed=False, - ): - """Initialize Parallel WaveGAN Generator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - kernel_size (int): Kernel size of dilated convolution. - layers (int): Number of residual block layers. - stacks (int): Number of stacks i.e., dilation cycles. - residual_channels (int): Number of channels in residual conv. - gate_channels (int): Number of channels in gated conv. - skip_channels (int): Number of channels in skip conv. - aux_channels (int): Number of channels for auxiliary feature conv. - aux_context_window (int): Context window size for auxiliary feature. - dropout (float): Dropout rate. 0.0 means no dropout applied. - bias (bool): Whether to use bias parameter in conv layer. - use_weight_norm (bool): Whether to use weight norm. - If set to true, it will be applied to all of the conv layers. - use_causal_conv (bool): Whether to use causal structure. - upsample_conditional_features (bool): Whether to use upsampling network. - upsample_net (str): Upsampling network architecture. - upsample_params (dict): Upsampling network parameters. - - """ - super(ParallelWaveGANGenerator, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.aux_channels = aux_channels - self.layers = layers - self.stacks = stacks - self.kernel_size = kernel_size - - # check the number of layers and stacks - assert layers % stacks == 0 - layers_per_stack = layers // stacks - - # define first convolution - self.first_conv = Conv1d1x1(in_channels, residual_channels, bias=True) - - # define conv + upsampling network - if upsample_conditional_features: - upsample_params.update({ - "use_causal_conv": use_causal_conv, - }) - if upsample_net == "MelGANGenerator": - assert aux_context_window == 0 - upsample_params.update({ - "use_weight_norm": False, # not to apply twice - "use_final_nonlinear_activation": False, - }) - self.upsample_net = getattr(models, upsample_net)(**upsample_params) - else: - if upsample_net == "ConvInUpsampleNetwork": - upsample_params.update({ - "aux_channels": aux_channels, - "aux_context_window": aux_context_window, - }) - self.upsample_net = getattr(upsample, upsample_net)(**upsample_params) - else: - self.upsample_net = None - - # define residual blocks - self.conv_layers = torch.nn.ModuleList() - for layer in range(layers): - dilation = 2 ** (layer % layers_per_stack) - conv = ResidualBlock( - kernel_size=kernel_size, - residual_channels=residual_channels, - gate_channels=gate_channels, - skip_channels=skip_channels, - aux_channels=aux_channels, - dilation=dilation, - dropout=dropout, - bias=bias, - use_causal_conv=use_causal_conv, - ) - self.conv_layers += [conv] - - # define output layers - self.last_conv_layers = torch.nn.ModuleList([ - torch.nn.ReLU(inplace=True), - Conv1d1x1(skip_channels, skip_channels, bias=True), - torch.nn.ReLU(inplace=True), - Conv1d1x1(skip_channels, out_channels, bias=True), - ]) - - self.use_pitch_embed = use_pitch_embed - if use_pitch_embed: - self.pitch_embed = nn.Embedding(300, aux_channels, 0) - self.c_proj = nn.Linear(2 * aux_channels, aux_channels) - - # apply weight norm - if 
use_weight_norm: - self.apply_weight_norm() - - def forward(self, x, c=None, pitch=None, **kwargs): - """Calculate forward propagation. - - Args: - x (Tensor): Input noise signal (B, C_in, T). - c (Tensor): Local conditioning auxiliary features (B, C ,T'). - pitch (Tensor): Local conditioning pitch (B, T'). - - Returns: - Tensor: Output tensor (B, C_out, T) - - """ - # perform upsampling - if c is not None and self.upsample_net is not None: - if self.use_pitch_embed: - p = self.pitch_embed(pitch) - c = self.c_proj(torch.cat([c.transpose(1, 2), p], -1)).transpose(1, 2) - c = self.upsample_net(c) - assert c.size(-1) == x.size(-1), (c.size(-1), x.size(-1)) - - # encode to hidden representation - x = self.first_conv(x) - skips = 0 - for f in self.conv_layers: - x, h = f(x, c) - skips += h - skips *= math.sqrt(1.0 / len(self.conv_layers)) - - # apply final layers - x = skips - for f in self.last_conv_layers: - x = f(x) - - return x - - def remove_weight_norm(self): - """Remove weight normalization module from all of the layers.""" - def _remove_weight_norm(m): - try: - logging.debug(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(_remove_weight_norm) - - def apply_weight_norm(self): - """Apply weight normalization module from all of the layers.""" - def _apply_weight_norm(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): - torch.nn.utils.weight_norm(m) - logging.debug(f"Weight norm is applied to {m}.") - - self.apply(_apply_weight_norm) - - @staticmethod - def _get_receptive_field_size(layers, stacks, kernel_size, - dilation=lambda x: 2 ** x): - assert layers % stacks == 0 - layers_per_cycle = layers // stacks - dilations = [dilation(i % layers_per_cycle) for i in range(layers)] - return (kernel_size - 1) * sum(dilations) + 1 - - @property - def receptive_field_size(self): - """Return receptive field size.""" - return self._get_receptive_field_size(self.layers, self.stacks, self.kernel_size) - - -class ParallelWaveGANDiscriminator(torch.nn.Module): - """Parallel WaveGAN Discriminator module.""" - - def __init__(self, - in_channels=1, - out_channels=1, - kernel_size=3, - layers=10, - conv_channels=64, - dilation_factor=1, - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.2}, - bias=True, - use_weight_norm=True, - ): - """Initialize Parallel WaveGAN Discriminator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - kernel_size (int): Number of output channels. - layers (int): Number of conv layers. - conv_channels (int): Number of chnn layers. - dilation_factor (int): Dilation factor. For example, if dilation_factor = 2, - the dilation will be 2, 4, 8, ..., and so on. - nonlinear_activation (str): Nonlinear function after each conv. - nonlinear_activation_params (dict): Nonlinear function parameters - bias (bool): Whether to use bias parameter in conv. - use_weight_norm (bool) Whether to use weight norm. - If set to true, it will be applied to all of the conv layers. - - """ - super(ParallelWaveGANDiscriminator, self).__init__() - assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." - assert dilation_factor > 0, "Dilation factor must be > 0." 
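As a quick sanity check of the receptive-field formula in `_get_receptive_field_size` above: with the default generator settings (layers=30, stacks=3, kernel_size=3) it evaluates to 6139 samples. A minimal standalone sketch of that arithmetic:

layers, stacks, kernel_size = 30, 3, 3
layers_per_cycle = layers // stacks                               # 10 layers per dilation cycle
dilations = [2 ** (i % layers_per_cycle) for i in range(layers)]  # 1, 2, 4, ..., 512, repeated for each stack
receptive_field = (kernel_size - 1) * sum(dilations) + 1
print(receptive_field)                                            # 6139, since sum(dilations) = 3 * 1023 = 3069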
- self.conv_layers = torch.nn.ModuleList() - conv_in_channels = in_channels - for i in range(layers - 1): - if i == 0: - dilation = 1 - else: - dilation = i if dilation_factor == 1 else dilation_factor ** i - conv_in_channels = conv_channels - padding = (kernel_size - 1) // 2 * dilation - conv_layer = [ - Conv1d(conv_in_channels, conv_channels, - kernel_size=kernel_size, padding=padding, - dilation=dilation, bias=bias), - getattr(torch.nn, nonlinear_activation)(inplace=True, **nonlinear_activation_params) - ] - self.conv_layers += conv_layer - padding = (kernel_size - 1) // 2 - last_conv_layer = Conv1d( - conv_in_channels, out_channels, - kernel_size=kernel_size, padding=padding, bias=bias) - self.conv_layers += [last_conv_layer] - - # apply weight norm - if use_weight_norm: - self.apply_weight_norm() - - def forward(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input noise signal (B, 1, T). - - Returns: - Tensor: Output tensor (B, 1, T) - - """ - for f in self.conv_layers: - x = f(x) - return x - - def apply_weight_norm(self): - """Apply weight normalization module from all of the layers.""" - def _apply_weight_norm(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): - torch.nn.utils.weight_norm(m) - logging.debug(f"Weight norm is applied to {m}.") - - self.apply(_apply_weight_norm) - - def remove_weight_norm(self): - """Remove weight normalization module from all of the layers.""" - def _remove_weight_norm(m): - try: - logging.debug(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(_remove_weight_norm) - - -class ResidualParallelWaveGANDiscriminator(torch.nn.Module): - """Parallel WaveGAN Discriminator module.""" - - def __init__(self, - in_channels=1, - out_channels=1, - kernel_size=3, - layers=30, - stacks=3, - residual_channels=64, - gate_channels=128, - skip_channels=64, - dropout=0.0, - bias=True, - use_weight_norm=True, - use_causal_conv=False, - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.2}, - ): - """Initialize Parallel WaveGAN Discriminator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - kernel_size (int): Kernel size of dilated convolution. - layers (int): Number of residual block layers. - stacks (int): Number of stacks i.e., dilation cycles. - residual_channels (int): Number of channels in residual conv. - gate_channels (int): Number of channels in gated conv. - skip_channels (int): Number of channels in skip conv. - dropout (float): Dropout rate. 0.0 means no dropout applied. - bias (bool): Whether to use bias parameter in conv. - use_weight_norm (bool): Whether to use weight norm. - If set to true, it will be applied to all of the conv layers. - use_causal_conv (bool): Whether to use causal structure. - nonlinear_activation_params (dict): Nonlinear function parameters - - """ - super(ResidualParallelWaveGANDiscriminator, self).__init__() - assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." 
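The discriminator's padding rule above, `padding = (kernel_size - 1) // 2 * dilation`, is what keeps every stride-1 dilated convolution length-preserving. A small sketch verifying that, assuming only `torch` is installed; the channel count and sequence length are arbitrary:

import torch

x = torch.randn(1, 64, 1000)
kernel_size = 3
for dilation in (1, 2, 4, 8):
    padding = (kernel_size - 1) // 2 * dilation
    conv = torch.nn.Conv1d(64, 64, kernel_size, padding=padding, dilation=dilation)
    assert conv(x).shape[-1] == x.shape[-1]  # output length matches input length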
- - self.in_channels = in_channels - self.out_channels = out_channels - self.layers = layers - self.stacks = stacks - self.kernel_size = kernel_size - - # check the number of layers and stacks - assert layers % stacks == 0 - layers_per_stack = layers // stacks - - # define first convolution - self.first_conv = torch.nn.Sequential( - Conv1d1x1(in_channels, residual_channels, bias=True), - getattr(torch.nn, nonlinear_activation)( - inplace=True, **nonlinear_activation_params), - ) - - # define residual blocks - self.conv_layers = torch.nn.ModuleList() - for layer in range(layers): - dilation = 2 ** (layer % layers_per_stack) - conv = ResidualBlock( - kernel_size=kernel_size, - residual_channels=residual_channels, - gate_channels=gate_channels, - skip_channels=skip_channels, - aux_channels=-1, - dilation=dilation, - dropout=dropout, - bias=bias, - use_causal_conv=use_causal_conv, - ) - self.conv_layers += [conv] - - # define output layers - self.last_conv_layers = torch.nn.ModuleList([ - getattr(torch.nn, nonlinear_activation)( - inplace=True, **nonlinear_activation_params), - Conv1d1x1(skip_channels, skip_channels, bias=True), - getattr(torch.nn, nonlinear_activation)( - inplace=True, **nonlinear_activation_params), - Conv1d1x1(skip_channels, out_channels, bias=True), - ]) - - # apply weight norm - if use_weight_norm: - self.apply_weight_norm() - - def forward(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input noise signal (B, 1, T). - - Returns: - Tensor: Output tensor (B, 1, T) - - """ - x = self.first_conv(x) - - skips = 0 - for f in self.conv_layers: - x, h = f(x, None) - skips += h - skips *= math.sqrt(1.0 / len(self.conv_layers)) - - # apply final layers - x = skips - for f in self.last_conv_layers: - x = f(x) - return x - - def apply_weight_norm(self): - """Apply weight normalization module from all of the layers.""" - def _apply_weight_norm(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): - torch.nn.utils.weight_norm(m) - logging.debug(f"Weight norm is applied to {m}.") - - self.apply(_apply_weight_norm) - - def remove_weight_norm(self): - """Remove weight normalization module from all of the layers.""" - def _remove_weight_norm(m): - try: - logging.debug(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(_remove_weight_norm) diff --git a/spaces/SkyYeXianer/vits-uma-genshin-honkai/Docker/Dockerfile b/spaces/SkyYeXianer/vits-uma-genshin-honkai/Docker/Dockerfile deleted file mode 100644 index 4d39cdf02a2ec151686cc1d61234bf723068fed8..0000000000000000000000000000000000000000 --- a/spaces/SkyYeXianer/vits-uma-genshin-honkai/Docker/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM python:3.9-bullseye -VOLUME ["/app"] -WORKDIR /app -# Set apt to Chinese mirror -RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list -RUN apt-get update && apt-get -y install cmake git -RUN git clone https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai -WORKDIR /app/vits-uma-genshin-honkai -RUN sed -i "s/\.launch()/\.launch(server_name=\"0.0.0.0\")/" /app/vits-uma-genshin-honkai/app.py -ADD vits.sh /app/vits.sh -EXPOSE 7860 -ENTRYPOINT [ "/app/vits.sh" ] \ No newline at end of file diff --git a/spaces/SouthCity/ShuruiXu/check_proxy.py b/spaces/SouthCity/ShuruiXu/check_proxy.py deleted file mode 100644 index a6919dd37a559d0f3868fdc74b54c488779083d3..0000000000000000000000000000000000000000 --- 
a/spaces/SouthCity/ShuruiXu/check_proxy.py +++ /dev/null @@ -1,27 +0,0 @@ - -def check_proxy(proxies): - import requests - proxies_https = proxies['https'] if proxies is not None else '无' - try: - response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4) - data = response.json() - print(f'查询代理的地理位置,返回的结果是{data}') - if 'country_name' in data: - country = data['country_name'] - result = f"代理配置 {proxies_https}, 代理所在地:{country}" - elif 'error' in data: - result = f"代理配置 {proxies_https}, 代理所在地:未知,IP查询频率受限" - print(result) - return result - except: - result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效" - print(result) - return result - - -if __name__ == '__main__': - import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 - from toolbox import get_conf - proxies, = get_conf('proxies') - check_proxy(proxies) - \ No newline at end of file diff --git a/spaces/Sultannn/Text_summarization_with-MBART/app.py b/spaces/Sultannn/Text_summarization_with-MBART/app.py deleted file mode 100644 index dae4861428fc50d93092a63dbc392336f79a86ce..0000000000000000000000000000000000000000 --- a/spaces/Sultannn/Text_summarization_with-MBART/app.py +++ /dev/null @@ -1,101 +0,0 @@ - -#MBART FINE TUNE -#import os -#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' untuk mengatasi error di TF (tidak bisa) - -import gradio as gr -#import tensorflow as tf masih error -import torch #ganti ke pt - -import sentencepiece -from transformers import MBartTokenizer, MBartForConditionalGeneration - -def run_model(input_text, - min_length, - max_length, - length_penalty): - - #MBART Transformer - mbart_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50")#, from_pt=True) untuk TFMbart - mbart_tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-50") - - #encode input to vector - input_text = str(input_text) - input_text = ' '.join(input_text.split()) # hapus white space - input_tokenized = mbart_tokenizer.encode(input_text, return_tensors='pt') - - #generate input - summary_ids = mbart_model.generate(input_tokenized, - length_penalty = length_penalty, #Atur ke nilai <1.0 untuk menghasilkan urutan yang lebih pendek, ke nilai > 1.0 untuk menghasilkan urutan yang lebih panjang) - min_length = min_length, #Panjang minimum urutan yang akan dihasilkan) - max_length = max_length, #Panjang maksimum urutan yang akan dihasilkan)""" - num_beams = 5,#pencarian - no_repeat_ngram_size = 2,#jika diatur ke int > 0, semua ngram dengan ukuran tersebut hanya dapat muncul sekali. - early_stopping = True) - - #decode output to text - output = mbart_tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) - - return output[0] - -# end - -#example - # source Wikipedia -contoh = [["Dota 2 adalah sebuah permainan arena pertarungan daring multipemain, dan merupakan sekuel dari Defense of the Ancients mod pada Warcraft 3: Reign of Chaos dan Warcraft 3: The Frozen Throne. DotA 2 dikembangkan oleh Valve Corporation, terbit juli 2013 dota 2 dapat dimainkan secara gratis pada sistem operasi Microsoft Windows, OS X and Linux. Dota 2 dapat dimainkan secara eksklusif melalui distributor resmi valve, Steam.Dota 2 dimainkan oleh 2 tim yang beranggota 5 orang pemain, setiap tim memiliki markas yang berada dipojok peta, setiap markas memiliki satu bangunan bernama 'Ancient', Di mana tim harus berusaha menghancurkan 'Ancient' tim lawan agar dapat memenangkan pertandingan. 
Setiap pemain hanya dapat mengontrol satu karakter Hero yang berfokus pada menaikan level, mengumpulkan gold, membeli item dan melawan tim lawan untuk menang.Pengembangan Dota 2 dimulai sejak tahun 2009. Ketika pengembang mod DotA, Icefrog, dipekerjakan oleh Valve sebagai lead designer. Dota 2 dipuji oleh kritikus karena gameplay-nya, kualitas pembuatan dan kesetiaan pada gameplay pendahulu (DotA mod Warcraft 3). tetapi Dota 2 juga menuai kritik sebagai game yang susah dipelajari dan para pemain yang tidak ramah.Sampai pertengahan 2017 Dota 2 menjadi game yang memiliki aktivitas pemain paling banyak di Steam, dengan pucak 800,000 pemain online bersamaan setiap hari", 30, 100, 2], - ["Gangguan jiwa atau penyakit jiwa adalah pola psikologis atau perilaku yang pada umumnya terkait dengan stres atau kelainan jiwa yang tidak dianggap sebagai bagian dari perkembangan normal manusia.[1] Gangguan tersebut didefinisikan sebagai kombinasi afektif, perilaku, komponen kognitif atau persepsi yang berhubungan dengan fungsi tertentu pada daerah otak atau sistem saraf yang menjalankan fungsi sosial manusia. Penemuan dan pengetahuan tentang kondisi kesehatan jiwa telah berubah sepanjang perubahan waktu dan perubahan budaya, dan saat ini masih terdapat perbedaan tentang definisi, penilaan dan klasifikasi, meskipun kriteria pedoman standar telah digunakan secara luas. Lebih dari sepertiga orang di sebagian besar negara-negara melaporkan masalah pada satu waktu pada hidup mereka yang memenuhi kriteria salah satu atau beberapa tipe umum dari kelainan jiwa.", 30, 100, 1]] - -#judul -title = "Text Sumarization id2id" - -#deskripsi -description = "Demo for Text Sumarization id2id. Models are MBART(50 languages)" - -#footer -article = "

    Untuk penjelasan lihat di repo ku 😁

    " - -#run gradio -gr.Interface( - fn=run_model, - #input text - inputs=[ - gr.inputs.Textbox( - lines=7, - placeholder="Ketik disini...", - label="Text", - ), - #fine tune - #min length - gr.inputs.Slider( - minimum=10, - maximum=50, - step=5, - default=20, - label="Min Length(panjang minimal urutan)", - ), - #max length - gr.inputs.Slider( - minimum=100, - maximum=2500, - step=100, - default=300, - label="Max Length(panjang maksimum urutan)", - ), - #length_penalty - gr.inputs.Slider( - minimum=1, - maximum=3, - step=1, - default=1, - label="Length Penalty", - ), - ], - #output text - outputs=gr.outputs.Textbox( - label="Output text", - ), - title=title, - description=description, - article=article, - examples=contoh, - theme = "dark-peach").launch(debug = True) \ No newline at end of file diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/log.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/log.py deleted file mode 100644 index f9dea91ce90f6d0454e98961e104e292fb6c05bd..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/log.py +++ /dev/null @@ -1,5 +0,0 @@ -from warnings import warn - -warn("IPython.utils.log has moved to traitlets.log", stacklevel=2) - -from traitlets.log import * diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/tracking/utils.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/tracking/utils.py deleted file mode 100644 index 78d19984f772c030982402d52307f303b84f98b4..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/tracking/utils.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python3 -import numpy as np -from typing import List - -from annotator.oneformer.detectron2.structures import Instances - - -def create_prediction_pairs( - instances: Instances, - prev_instances: Instances, - iou_all: np.ndarray, - threshold: float = 0.5, -) -> List: - """ - Args: - instances: predictions from current frame - prev_instances: predictions from previous frame - iou_all: 2D numpy array containing iou for each bbox pair - threshold: below the threshold, doesn't consider the pair of bbox is valid - Return: - List of bbox pairs - """ - bbox_pairs = [] - for i in range(len(instances)): - for j in range(len(prev_instances)): - if iou_all[i, j] < threshold: - continue - bbox_pairs.append( - { - "idx": i, - "prev_idx": j, - "prev_id": prev_instances.ID[j], - "IoU": iou_all[i, j], - "prev_period": prev_instances.ID_period[j], - } - ) - return bbox_pairs - - -LARGE_COST_VALUE = 100000 diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/visualization/__init__.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/visualization/__init__.py deleted file mode 100644 index 835df136bdcf69348281d22914d41aa84cdf92b1..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/visualization/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .color import Color, color_val -from .image import imshow, imshow_bboxes, imshow_det_bboxes -from .optflow import flow2rgb, flowshow, make_color_wheel - -__all__ = [ - 'Color', 'color_val', 'imshow', 'imshow_bboxes', 'imshow_det_bboxes', - 'flowshow', 'flow2rgb', 'make_color_wheel' -] diff --git a/spaces/Surn/UnlimitedMusicGen/audiocraft/models/musicgen.py b/spaces/Surn/UnlimitedMusicGen/audiocraft/models/musicgen.py deleted file mode 100644 index 5001a602c58fa3abc02f1b43d1b40c3c4e60a355..0000000000000000000000000000000000000000 --- a/spaces/Surn/UnlimitedMusicGen/audiocraft/models/musicgen.py +++ /dev/null @@ -1,450 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Main model for using MusicGen. This will combine all the required components -and provide easy access to the generation API. -""" - -import os -import typing as tp - -import torch - -from .encodec import CompressionModel -from .lm import LMModel -from .builders import get_debug_compression_model, get_debug_lm_model -from .loaders import load_compression_model, load_lm_model, HF_MODEL_CHECKPOINTS_MAP -from ..data.audio_utils import convert_audio -from ..modules.conditioners import ConditioningAttributes, WavCondition -from ..utils.autocast import TorchAutocast - - -MelodyList = tp.List[tp.Optional[torch.Tensor]] -MelodyType = tp.Union[torch.Tensor, MelodyList] - - -class MusicGen: - """MusicGen main model with convenient generation API. - - Args: - name (str): name of the model. - compression_model (CompressionModel): Compression model - used to map audio to invertible discrete representations. - lm (LMModel): Language model over discrete representations. 
- """ - def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel, max_duration: float = 30): - self.name = name - self.compression_model = compression_model - self.lm = lm - self.max_duration = max_duration - self.duration = 15.0 # default duration - self.device = next(iter(lm.parameters())).device - self.generation_params: dict = {} - self.set_generation_params(duration=self.duration) # 15 seconds by default - self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None - if self.device.type == 'cpu': - self.autocast = TorchAutocast(enabled=False) - else: - self.autocast = TorchAutocast( - enabled=True, device_type=self.device.type, dtype=torch.float16) - - @property - def frame_rate(self) -> int: - """Roughly the number of AR steps per seconds.""" - return self.compression_model.frame_rate - - @property - def sample_rate(self) -> int: - """Sample rate of the generated audio.""" - return self.compression_model.sample_rate - - @property - def audio_channels(self) -> int: - """Audio channels of the generated audio.""" - return self.compression_model.channels - - @staticmethod - def get_pretrained(name: str = 'melody', device=None): - """Return pretrained model, we provide four models: - - small (300M), text to music, # see: https://huggingface.co/facebook/musicgen-small - - medium (1.5B), text to music, # see: https://huggingface.co/facebook/musicgen-medium - - melody (1.5B) text to music and text+melody to music, # see: https://huggingface.co/facebook/musicgen-melody - - large (3.3B), text to music, # see: https://huggingface.co/facebook/musicgen-large - """ - - if device is None: - if torch.cuda.device_count(): - device = 'cuda' - else: - device = 'cpu' - - if name == 'debug': - # used only for unit tests - compression_model = get_debug_compression_model(device) - lm = get_debug_lm_model(device) - return MusicGen(name, compression_model, lm) - - if name not in HF_MODEL_CHECKPOINTS_MAP: - if not os.path.isfile(name) and not os.path.isdir(name): - raise ValueError( - f"{name} is not a valid checkpoint name. " - f"Choose one of {', '.join(HF_MODEL_CHECKPOINTS_MAP.keys())}" - ) - - cache_dir = os.environ.get('MUSICGEN_ROOT', None) - compression_model = load_compression_model(name, device=device, cache_dir=cache_dir) - lm = load_lm_model(name, device=device, cache_dir=cache_dir) - if name == 'melody': - lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True - - return MusicGen(name, compression_model, lm) - - def set_generation_params(self, use_sampling: bool = True, top_k: int = 250, - top_p: float = 0.0, temperature: float = 1.0, - duration: float = 30.0, cfg_coef: float = 3.0, - two_step_cfg: bool = False, extend_stride: float = 18, rep_penalty: float = None): - """Set the generation parameters for MusicGen. - - Args: - use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True. - top_k (int, optional): top_k used for sampling. Defaults to 250. - top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0. - temperature (float, optional): Softmax temperature parameter. Defaults to 1.0. - duration (float, optional): Duration of the generated waveform. Defaults to 30.0. - cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0. - two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance, - instead of batching together the two. 
This has some impact on how things - are padded but seems to have little impact in practice. - rep_penalty (float, optional): If set, use repetition penalty during generation. Not Implemented. - """ - assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration." - self.extend_stride = extend_stride - self.duration = duration - self.generation_params = { - #'max_gen_len': int(duration * self.frame_rate), - 'use_sampling': use_sampling, - 'temp': temperature, - 'top_k': top_k, - 'top_p': top_p, - 'cfg_coef': cfg_coef, - 'two_step_cfg': two_step_cfg, - } - - def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None): - """Override the default progress callback.""" - self._progress_callback = progress_callback - - def generate_unconditional(self, num_samples: int, progress: bool = False) -> torch.Tensor: - """Generate samples in an unconditional manner. - - Args: - num_samples (int): Number of samples to be generated. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - descriptions: tp.List[tp.Optional[str]] = [None] * num_samples - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None) - return self._generate_tokens(attributes, prompt_tokens, progress) - - def generate(self, descriptions: tp.List[str], progress: bool = False) -> torch.Tensor: - """Generate samples conditioned on text. - - Args: - descriptions (tp.List[str]): A list of strings used as text conditioning. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None) - assert prompt_tokens is None - return self._generate_tokens(attributes, prompt_tokens, progress) - - def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType, - melody_sample_rate: int, progress: bool = False) -> torch.Tensor: - """Generate samples conditioned on text and melody. - - Args: - descriptions (tp.List[str]): A list of strings used as text conditioning. - melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as - melody conditioning. Should have shape [B, C, T] with B matching the description length, - C=1 or 2. It can be [C, T] if there is a single description. It can also be - a list of [C, T] tensors. - melody_sample_rate: (int): Sample rate of the melody waveforms. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - if isinstance(melody_wavs, torch.Tensor): - if melody_wavs.dim() == 2: - melody_wavs = melody_wavs[None] - if melody_wavs.dim() != 3: - raise ValueError("Melody wavs should have a shape [B, C, T].") - melody_wavs = list(melody_wavs) - else: - for melody in melody_wavs: - if melody is not None: - assert melody.dim() == 2, "One melody in the list has the wrong number of dims." 
- - melody_wavs = [ - convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels) - if wav is not None else None - for wav in melody_wavs] - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, - melody_wavs=melody_wavs) - assert prompt_tokens is None - return self._generate_tokens(attributes, prompt_tokens, progress) - - def generate_with_all(self, descriptions: tp.List[str], melody_wavs: MelodyType, - sample_rate: int, progress: bool = False, prompt: tp.Optional[torch.Tensor] = None) -> torch.Tensor: - """Generate samples conditioned on text and melody and audio prompts. - Args: - descriptions (tp.List[str]): A list of strings used as text conditioning. - melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as - melody conditioning. Should have shape [B, C, T] with B matching the description length, - C=1 or 2. It can be [C, T] if there is a single description. It can also be - a list of [C, T] tensors. - sample_rate: (int): Sample rate of the melody waveforms. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - prompt (torch.Tensor): A batch of waveforms used for continuation. - Prompt should be [B, C, T], or [C, T] if only one sample is generated. - """ - if isinstance(melody_wavs, torch.Tensor): - if melody_wavs.dim() == 2: - melody_wavs = melody_wavs[None] - if melody_wavs.dim() != 3: - raise ValueError("Melody wavs should have a shape [B, C, T].") - melody_wavs = list(melody_wavs) - else: - for melody in melody_wavs: - if melody is not None: - assert melody.dim() == 2, "One melody in the list has the wrong number of dims." - - melody_wavs = [ - convert_audio(wav, sample_rate, self.sample_rate, self.audio_channels) - if wav is not None else None - for wav in melody_wavs] - #attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, - # melody_wavs=melody_wavs) - - if prompt is not None: - if prompt.dim() == 2: - prompt = prompt[None] - if prompt.dim() != 3: - raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).") - prompt = convert_audio(prompt, sample_rate, self.sample_rate, self.audio_channels) - if descriptions is None: - descriptions = [None] * len(prompt) - - #if prompt is not None: - # attributes_gen, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt) - - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt, - melody_wavs=melody_wavs) - if prompt is not None: - assert prompt_tokens is not None - else: - assert prompt_tokens is None - return self._generate_tokens(attributes, prompt_tokens, progress) - - def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int, - descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None, - progress: bool = False) -> torch.Tensor: - """Generate samples conditioned on audio prompts. - - Args: - prompt (torch.Tensor): A batch of waveforms used for continuation. - Prompt should be [B, C, T], or [C, T] if only one sample is generated. - prompt_sample_rate (int): Sampling rate of the given audio waveforms. - descriptions (tp.List[str], optional): A list of strings used as text conditioning. Defaults to None. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. 
- """ - if prompt.dim() == 2: - prompt = prompt[None] - if prompt.dim() != 3: - raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).") - prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels) - if descriptions is None: - descriptions = [None] * len(prompt) - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt) - assert prompt_tokens is not None - return self._generate_tokens(attributes, prompt_tokens, progress) - - @torch.no_grad() - def _prepare_tokens_and_attributes( - self, - descriptions: tp.Sequence[tp.Optional[str]], - prompt: tp.Optional[torch.Tensor], - melody_wavs: tp.Optional[MelodyList] = None, - ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]: - """Prepare model inputs. - - Args: - descriptions (tp.List[str]): A list of strings used as text conditioning. - prompt (torch.Tensor): A batch of waveforms used for continuation. - melody_wavs (tp.Optional[torch.Tensor], optional): A batch of waveforms - used as melody conditioning. Defaults to None. - """ - attributes = [ - ConditioningAttributes(text={'description': description}) - for description in descriptions] - - if melody_wavs is None: - for attr in attributes: - attr.wav['self_wav'] = WavCondition( - torch.zeros((1, 1), device=self.device), - torch.tensor([0], device=self.device), - path='null_wav') # type: ignore - else: - if self.name != "melody": - raise RuntimeError("This model doesn't support melody conditioning. " - "Use the `melody` model.") - assert len(melody_wavs) == len(descriptions), \ - f"number of melody wavs must match number of descriptions! " \ - f"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}" - for attr, melody in zip(attributes, melody_wavs): - if melody is None: - attr.wav['self_wav'] = WavCondition( - torch.zeros((1, 1), device=self.device), - torch.tensor([0], device=self.device), - path='null_wav') # type: ignore - else: - attr.wav['self_wav'] = WavCondition( - melody.to(device=self.device), - torch.tensor([melody.shape[-1]], device=self.device)) - - if prompt is not None: - if descriptions is not None: - assert len(descriptions) == len(prompt), "Prompt and nb. descriptions doesn't match" - prompt = prompt.to(self.device) - prompt_tokens, scale = self.compression_model.encode(prompt) - assert scale is None - else: - prompt_tokens = None - return attributes, prompt_tokens - - def _generate_tokens(self, attributes: tp.List[ConditioningAttributes], - prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor: - """Generate discrete audio tokens given audio prompt and/or conditions. - - Args: - attributes (tp.List[ConditioningAttributes]): Conditions used for generation (text/melody). - prompt_tokens (tp.Optional[torch.Tensor]): Audio prompt used for continuation. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - Returns: - torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params. - """ - total_gen_len = int(self.duration * self.frame_rate) - max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate) - current_gen_offset: int = 0 - - def _progress_callback(generated_tokens: int, tokens_to_generate: int): - generated_tokens += current_gen_offset - if self._progress_callback is not None: - # Note that total_gen_len might be quite wrong depending on the - # codebook pattern used, but with delay it is almost accurate. 
- self._progress_callback(generated_tokens, total_gen_len) - else: - print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r') - - if prompt_tokens is not None: - assert max_prompt_len >= prompt_tokens.shape[-1], \ - "Prompt is longer than audio to generate" - - callback = None - if progress: - callback = _progress_callback - - if self.duration <= self.max_duration: - # generate by sampling from LM, simple case. - with self.autocast: - gen_tokens = self.lm.generate( - prompt_tokens, attributes, - callback=callback, max_gen_len=total_gen_len, **self.generation_params) - - else: - # now this gets a bit messier, we need to handle prompts, - # melody conditioning etc. - ref_wavs = [attr.wav['self_wav'] for attr in attributes] - all_tokens = [] - if prompt_tokens is None: - prompt_length = 0 - else: - all_tokens.append(prompt_tokens) - prompt_length = prompt_tokens.shape[-1] - - stride_tokens = int(self.frame_rate * self.extend_stride) - - while current_gen_offset + prompt_length < total_gen_len: - time_offset = current_gen_offset / self.frame_rate - chunk_duration = min(self.duration - time_offset, self.max_duration) - max_gen_len = int(chunk_duration * self.frame_rate) - for attr, ref_wav in zip(attributes, ref_wavs): - wav_length = ref_wav.length.item() - if wav_length == 0: - continue - # We will extend the wav periodically if it not long enough. - # we have to do it here rather than in conditioners.py as otherwise - # we wouldn't have the full wav. - initial_position = int(time_offset * self.sample_rate) - wav_target_length = int(self.max_duration * self.sample_rate) - print(initial_position / self.sample_rate, wav_target_length / self.sample_rate) - positions = torch.arange(initial_position, - initial_position + wav_target_length, device=self.device) - attr.wav['self_wav'] = WavCondition( - ref_wav[0][:, positions % wav_length], - torch.full_like(ref_wav[1], wav_target_length)) - with self.autocast: - gen_tokens = self.lm.generate( - prompt_tokens, attributes, - callback=callback, max_gen_len=max_gen_len, **self.generation_params) - if prompt_tokens is None: - all_tokens.append(gen_tokens) - else: - all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:]) - prompt_tokens = gen_tokens[:, :, stride_tokens:] - prompt_length = prompt_tokens.shape[-1] - current_gen_offset += stride_tokens - - gen_tokens = torch.cat(all_tokens, dim=-1) - - # generate audio - assert gen_tokens.dim() == 3 - with torch.no_grad(): - gen_audio = self.compression_model.decode(gen_tokens, None) - return gen_audio - - #def _generate_tokens(self, attributes: tp.List[ConditioningAttributes], - # prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor: - # """Generate discrete audio tokens given audio prompt and/or conditions. - - # Args: - # attributes (tp.List[ConditioningAttributes]): Conditions used for generation (text/melody). - # prompt_tokens (tp.Optional[torch.Tensor]): Audio prompt used for continuation. - # progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - # Returns: - # torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params. 
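Putting the pieces of this class together, a minimal usage sketch of the generation API defined above; the checkpoint name, prompt text, and output filename are illustrative, it assumes audiocraft and its pretrained weights are available, and the 45-second duration is what routes generation through the strided-extension branch of `_generate_tokens`:

import torchaudio
from audiocraft.models import MusicGen

model = MusicGen.get_pretrained('small')                      # 'melody' would enable chroma conditioning
model.set_generation_params(duration=45.0, extend_stride=18)  # duration > max_duration (30 s) uses the windowed path
wav = model.generate(['lo-fi hip hop beat with warm piano'], progress=True)  # [B, C, T] at model.sample_rate
torchaudio.save('sample.wav', wav[0].cpu(), model.sample_rate)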
- # """ - # def _progress_callback(generated_tokens: int, tokens_to_generate: int): - # print(f'{generated_tokens: 6d} / {tokens_to_generate: 6d}', end='\r') - - # if prompt_tokens is not None: - # assert self.generation_params['max_gen_len'] > prompt_tokens.shape[-1], \ - # "Prompt is longer than audio to generate" - - # callback = None - # if progress: - # callback = _progress_callback - - # # generate by sampling from LM - # with self.autocast: - # gen_tokens = self.lm.generate(prompt_tokens, attributes, callback=callback, **self.generation_params) - - # # generate audio - # assert gen_tokens.dim() == 3 - # with torch.no_grad(): - # gen_audio = self.compression_model.decode(gen_tokens, None) - # return gen_audio - - def to(self, device: str): - self.compression_model.to(device) - self.lm.to(device) - return self \ No newline at end of file diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/platformdirs/windows.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/platformdirs/windows.py deleted file mode 100644 index b52c9c6ea89fc6859fbf3e489072c1b3b0af77fc..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/platformdirs/windows.py +++ /dev/null @@ -1,255 +0,0 @@ -"""Windows.""" -from __future__ import annotations - -import ctypes -import os -import sys -from functools import lru_cache -from typing import TYPE_CHECKING - -from .api import PlatformDirsABC - -if TYPE_CHECKING: - from collections.abc import Callable - - -class Windows(PlatformDirsABC): - """ - `MSDN on where to store app data files - `_. - Makes use of the - `appname `, - `appauthor `, - `version `, - `roaming `, - `opinion `, - `ensure_exists `. - """ - - @property - def user_data_dir(self) -> str: - """ - :return: data directory tied to the user, e.g. - ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or - ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming) - """ - const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(get_win_folder(const)) - return self._append_parts(path) - - def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str: - params = [] - if self.appname: - if self.appauthor is not False: - author = self.appauthor or self.appname - params.append(author) - params.append(self.appname) - if opinion_value is not None and self.opinion: - params.append(opinion_value) - if self.version: - params.append(self.version) - path = os.path.join(path, *params) # noqa: PTH118 - self._optionally_create_directory(path) - return path - - @property - def site_data_dir(self) -> str: - """:return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``""" - path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA")) - return self._append_parts(path) - - @property - def user_config_dir(self) -> str: - """:return: config directory tied to the user, same as `user_data_dir`""" - return self.user_data_dir - - @property - def site_config_dir(self) -> str: - """:return: config directory shared by the users, same as `site_data_dir`""" - return self.site_data_dir - - @property - def user_cache_dir(self) -> str: - """ - :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g. 
- ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version`` - """ - path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA")) - return self._append_parts(path, opinion_value="Cache") - - @property - def site_cache_dir(self) -> str: - """:return: cache directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname\\Cache\\$version``""" - path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA")) - return self._append_parts(path, opinion_value="Cache") - - @property - def user_state_dir(self) -> str: - """:return: state directory tied to the user, same as `user_data_dir`""" - return self.user_data_dir - - @property - def user_log_dir(self) -> str: - """:return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it""" - path = self.user_data_dir - if self.opinion: - path = os.path.join(path, "Logs") # noqa: PTH118 - self._optionally_create_directory(path) - return path - - @property - def user_documents_dir(self) -> str: - """:return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents``""" - return os.path.normpath(get_win_folder("CSIDL_PERSONAL")) - - @property - def user_downloads_dir(self) -> str: - """:return: downloads directory tied to the user e.g. ``%USERPROFILE%\\Downloads``""" - return os.path.normpath(get_win_folder("CSIDL_DOWNLOADS")) - - @property - def user_pictures_dir(self) -> str: - """:return: pictures directory tied to the user e.g. ``%USERPROFILE%\\Pictures``""" - return os.path.normpath(get_win_folder("CSIDL_MYPICTURES")) - - @property - def user_videos_dir(self) -> str: - """:return: videos directory tied to the user e.g. ``%USERPROFILE%\\Videos``""" - return os.path.normpath(get_win_folder("CSIDL_MYVIDEO")) - - @property - def user_music_dir(self) -> str: - """:return: music directory tied to the user e.g. ``%USERPROFILE%\\Music``""" - return os.path.normpath(get_win_folder("CSIDL_MYMUSIC")) - - @property - def user_runtime_dir(self) -> str: - """ - :return: runtime directory tied to the user, e.g. 
- ``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname`` - """ - path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp")) # noqa: PTH118 - return self._append_parts(path) - - -def get_win_folder_from_env_vars(csidl_name: str) -> str: - """Get folder from environment variables.""" - result = get_win_folder_if_csidl_name_not_env_var(csidl_name) - if result is not None: - return result - - env_var_name = { - "CSIDL_APPDATA": "APPDATA", - "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE", - "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA", - }.get(csidl_name) - if env_var_name is None: - msg = f"Unknown CSIDL name: {csidl_name}" - raise ValueError(msg) - result = os.environ.get(env_var_name) - if result is None: - msg = f"Unset environment variable: {env_var_name}" - raise ValueError(msg) - return result - - -def get_win_folder_if_csidl_name_not_env_var(csidl_name: str) -> str | None: - """Get folder for a CSIDL name that does not exist as an environment variable.""" - if csidl_name == "CSIDL_PERSONAL": - return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents") # noqa: PTH118 - - if csidl_name == "CSIDL_DOWNLOADS": - return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Downloads") # noqa: PTH118 - - if csidl_name == "CSIDL_MYPICTURES": - return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Pictures") # noqa: PTH118 - - if csidl_name == "CSIDL_MYVIDEO": - return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Videos") # noqa: PTH118 - - if csidl_name == "CSIDL_MYMUSIC": - return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Music") # noqa: PTH118 - return None - - -def get_win_folder_from_registry(csidl_name: str) -> str: - """ - Get folder from the registry. - - This is a fallback technique at best. I'm not sure if using the registry for these guarantees us the correct answer - for all CSIDL_* names. - """ - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - "CSIDL_PERSONAL": "Personal", - "CSIDL_DOWNLOADS": "{374DE290-123F-4565-9164-39C4925E467B}", - "CSIDL_MYPICTURES": "My Pictures", - "CSIDL_MYVIDEO": "My Video", - "CSIDL_MYMUSIC": "My Music", - }.get(csidl_name) - if shell_folder_name is None: - msg = f"Unknown CSIDL name: {csidl_name}" - raise ValueError(msg) - if sys.platform != "win32": # only needed for mypy type checker to know that this code runs only on Windows - raise NotImplementedError - import winreg - - key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders") - directory, _ = winreg.QueryValueEx(key, shell_folder_name) - return str(directory) - - -def get_win_folder_via_ctypes(csidl_name: str) -> str: - """Get folder with ctypes.""" - # There is no 'CSIDL_DOWNLOADS'. - # Use 'CSIDL_PROFILE' (40) and append the default folder 'Downloads' instead. 
- # https://learn.microsoft.com/en-us/windows/win32/shell/knownfolderid - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - "CSIDL_PERSONAL": 5, - "CSIDL_MYPICTURES": 39, - "CSIDL_MYVIDEO": 14, - "CSIDL_MYMUSIC": 13, - "CSIDL_DOWNLOADS": 40, - }.get(csidl_name) - if csidl_const is None: - msg = f"Unknown CSIDL name: {csidl_name}" - raise ValueError(msg) - - buf = ctypes.create_unicode_buffer(1024) - windll = getattr(ctypes, "windll") # noqa: B009 # using getattr to avoid false positive with mypy type checker - windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if it has highbit chars. - if any(ord(c) > 255 for c in buf): # noqa: PLR2004 - buf2 = ctypes.create_unicode_buffer(1024) - if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - if csidl_name == "CSIDL_DOWNLOADS": - return os.path.join(buf.value, "Downloads") # noqa: PTH118 - - return buf.value - - -def _pick_get_win_folder() -> Callable[[str], str]: - if hasattr(ctypes, "windll"): - return get_win_folder_via_ctypes - try: - import winreg # noqa: F401 - except ImportError: - return get_win_folder_from_env_vars - else: - return get_win_folder_from_registry - - -get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder()) - -__all__ = [ - "Windows", -] diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/fancy_getopt.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/fancy_getopt.py deleted file mode 100644 index 3b887dc5a41e550047477a66a3b4838d9ef2f515..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/fancy_getopt.py +++ /dev/null @@ -1,470 +0,0 @@ -"""distutils.fancy_getopt - -Wrapper around the standard getopt module that provides the following -additional features: - * short and long options are tied together - * options have help strings, so fancy_getopt could potentially - create a complete usage summary - * options set attributes of a passed-in object -""" - -import sys -import string -import re -import getopt -from .errors import DistutilsGetoptError, DistutilsArgError - -# Much like command_re in distutils.core, this is close to but not quite -# the same as a Python NAME -- except, in the spirit of most GNU -# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!) -# The similarities to NAME are again not a coincidence... -longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)' -longopt_re = re.compile(r'^%s$' % longopt_pat) - -# For recognizing "negative alias" options, eg. "quiet=!verbose" -neg_alias_re = re.compile("^({})=!({})$".format(longopt_pat, longopt_pat)) - -# This is used to translate long options to legitimate Python identifiers -# (for use as attributes of some object). -longopt_xlate = str.maketrans('-', '_') - - -class FancyGetopt: - """Wrapper around the standard 'getopt()' module that provides some - handy extra functionality: - * short and long options are tied together - * options have help strings, and help text can be assembled - from them - * options set attributes of a passed-in object - * boolean options can have "negative aliases" -- eg. if - --quiet is the "negative alias" of --verbose, then "--quiet" - on the command line sets 'verbose' to false - """ - - def __init__(self, option_table=None): - # The option table is (currently) a list of tuples. 
The - # tuples may have 3 or four values: - # (long_option, short_option, help_string [, repeatable]) - # if an option takes an argument, its long_option should have '=' - # appended; short_option should just be a single character, no ':' - # in any case. If a long_option doesn't have a corresponding - # short_option, short_option should be None. All option tuples - # must have long options. - self.option_table = option_table - - # 'option_index' maps long option names to entries in the option - # table (ie. those 3-tuples). - self.option_index = {} - if self.option_table: - self._build_index() - - # 'alias' records (duh) alias options; {'foo': 'bar'} means - # --foo is an alias for --bar - self.alias = {} - - # 'negative_alias' keeps track of options that are the boolean - # opposite of some other option - self.negative_alias = {} - - # These keep track of the information in the option table. We - # don't actually populate these structures until we're ready to - # parse the command-line, since the 'option_table' passed in here - # isn't necessarily the final word. - self.short_opts = [] - self.long_opts = [] - self.short2long = {} - self.attr_name = {} - self.takes_arg = {} - - # And 'option_order' is filled up in 'getopt()'; it records the - # original order of options (and their values) on the command-line, - # but expands short options, converts aliases, etc. - self.option_order = [] - - def _build_index(self): - self.option_index.clear() - for option in self.option_table: - self.option_index[option[0]] = option - - def set_option_table(self, option_table): - self.option_table = option_table - self._build_index() - - def add_option(self, long_option, short_option=None, help_string=None): - if long_option in self.option_index: - raise DistutilsGetoptError( - "option conflict: already an option '%s'" % long_option - ) - else: - option = (long_option, short_option, help_string) - self.option_table.append(option) - self.option_index[long_option] = option - - def has_option(self, long_option): - """Return true if the option table for this parser has an - option with long name 'long_option'.""" - return long_option in self.option_index - - def get_attr_name(self, long_option): - """Translate long option name 'long_option' to the form it - has as an attribute of some object: ie., translate hyphens - to underscores.""" - return long_option.translate(longopt_xlate) - - def _check_alias_dict(self, aliases, what): - assert isinstance(aliases, dict) - for alias, opt in aliases.items(): - if alias not in self.option_index: - raise DistutilsGetoptError( - ("invalid %s '%s': " "option '%s' not defined") - % (what, alias, alias) - ) - if opt not in self.option_index: - raise DistutilsGetoptError( - ("invalid %s '%s': " "aliased option '%s' not defined") - % (what, alias, opt) - ) - - def set_aliases(self, alias): - """Set the aliases for this option parser.""" - self._check_alias_dict(alias, "alias") - self.alias = alias - - def set_negative_aliases(self, negative_alias): - """Set the negative aliases for this option parser. - 'negative_alias' should be a dictionary mapping option names to - option names, both the key and value must already be defined - in the option table.""" - self._check_alias_dict(negative_alias, "negative alias") - self.negative_alias = negative_alias - - def _grok_option_table(self): # noqa: C901 - """Populate the various data structures that keep tabs on the - option table. Called by 'getopt()' before it can do anything - worthwhile. 
- """ - self.long_opts = [] - self.short_opts = [] - self.short2long.clear() - self.repeat = {} - - for option in self.option_table: - if len(option) == 3: - long, short, help = option - repeat = 0 - elif len(option) == 4: - long, short, help, repeat = option - else: - # the option table is part of the code, so simply - # assert that it is correct - raise ValueError("invalid option tuple: {!r}".format(option)) - - # Type- and value-check the option names - if not isinstance(long, str) or len(long) < 2: - raise DistutilsGetoptError( - ("invalid long option '%s': " "must be a string of length >= 2") - % long - ) - - if not ((short is None) or (isinstance(short, str) and len(short) == 1)): - raise DistutilsGetoptError( - "invalid short option '%s': " - "must a single character or None" % short - ) - - self.repeat[long] = repeat - self.long_opts.append(long) - - if long[-1] == '=': # option takes an argument? - if short: - short = short + ':' - long = long[0:-1] - self.takes_arg[long] = 1 - else: - # Is option is a "negative alias" for some other option (eg. - # "quiet" == "!verbose")? - alias_to = self.negative_alias.get(long) - if alias_to is not None: - if self.takes_arg[alias_to]: - raise DistutilsGetoptError( - "invalid negative alias '%s': " - "aliased option '%s' takes a value" % (long, alias_to) - ) - - self.long_opts[-1] = long # XXX redundant?! - self.takes_arg[long] = 0 - - # If this is an alias option, make sure its "takes arg" flag is - # the same as the option it's aliased to. - alias_to = self.alias.get(long) - if alias_to is not None: - if self.takes_arg[long] != self.takes_arg[alias_to]: - raise DistutilsGetoptError( - "invalid alias '%s': inconsistent with " - "aliased option '%s' (one of them takes a value, " - "the other doesn't" % (long, alias_to) - ) - - # Now enforce some bondage on the long option name, so we can - # later translate it to an attribute name on some object. Have - # to do this a bit late to make sure we've removed any trailing - # '='. - if not longopt_re.match(long): - raise DistutilsGetoptError( - "invalid long option name '%s' " - "(must be letters, numbers, hyphens only" % long - ) - - self.attr_name[long] = self.get_attr_name(long) - if short: - self.short_opts.append(short) - self.short2long[short[0]] = long - - def getopt(self, args=None, object=None): # noqa: C901 - """Parse command-line options in args. Store as attributes on object. - - If 'args' is None or not supplied, uses 'sys.argv[1:]'. If - 'object' is None or not supplied, creates a new OptionDummy - object, stores option values there, and returns a tuple (args, - object). If 'object' is supplied, it is modified in place and - 'getopt()' just returns 'args'; in both cases, the returned - 'args' is a modified copy of the passed-in 'args' list, which - is left untouched. - """ - if args is None: - args = sys.argv[1:] - if object is None: - object = OptionDummy() - created_object = True - else: - created_object = False - - self._grok_option_table() - - short_opts = ' '.join(self.short_opts) - try: - opts, args = getopt.getopt(args, short_opts, self.long_opts) - except getopt.error as msg: - raise DistutilsArgError(msg) - - for opt, val in opts: - if len(opt) == 2 and opt[0] == '-': # it's a short option - opt = self.short2long[opt[1]] - else: - assert len(opt) > 2 and opt[:2] == '--' - opt = opt[2:] - - alias = self.alias.get(opt) - if alias: - opt = alias - - if not self.takes_arg[opt]: # boolean option? 
- assert val == '', "boolean option can't have value" - alias = self.negative_alias.get(opt) - if alias: - opt = alias - val = 0 - else: - val = 1 - - attr = self.attr_name[opt] - # The only repeating option at the moment is 'verbose'. - # It has a negative option -q quiet, which should set verbose = 0. - if val and self.repeat.get(attr) is not None: - val = getattr(object, attr, 0) + 1 - setattr(object, attr, val) - self.option_order.append((opt, val)) - - # for opts - if created_object: - return args, object - else: - return args - - def get_option_order(self): - """Returns the list of (option, value) tuples processed by the - previous run of 'getopt()'. Raises RuntimeError if - 'getopt()' hasn't been called yet. - """ - if self.option_order is None: - raise RuntimeError("'getopt()' hasn't been called yet") - else: - return self.option_order - - def generate_help(self, header=None): # noqa: C901 - """Generate help text (a list of strings, one per suggested line of - output) from the option table for this FancyGetopt object. - """ - # Blithely assume the option table is good: probably wouldn't call - # 'generate_help()' unless you've already called 'getopt()'. - - # First pass: determine maximum length of long option names - max_opt = 0 - for option in self.option_table: - long = option[0] - short = option[1] - ell = len(long) - if long[-1] == '=': - ell = ell - 1 - if short is not None: - ell = ell + 5 # " (-x)" where short == 'x' - if ell > max_opt: - max_opt = ell - - opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter - - # Typical help block looks like this: - # --foo controls foonabulation - # Help block for longest option looks like this: - # --flimflam set the flim-flam level - # and with wrapped text: - # --flimflam set the flim-flam level (must be between - # 0 and 100, except on Tuesdays) - # Options with short names will have the short name shown (but - # it doesn't contribute to max_opt): - # --foo (-f) controls foonabulation - # If adding the short option would make the left column too wide, - # we push the explanation off to the next line - # --flimflam (-l) - # set the flim-flam level - # Important parameters: - # - 2 spaces before option block start lines - # - 2 dashes for each long option name - # - min. 2 spaces between option and explanation (gutter) - # - 5 characters (incl. space) for short option name - - # Now generate lines of help text. (If 80 columns were good enough - # for Jesus, then 78 columns are good enough for me!) 
- line_width = 78 - text_width = line_width - opt_width - big_indent = ' ' * opt_width - if header: - lines = [header] - else: - lines = ['Option summary:'] - - for option in self.option_table: - long, short, help = option[:3] - text = wrap_text(help, text_width) - if long[-1] == '=': - long = long[0:-1] - - # Case 1: no short option at all (makes life easy) - if short is None: - if text: - lines.append(" --%-*s %s" % (max_opt, long, text[0])) - else: - lines.append(" --%-*s " % (max_opt, long)) - - # Case 2: we have a short option, so we have to include it - # just after the long option - else: - opt_names = "{} (-{})".format(long, short) - if text: - lines.append(" --%-*s %s" % (max_opt, opt_names, text[0])) - else: - lines.append(" --%-*s" % opt_names) - - for ell in text[1:]: - lines.append(big_indent + ell) - return lines - - def print_help(self, header=None, file=None): - if file is None: - file = sys.stdout - for line in self.generate_help(header): - file.write(line + "\n") - - -def fancy_getopt(options, negative_opt, object, args): - parser = FancyGetopt(options) - parser.set_negative_aliases(negative_opt) - return parser.getopt(args, object) - - -WS_TRANS = {ord(_wschar): ' ' for _wschar in string.whitespace} - - -def wrap_text(text, width): - """wrap_text(text : string, width : int) -> [string] - - Split 'text' into multiple lines of no more than 'width' characters - each, and return the list of strings that results. - """ - if text is None: - return [] - if len(text) <= width: - return [text] - - text = text.expandtabs() - text = text.translate(WS_TRANS) - chunks = re.split(r'( +|-+)', text) - chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings - lines = [] - - while chunks: - cur_line = [] # list of chunks (to-be-joined) - cur_len = 0 # length of current line - - while chunks: - ell = len(chunks[0]) - if cur_len + ell <= width: # can squeeze (at least) this chunk in - cur_line.append(chunks[0]) - del chunks[0] - cur_len = cur_len + ell - else: # this line is full - # drop last chunk if all space - if cur_line and cur_line[-1][0] == ' ': - del cur_line[-1] - break - - if chunks: # any chunks left to process? - # if the current line is still empty, then we had a single - # chunk that's too big too fit on a line -- so we break - # down and break it up at the line width - if cur_len == 0: - cur_line.append(chunks[0][0:width]) - chunks[0] = chunks[0][width:] - - # all-whitespace chunks at the end of a line can be discarded - # (and we know from the re.split above that if a chunk has - # *any* whitespace, it is *all* whitespace) - if chunks[0][0] == ' ': - del chunks[0] - - # and store this line in the list-of-all-lines -- as a single - # string, of course! - lines.append(''.join(cur_line)) - - return lines - - -def translate_longopt(opt): - """Convert a long option name to a valid Python identifier by - changing "-" to "_". - """ - return opt.translate(longopt_xlate) - - -class OptionDummy: - """Dummy class just used as a place to hold command-line option - values as instance attributes.""" - - def __init__(self, options=[]): - """Create a new OptionDummy instance. The attributes listed in - 'options' will be initialized to None.""" - for opt in options: - setattr(self, opt, None) - - -if __name__ == "__main__": - text = """\ -Tra-la-la, supercalifragilisticexpialidocious. -How *do* you spell that odd word, anyways? 
-(Someone ask Mary -- she'll know [or she'll -say, "How should I know?"].)""" - - for w in (10, 20, 30, 40): - print("width: %d" % w) - print("\n".join(wrap_text(text, w))) - print() diff --git a/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/__init__.py b/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/__init__.py deleted file mode 100644 index b794fd409a5e3b3b65ad76a43d6a01a318877640..0000000000000000000000000000000000000000 --- a/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '0.1.0' diff --git a/spaces/Toor1989/Toor1989/codebert_app.py b/spaces/Toor1989/Toor1989/codebert_app.py deleted file mode 100644 index b47b6ee5fc03783ebf15753dbdab6f60b2676770..0000000000000000000000000000000000000000 --- a/spaces/Toor1989/Toor1989/codebert_app.py +++ /dev/null @@ -1,73 +0,0 @@ -import streamlit as st -from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer -import pyperclip - -# Cargar modelo y tokenizer -model_name = "microsoft/codebert-base-mlm" -tokenizer = AutoTokenizer.from_pretrained(model_name) -model = AutoModelForCausalLM.from_pretrained(model_name) - -# Configurar pipeline de generación de texto -text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer) - -# Crear la interfaz de usuario -st.set_page_config(page_title="CodeBERT Autocompletado y Corrección de Errores") -st.title("CodeBERT Autocompletado y Corrección de Errores") -st.sidebar.title("Configuración de la Generación de Texto") - -# Agregar opciones de configuración -num_suggestions = st.sidebar.slider("Número de Sugerencias", 1, 10, 3) -max_length = st.sidebar.slider("Longitud Máxima de la Secuencia de Salida", 10, 1000, 100) -languages = ['python', 'java', 'javascript', 'c#', 'ruby', 'php', 'go', 'swift', 'kotlin', 'pine'] -language = st.sidebar.selectbox('Lenguaje de Programación', options=languages) -code_type = st.sidebar.selectbox('Tipo de Código', options=['Indicador', 'Estrategia']) - -# Agregar sección de información -st.info("Ingrese su código en el cuadro de texto a continuación y haga clic en el botón para generar sugerencias de autocompletado o corrección de errores utilizando el modelo CodeBERT.") - -# Agregar cuadro de texto para ingresar el código -input_code = st.text_area("Ingresa tu código aquí:", height=250) - -# Agregar botón para generar sugerencias -if st.button("Generar Sugerencias"): - # Agregar indicador de carga mientras se generan las sugerencias - with st.spinner('Generando sugerencias...'): - # Generar sugerencias de autocompletado o corrección de errores utilizando la API de transformers - if code_type == 'Indicador': - prefix = f"{language}: " - else: - prefix = f"{language} strategy: " - generated_text = text_generator( - input_code, - max_length=max_length, - num_return_sequences=num_suggestions, - do_sample=True, - top_p=0.9, - temperature=0.8, - repetition_penalty=2.0, - no_repeat_ngram_size=2, - pad_token_id=tokenizer.eos_token_id, - prefix=prefix - ) - - # Mostrar el botón "Limpiar" para borrar el cuadro de texto de entrada - st.button("Limpiar") - - # Mostrar las sugerencias generadas una a una en respuesta a un botón "Mostrar siguiente sugerencia" - if len(generated_text) > 0: - st.subheader("Sugerencias Generadas:") - suggestion_index = 0 - st.code(generated_text[suggestion_index]['generated_text'].strip(), language=language) - while suggestion_index < len(generated_text)-1: - if st.button("Mostrar siguiente sugerencia"): - suggestion_index += 1 - 
st.code(generated_text[suggestion_index]['generated_text'].strip(), language=language) - else: - break - - # Agregar botón "Copiar" para copiar la sugerencia seleccionada al portapapeles - if len(generated_text) == 1: - if st.button("Copiar"): - pyperclip.copy(generated_text[0]['generated_text'].strip()) - st.success("La sugerencia se ha copiado al portapapeles") - \ No newline at end of file diff --git a/spaces/Vicent3/ocr-endpoint/index.html b/spaces/Vicent3/ocr-endpoint/index.html deleted file mode 100644 index 9b535f616301eec9a1f6802ccd7c6af6659e0a8c..0000000000000000000000000000000000000000 --- a/spaces/Vicent3/ocr-endpoint/index.html +++ /dev/null @@ -1,46 +0,0 @@ - - - - - - OCR Interference API Endpoint Core and OCR Wrapper - - - - - -
    -
    -

    OCR Interference API Endpoint Core and OCR Wrapper

    -

    Add OCR features to your code without modifying the core

    -
    -
    - -

    Easy to use

    -

    The OCR Interference API Endpoint Core and OCR Wrapper are easy to use. Simply make a call to the API and you will receive the OCR results in JSON format.

    -
    -
    - -

    No need to modify your code

    -

    You don't need to modify your code to use the OCR Interference API Endpoint Core and OCR Wrapper. Simply make a call to the API and you will receive the OCR results in JSON format.

    -
    -
    - -

    Supports multiple programming languages

    -

    The OCR Interference API Endpoint Core and OCR Wrapper support multiple programming languages, including Python and JavaScript.

    -
    -
    - -

    Provides JSON output

    -

    The OCR Interference API Endpoint Core and OCR Wrapper provide JSON output, which is easy to parse and use in your code.

    -
    -
    - -

    Accurate results

    -

    The OCR Interference API Endpoint Core and OCR Wrapper provide accurate OCR results

    -
    -
    -
    -
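The feature blurbs above all describe the same workflow: send an image to the endpoint over HTTP and parse the JSON that comes back. As a rough illustration only, a Python client for such a wrapper could look like the sketch below; the endpoint URL, the multipart field name ("file"), and the shape of the returned JSON are assumptions made for the example and are not taken from this Space's code.

import requests

def run_ocr(image_path: str, endpoint: str = "https://example-ocr-endpoint/api/ocr") -> dict:
    # Upload the image and return the parsed JSON body produced by the OCR wrapper.
    with open(image_path, "rb") as f:
        response = requests.post(endpoint, files={"file": f}, timeout=60)
    response.raise_for_status()
    return response.json()

if __name__ == "__main__":
    result = run_ocr("sample.png")
    print(result)  # exact keys depend on the deployed wrapper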
    - - \ No newline at end of file diff --git a/spaces/Vinnybustacap/WizardLM-WizardLM-7B-V1.0/README.md b/spaces/Vinnybustacap/WizardLM-WizardLM-7B-V1.0/README.md deleted file mode 100644 index e7b93eb555ef18344424a670326a6851612cac93..0000000000000000000000000000000000000000 --- a/spaces/Vinnybustacap/WizardLM-WizardLM-7B-V1.0/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: WizardLM WizardLM 7B V1.0 -emoji: 🔥 -colorFrom: gray -colorTo: gray -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Violetmae14/Violet/index.html b/spaces/Violetmae14/Violet/index.html deleted file mode 100644 index 58275de3b1c343a98420342baa076b9baaafa157..0000000000000000000000000000000000000000 --- a/spaces/Violetmae14/Violet/index.html +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - My static Space - - - -
    -

    Welcome to your static Space!

    -

    You can modify this app directly by editing index.html in the Files and versions tab.

    -

    - Also don't forget to check the - Spaces documentation. -

    -
    - - diff --git a/spaces/Vishwas1/GPTStoryWriter/app.py b/spaces/Vishwas1/GPTStoryWriter/app.py deleted file mode 100644 index 71a159b2d3a7b3bb1a0c77d1f8093be24e3a1ad2..0000000000000000000000000000000000000000 --- a/spaces/Vishwas1/GPTStoryWriter/app.py +++ /dev/null @@ -1,10 +0,0 @@ -import gradio as gr -from gradio.mix import Parallel, Series - -api2 = gr.Interface.load("huggingface/facebook/nllb-200-distilled-600M",TASK="translation",src_lang="eng_Latn",tgt_lang="mar_DEVA") -generator1 = gr.Interface.load("huggingface/gpt2") -generator2 = gr.Interface.load("huggingface/EleutherAI/gpt-neo-2.7B") -generator3 = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B") -generator4 = gr.Interface.load("huggingface/bigscience/bloom") -Parallel(generator1, generator2, generator3, generator4).launch() -Series(api2).launch() diff --git a/spaces/Vision-CAIR/minigpt4/minigpt4/datasets/datasets/dataloader_utils.py b/spaces/Vision-CAIR/minigpt4/minigpt4/datasets/datasets/dataloader_utils.py deleted file mode 100644 index 3459972e5bda3e4a40788acf97ebe3c114fe7c3e..0000000000000000000000000000000000000000 --- a/spaces/Vision-CAIR/minigpt4/minigpt4/datasets/datasets/dataloader_utils.py +++ /dev/null @@ -1,162 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import time -import random -import torch -from minigpt4.datasets.data_utils import move_to_cuda -from torch.utils.data import DataLoader - - -class MultiIterLoader: - """ - A simple wrapper for iterating over multiple iterators. - - Args: - loaders (List[Loader]): List of Iterator loaders. - ratios (List[float]): List of ratios to sample from each loader. If None, all loaders are sampled uniformly. - """ - - def __init__(self, loaders, ratios=None): - # assert all loaders has __next__ method - for loader in loaders: - assert hasattr( - loader, "__next__" - ), "Loader {} has no __next__ method.".format(loader) - - if ratios is None: - ratios = [1.0] * len(loaders) - else: - assert len(ratios) == len(loaders) - ratios = [float(ratio) / sum(ratios) for ratio in ratios] - - self.loaders = loaders - self.ratios = ratios - - def __next__(self): - # random sample from each loader by ratio - loader_idx = random.choices(range(len(self.loaders)), self.ratios, k=1)[0] - return next(self.loaders[loader_idx]) - - -class PrefetchLoader(object): - """ - Modified from https://github.com/ChenRocks/UNITER. - - overlap compute and cuda data transfer - (copied and then modified from nvidia apex) - """ - - def __init__(self, loader): - self.loader = loader - self.stream = torch.cuda.Stream() - - def __iter__(self): - loader_it = iter(self.loader) - self.preload(loader_it) - batch = self.next(loader_it) - while batch is not None: - is_tuple = isinstance(batch, tuple) - if is_tuple: - task, batch = batch - - if is_tuple: - yield task, batch - else: - yield batch - batch = self.next(loader_it) - - def __len__(self): - return len(self.loader) - - def preload(self, it): - try: - self.batch = next(it) - except StopIteration: - self.batch = None - return - # if record_stream() doesn't work, another option is to make sure - # device inputs are created on the main stream. 
- # self.next_input_gpu = torch.empty_like(self.next_input, - # device='cuda') - # self.next_target_gpu = torch.empty_like(self.next_target, - # device='cuda') - # Need to make sure the memory allocated for next_* is not still in use - # by the main stream at the time we start copying to next_*: - # self.stream.wait_stream(torch.cuda.current_stream()) - with torch.cuda.stream(self.stream): - self.batch = move_to_cuda(self.batch) - # more code for the alternative if record_stream() doesn't work: - # copy_ will record the use of the pinned source tensor in this - # side stream. - # self.next_input_gpu.copy_(self.next_input, non_blocking=True) - # self.next_target_gpu.copy_(self.next_target, non_blocking=True) - # self.next_input = self.next_input_gpu - # self.next_target = self.next_target_gpu - - def next(self, it): - torch.cuda.current_stream().wait_stream(self.stream) - batch = self.batch - if batch is not None: - record_cuda_stream(batch) - self.preload(it) - return batch - - def __getattr__(self, name): - method = self.loader.__getattribute__(name) - return method - - -def record_cuda_stream(batch): - if isinstance(batch, torch.Tensor): - batch.record_stream(torch.cuda.current_stream()) - elif isinstance(batch, list) or isinstance(batch, tuple): - for t in batch: - record_cuda_stream(t) - elif isinstance(batch, dict): - for t in batch.values(): - record_cuda_stream(t) - else: - pass - - -class IterLoader: - """ - A wrapper to convert DataLoader as an infinite iterator. - - Modified from: - https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py - """ - - def __init__(self, dataloader: DataLoader, use_distributed: bool = False): - self._dataloader = dataloader - self.iter_loader = iter(self._dataloader) - self._use_distributed = use_distributed - self._epoch = 0 - - @property - def epoch(self) -> int: - return self._epoch - - def __next__(self): - try: - data = next(self.iter_loader) - except StopIteration: - self._epoch += 1 - if hasattr(self._dataloader.sampler, "set_epoch") and self._use_distributed: - self._dataloader.sampler.set_epoch(self._epoch) - time.sleep(2) # Prevent possible deadlock during epoch transition - self.iter_loader = iter(self._dataloader) - data = next(self.iter_loader) - - return data - - def __iter__(self): - return self - - def __len__(self): - return len(self._dataloader) diff --git a/spaces/Wauplin/space_to_dataset_saver/app_image.py b/spaces/Wauplin/space_to_dataset_saver/app_image.py deleted file mode 100644 index c31f51aa1fe50b827e79a356b54d79069ada6028..0000000000000000000000000000000000000000 --- a/spaces/Wauplin/space_to_dataset_saver/app_image.py +++ /dev/null @@ -1,50 +0,0 @@ -import json -from datetime import datetime -from pathlib import Path -from uuid import uuid4 - -import gradio as gr -import numpy as np -from PIL import Image - -from huggingface_hub import CommitScheduler, InferenceClient - - -IMAGE_DATASET_DIR = Path("image_dataset") / f"train-{uuid4()}" -IMAGE_DATASET_DIR.mkdir(parents=True, exist_ok=True) -IMAGE_JSONL_PATH = IMAGE_DATASET_DIR / "metadata.jsonl" - -scheduler = CommitScheduler( - repo_id="example-space-to-dataset-image", - repo_type="dataset", - folder_path=IMAGE_DATASET_DIR, - path_in_repo=IMAGE_DATASET_DIR.name, -) - -client = InferenceClient() - - -def generate_image(prompt: str) -> Image: - return client.text_to_image(prompt) - - -def save_image(prompt: str, image_array: np.ndarray) -> None: - image_path = IMAGE_DATASET_DIR / f"{uuid4()}.png" - - with scheduler.lock: - 
Image.fromarray(image_array).save(image_path) - with IMAGE_JSONL_PATH.open("a") as f: - json.dump({"prompt": prompt, "file_name": image_path.name, "datetime": datetime.now().isoformat()}, f) - f.write("\n") - - -def get_demo(): - with gr.Row(): - prompt_value = gr.Textbox(label="Prompt") - image_value = gr.Image(label="Generated image") - text_to_image_btn = gr.Button("Generate") - text_to_image_btn.click(fn=generate_image, inputs=prompt_value, outputs=image_value).success( - fn=save_image, - inputs=[prompt_value, image_value], - outputs=None, - ) diff --git a/spaces/Wenjing2/ChatGPT_HF/README.md b/spaces/Wenjing2/ChatGPT_HF/README.md deleted file mode 100644 index 54d935c200d90b4e24727758e4582ae3322e65ed..0000000000000000000000000000000000000000 --- a/spaces/Wenjing2/ChatGPT_HF/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ChatGPT HF -emoji: 🌖 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: Xhaheen/ChatGPT_HF ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/WhiteKnightAI/togethercomputer-LLaMA-2-7B-32K/app.py b/spaces/WhiteKnightAI/togethercomputer-LLaMA-2-7B-32K/app.py deleted file mode 100644 index 0eea9d6f508c3048be87fc452d36415699a6999e..0000000000000000000000000000000000000000 --- a/spaces/WhiteKnightAI/togethercomputer-LLaMA-2-7B-32K/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/togethercomputer/LLaMA-2-7B-32K").launch() \ No newline at end of file diff --git a/spaces/Xinyoumeng233hu/SteganographywithGPT-2/arithmetic.py b/spaces/Xinyoumeng233hu/SteganographywithGPT-2/arithmetic.py deleted file mode 100644 index bfda92f2d467786d8db258bd9c134e163471aee4..0000000000000000000000000000000000000000 --- a/spaces/Xinyoumeng233hu/SteganographywithGPT-2/arithmetic.py +++ /dev/null @@ -1,260 +0,0 @@ -import torch -import torch.nn.functional as F - -from utils import limit_past, kl, entropy, bits2int, int2bits, is_sent_finish, num_same_from_beg - -def encode_arithmetic(model, enc, message, context, finish_sent=False, device='cpu', temp=1.0, precision=16, topk=50000): - - context = torch.tensor(context[-1022:], device=device, dtype=torch.long) - - max_val = 2**precision - threshold = 2**(-precision) - cur_interval = [0, max_val] # bottom inclusive, top exclusive - - prev = context - output = context - past = None - - total_num = 0 - total_num_for_stats = 0 - total_log_probs = 0 - total_kl = 0 # in bits - total_entropy_ptau = 0 - total_num_sents = 0 - - with torch.no_grad(): - i = 0 - sent_finish = False - while i < len(message) or (finish_sent and not sent_finish): - logits, past = model(prev.unsqueeze(0), past=past) - past = limit_past(past) - logits[0, -1, -1] = -1e20 # endoftext token can't happen - logits[0, -1, 628] = -1e20 # 2 newlines token can't happen - logits, indices = logits[0, -1, :].sort(descending=True) - logits = logits.double() - logits_temp = logits / temp - probs_temp = F.softmax(logits_temp, dim=0) - log_probs_temp = F.log_softmax(logits_temp, dim=0) - log_probs = F.log_softmax(logits, dim=0) - - # conditions for having reached the end of the message - if i >= len(message): - selection = 0 - sent_finish = is_sent_finish(indices[selection].item(), enc) - else: - # Cutoff low probabilities that would be rounded to 0 - cur_int_range = cur_interval[1]-cur_interval[0] - cur_threshold = 1/cur_int_range - k = min(max(2, (probs_temp < 
cur_threshold).nonzero()[0].item()), topk) - probs_temp_int = probs_temp[:k] # Cutoff all but top k - - # Rescale to correct range - probs_temp_int = probs_temp_int/probs_temp_int.sum()*cur_int_range - - # Round probabilities to integers given precision - probs_temp_int = probs_temp_int.round().long() - cum_probs = probs_temp_int.cumsum(0) - - # Remove any elements from the bottom if rounding caused the total prob to be too large - overfill_index = (cum_probs > cur_int_range).nonzero() - if len(overfill_index) > 0: - cum_probs = cum_probs[:overfill_index[0]] - - # Add any mass to the top if removing/rounding causes the total prob to be too small - cum_probs += cur_int_range-cum_probs[-1] # add - - # Get out resulting probabilities - probs_final = cum_probs.clone() - probs_final[1:] = cum_probs[1:] - cum_probs[:-1] - - # Convert to position in range - cum_probs += cur_interval[0] - - # Get selected index based on binary fraction from message bits - message_bits = message[i:i+precision] - if i+precision > len(message): - message_bits = message_bits + [0]*(i+precision-len(message)) - message_idx = bits2int(reversed(message_bits)) - selection = (cum_probs > message_idx).nonzero()[0].item() - - # Calculate new range as ints - new_int_bottom = cum_probs[selection-1] if selection > 0 else cur_interval[0] - new_int_top = cum_probs[selection] - - # Convert range to bits - new_int_bottom_bits_inc = list(reversed(int2bits(new_int_bottom, precision))) - new_int_top_bits_inc = list(reversed(int2bits(new_int_top-1, precision))) # -1 here because upper bound is exclusive - - # Consume most significant bits which are now fixed and update interval - num_bits_encoded = num_same_from_beg(new_int_bottom_bits_inc, new_int_top_bits_inc) - i += num_bits_encoded - - new_int_bottom_bits = new_int_bottom_bits_inc[num_bits_encoded:] + [0]*num_bits_encoded - new_int_top_bits = new_int_top_bits_inc[num_bits_encoded:] + [1]*num_bits_encoded - - cur_interval[0] = bits2int(reversed(new_int_bottom_bits)) - cur_interval[1] = bits2int(reversed(new_int_top_bits))+1 # +1 here because upper bound is exclusive - - # Gather statistics - total_log_probs += log_probs[selection].item() - - q = probs_final.double()/probs_final.sum() - logq = q.log() - total_kl += kl(q, logq, log_probs[:len(q)]) - total_entropy_ptau += entropy(probs_temp, log_probs_temp) - total_num_for_stats += 1 - - # Update history with new token - prev = indices[selection].view(1) - output = torch.cat((output, prev)) - total_num += 1 - #print(enc.decode(prev.tolist()), message_bits[:num_bits_encoded]) - - # For text->bits->text - partial = enc.decode(output[len(context):].tolist()) - if '' in partial: - break - - avg_NLL = -total_log_probs/total_num_for_stats - avg_KL = total_kl/total_num_for_stats - words_per_bit = total_num_for_stats/i - # avg_Hq = total_entropy_ptau/total_num_for_stats - - return output[len(context):].tolist(), avg_NLL, avg_KL, words_per_bit - -def decode_arithmetic(model, enc, text, context, device='cpu', temp=1.0, precision=16, topk=50000): - # inp is a list of token indices - # context is a list of token indices - inp = enc.encode(text) - # common BPE error case: 128, 128 (2 newlines) is interpretted as 628 (2 newlines) - i = 0 - while i < len(inp): - if inp[i] == 628: - inp[i] = 198 - inp[i+1:i+1] = [198] - i += 2 - else: - i += 1 - - context = torch.tensor(context[-1022:], device=device, dtype=torch.long) - - max_val = 2**precision - threshold = 2**(-precision) - cur_interval = [0, max_val] # bottom inclusive, top exclusive - - prev = 
context - past = None - message = [] - with torch.no_grad(): - i = 0 - while i < len(inp): - logits, past = model(prev.unsqueeze(0), past=past) - past = limit_past(past) - logits[0, -1, -1] = -1e10 # endoftext can't happen - logits[0, -1, 628] = -1e10 # 2 newlines can't happen - logits, indices = logits[0, -1, :].sort(descending=True) - logits = logits.double() - logits_temp = logits / temp - probs_temp = F.softmax(logits_temp, dim=0) - - # Cutoff low probabilities that would be rounded to 0 - cur_int_range = cur_interval[1]-cur_interval[0] - cur_threshold = 1/cur_int_range - k = min(max(2, (probs_temp < cur_threshold).nonzero()[0].item()), topk) - probs_temp_int = probs_temp[:k] # Cutoff all but top k - - # Rescale to correct range - probs_temp_int = probs_temp_int/probs_temp_int.sum()*cur_int_range - - # Round probabilities to integers given precision - probs_temp_int = probs_temp_int.round().long() - cum_probs = probs_temp_int.cumsum(0) - - # Remove any elements from the bottom if rounding caused the total prob to be too large - overfill_index = (cum_probs > cur_int_range).nonzero() - if len(overfill_index) > 0: - cum_probs = cum_probs[:overfill_index[0]] - k = overfill_index[0].item() - - # Add any mass to the top if removing/rounding causes the total prob to be too small - cum_probs += cur_int_range-cum_probs[-1] # add - - # Covnert to position in range - cum_probs += cur_interval[0] - - rank = (indices == inp[i]).nonzero().item() - - # Handle most errors that could happen because of BPE with heuristic - if rank >= k: - true_token_text = enc.decoder[inp[i]] - for rank_idx in range(k): - prop_token_text = enc.decoder[indices[rank_idx].item()] - # common case that is not caught - if inp[i] == 128 and indices[rank_idx] == 198: - rank = rank_idx - inp[i] = indices[rank_idx].item() - break - - # Is there a more likely prefix token that could be the actual token generated? - if len(prop_token_text) <= len(true_token_text) and \ - prop_token_text == true_token_text[:len(prop_token_text)]: - rank = rank_idx - suffix = true_token_text[len(prop_token_text):] - suffix_tokens = enc.encode(suffix) # a list - inp[i] = indices[rank_idx].item() - inp[i+1:i+1] = suffix_tokens # insert suffix tokens into list - break - - # Is there a more likely longer token that could be the actual token generated? 
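                    # (i.e. a candidate inside the allowed top-k whose text equals this token's
                    #  text plus the next few input tokens; if so, adopt that candidate, drop the
                    #  tokens it absorbs, and re-encode any leftover suffix so the decoder stays
                    #  aligned with a tokenization the encoder's sampler could actually have produced)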
- elif len(prop_token_text) > len(true_token_text) and \ - true_token_text == prop_token_text[:len(true_token_text)]: - whole_text = true_token_text - num_extra = 1 - while len(whole_text) < len(prop_token_text): - whole_text += enc.decoder[inp[i+num_extra]] - num_extra += 1 - if prop_token_text == whole_text[:len(prop_token_text)]: - rank = rank_idx - inp[i] = indices[rank_idx].item() - for j in range(1, num_extra): - del inp[i+j] - - if len(whole_text) > len(prop_token_text): - suffix = whole_text[len(prop_token_text):] - suffix_tokens = enc.encode(suffix) # a list - inp[i+1:i+1] = suffix_tokens # insert suffix tokens into list - break - else: - print('Unable to fix BPE error: token received: %s=%d, text: %s' % (true_token_text, inp[i], text)) - rank = 0 - - selection = rank - - # Calculate new range as ints - new_int_bottom = cum_probs[selection-1] if selection > 0 else cur_interval[0] - new_int_top = cum_probs[selection] - - # Convert range to bits - new_int_bottom_bits_inc = list(reversed(int2bits(new_int_bottom, precision))) - new_int_top_bits_inc = list(reversed(int2bits(new_int_top-1, precision))) # -1 here because upper bound is exclusive - - # Emit most significant bits which are now fixed and update interval - num_bits_encoded = num_same_from_beg(new_int_bottom_bits_inc, new_int_top_bits_inc) - if i == len(inp)-1: - new_bits = new_int_bottom_bits_inc - else: - new_bits = new_int_top_bits_inc[:num_bits_encoded] - message += new_bits - - new_int_bottom_bits = new_int_bottom_bits_inc[num_bits_encoded:] + [0]*num_bits_encoded - new_int_top_bits = new_int_top_bits_inc[num_bits_encoded:] + [1]*num_bits_encoded - - cur_interval[0] = bits2int(reversed(new_int_bottom_bits)) - cur_interval[1] = bits2int(reversed(new_int_top_bits))+1 # +1 here because upper bound is exclusive - - # Update history with new token - prev = torch.tensor([inp[i]], device=device, dtype=torch.long) - #print(enc.decode([inp[i]]), new_bits) - i += 1 - - return message \ No newline at end of file diff --git a/spaces/XzJosh/Aatrox-Bert-VITS2/train_ms.py b/spaces/XzJosh/Aatrox-Bert-VITS2/train_ms.py deleted file mode 100644 index 5d109003d40497ea4493e7c73f47c1eb7370a81e..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Aatrox-Bert-VITS2/train_ms.py +++ /dev/null @@ -1,402 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -import shutil -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler -from tqdm import tqdm -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -import commons -import utils -from data_utils import ( - TextAudioSpeakerLoader, - TextAudioSpeakerCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, - DurationDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - -torch.backends.cudnn.benchmark = True -torch.backends.cuda.matmul.allow_tf32 = True -torch.backends.cudnn.allow_tf32 = True -torch.set_float32_matmul_precision('medium') -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert 
torch.cuda.is_available(), "CPU training is not allowed." - - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '65280' - - hps = utils.get_hparams() - if not hps.cont: - shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth') - shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth') - shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth') - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32, 300, 400, 500, 600, 700, 800, 900, 1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioSpeakerCollate() - train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, - batch_size=1, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: - print("Using noise scaled MAS for VITS2") - use_noise_scaled_mas = True - mas_noise_scale_initial = 0.01 - noise_scale_delta = 2e-6 - else: - print("Using normal MAS for VITS1") - use_noise_scaled_mas = False - mas_noise_scale_initial = 0.0 - noise_scale_delta = 0.0 - if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: - print("Using duration discriminator for VITS2") - use_duration_discriminator = True - net_dur_disc = DurationDiscriminator( - hps.model.hidden_channels, - hps.model.hidden_channels, - 3, - 0.1, - gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, - ).cuda(rank) - if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: - if hps.data.n_speakers == 0: - raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") - use_spk_conditioned_encoder = True - else: - print("Using normal encoder for VITS1") - use_spk_conditioned_encoder = False - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - mas_noise_scale_initial = mas_noise_scale_initial, - noise_scale_delta = noise_scale_delta, - **hps.model).cuda(rank) - - freeze_enc = getattr(hps.model, "freeze_enc", False) - if freeze_enc: - print("freeze encoder !!!") - for param in net_g.enc_p.parameters(): - param.requires_grad = False - - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - filter(lambda p: p.requires_grad, net_g.parameters()), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - 
hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - if net_dur_disc is not None: - optim_dur_disc = torch.optim.AdamW( - net_dur_disc.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - else: - optim_dur_disc = None - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - if net_dur_disc is not None: - net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) - - pretrain_dir = None - if pretrain_dir is None: - try: - if net_dur_disc is not None: - _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) - _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g, skip_optimizer=not hps.cont) - _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d, skip_optimizer=not hps.cont) - - epoch_str = max(epoch_str, 1) - global_step = (epoch_str - 1) * len(train_loader) - except Exception as e: - print(e) - epoch_str = 1 - global_step = 0 - else: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, - optim_g, True) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, - optim_d, True) - - - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - if net_dur_disc is not None: - scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - else: - scheduler_dur_disc = None - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - if net_dur_disc is not None: - scheduler_dur_disc.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d, net_dur_disc = nets - optim_g, optim_d, optim_dur_disc = optims - scheduler_g, scheduler_d, scheduler_dur_disc = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - if net_dur_disc is not None: - net_dur_disc.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): - if net_g.module.use_noise_scaled_mas: - current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step - net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, 
spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - speakers = speakers.cuda(rank, non_blocking=True) - tone = tone.cuda(rank, non_blocking=True) - language = language.cuda(rank, non_blocking=True) - bert = bert.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) - with autocast(enabled=False): - # TODO: I think need to mean using the mask, but for now, just mean all - loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) - loss_dur_disc_all = loss_dur_disc - optim_dur_disc.zero_grad() - scaler.scale(loss_dur_disc_all).backward() - scaler.unscale_(optim_dur_disc) - grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) - scaler.step(optim_dur_disc) - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - if net_dur_disc is not None: - loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g) - loss_gen_all += loss_dur_gen - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. 
* batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update( - {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - if net_dur_disc is not None: - utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step))) - keep_ckpts = getattr(hps.train, 'keep_ckpts', 5) - if keep_ckpts > 0: - utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True) - - - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - print("Evaluating ...") - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in enumerate(eval_loader): - x, x_lengths = x.cuda(), x_lengths.cuda() - spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - speakers = speakers.cuda() - bert = bert.cuda() - tone = tone.cuda() - language = language.cuda() - for use_sdp in [True, False]: - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0) - y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - image_dict.update({ - f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - }) - audio_dict.update({ - f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]] - }) - image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - 
audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - -if __name__ == "__main__": - main() diff --git a/spaces/YlcldKlns/bing/src/components/chat-history.tsx b/spaces/YlcldKlns/bing/src/components/chat-history.tsx deleted file mode 100644 index feb81de66562edda8f40d3c0cc717202c92b6509..0000000000000000000000000000000000000000 --- a/spaces/YlcldKlns/bing/src/components/chat-history.tsx +++ /dev/null @@ -1,48 +0,0 @@ -import { IconEdit, IconTrash, IconMore, IconDownload } from "./ui/icons" - -export function ChatHistory() { - return ( -
    -
    - 历史记录 -
    -
    -
    -
    -
    -
    -
    - -
    -

    无标题的聊天

    -
    -

    上午1:42

    -
    - - - - - - - - -
    -
    -
    -
    -
    -
    -
    -
    - ) -} diff --git a/spaces/Zeltoria/anime-voice-generator/utils.py b/spaces/Zeltoria/anime-voice-generator/utils.py deleted file mode 100644 index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000 --- a/spaces/Zeltoria/anime-voice-generator/utils.py +++ /dev/null @@ -1,225 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -import librosa -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_audio_to_torch(full_path, target_sampling_rate): - audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True) - return torch.FloatTensor(audio.astype(np.float32)) - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - 
parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/ZeroTwo3/WavJourney/VoiceParser/model.py b/spaces/ZeroTwo3/WavJourney/VoiceParser/model.py deleted file mode 100644 index 1fd265241d43953a04578a43fe0248dd2233348a..0000000000000000000000000000000000000000 --- a/spaces/ZeroTwo3/WavJourney/VoiceParser/model.py +++ /dev/null @@ -1,102 +0,0 @@ -import os -import json -import numpy as np - -import torch -import torchaudio -torchaudio.set_audio_backend("soundfile") # Use 'soundfile' backend - -from encodec import EncodecModel -from encodec.utils import convert_audio -from .hubert_manager import HuBERTManager -from .pre_kmeans_hubert import CustomHubert -from .customtokenizer import CustomTokenizer - -class 
VoiceParser(): - def __init__(self, device='cpu'): - model = ('quantifier_hubert_base_ls960_14.pth', 'tokenizer.pth') - - hubert_model = CustomHubert(HuBERTManager.make_sure_hubert_installed(), device=device) - quant_model = CustomTokenizer.load_from_checkpoint(HuBERTManager.make_sure_tokenizer_installed(model=model[0], local_file=model[1]), device) - encodec_model = EncodecModel.encodec_model_24khz() - encodec_model.set_target_bandwidth(6.0) - - self.hubert_model = hubert_model - self.quant_model = quant_model - self.encodec_model = encodec_model.to(device) - self.device = device - print('Loaded VoiceParser models!') - - - def extract_acoustic_embed(self, wav_path, npz_dir): - wav, sr = torchaudio.load(wav_path) - - wav_hubert = wav.to(self.device) - - if wav_hubert.shape[0] == 2: # Stereo to mono if needed - wav_hubert = wav_hubert.mean(0, keepdim=True) - - semantic_vectors = self.hubert_model.forward(wav_hubert, input_sample_hz=sr) - semantic_tokens = self.quant_model.get_token(semantic_vectors) - wav = convert_audio(wav, sr, self.encodec_model.sample_rate, 1).unsqueeze(0) - - wav = wav.to(self.device) - - with torch.no_grad(): - encoded_frames = self.encodec_model.encode(wav) - - codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze() - - codes = codes.cpu() - semantic_tokens = semantic_tokens.cpu() - - wav_name = os.path.split(wav_path)[1] - npz_name = wav_name[:-4] + '.npz' - npz_path = os.path.join(npz_dir, npz_name) - - np.savez( - npz_path, - semantic_prompt=semantic_tokens, - fine_prompt=codes, - coarse_prompt=codes[:2, :] - ) - - return npz_path - - - def read_json_file(self, json_path): - with open(json_path, 'r') as file: - data = json.load(file) - return data - - - def parse_voice_json(self, voice_json, output_dir): - """ - Parse a voice json file, generate the corresponding output json and npz files - Params: - voice_json: path of a json file or List of json nodes - output_dir: output dir for new json and npz files - """ - if isinstance(voice_json, list): - voice_json = voice_json - else: - # If voice_json is a file path (str), read the JSON file - voice_json = self.read_json_file(voice_json) - for item in voice_json: - wav_path = item['wav'] - npz_path = self.extract_acoustic_embed(wav_path=wav_path, npz_dir=output_dir) - item['npz'] = npz_path - del item['wav'] - - output_json = os.path.join(output_dir, 'metadata.json') - - with open(output_json, 'w') as file: - json.dump(voice_json, file, indent=4) - - - - - - - - diff --git a/spaces/a-v-bely/spanish-task-generator/utilities_ui/custom_download_button.py b/spaces/a-v-bely/spanish-task-generator/utilities_ui/custom_download_button.py deleted file mode 100644 index cb0ab3ca61245011d213227a2bf855bf97078e1c..0000000000000000000000000000000000000000 --- a/spaces/a-v-bely/spanish-task-generator/utilities_ui/custom_download_button.py +++ /dev/null @@ -1,98 +0,0 @@ -import io -import re -import uuid -import base64 -import streamlit as st -from typing import Optional, Union -from streamlit.elements.widgets.button import DownloadButtonDataType - -DownloadButtonDataType = Union[DownloadButtonDataType, "pd.DataFrame", "Styler"] - -HAS_PD = True - - -def download_button(label: str, - data: DownloadButtonDataType, - file_name: Optional[str] = None) -> str: - """Generates a link to download the given data, support file-like object and pd.DataFrame. - Params - Args: - label: text show on page. - data: file-like object or pd.DataFrame. - file_name: filename and extension of file. e.g. 
mydata.csv, - Raises: - RuntimeError: when data type is not supported - Returns: - the anchor tag to download object_to_download - Examples: - download_button('Click to download data!', your_df, 'YOUR_DF.xlsx'), - download_button('Click to download text!', your_str.encode(), 'YOUR_STRING.txt') - """ - - # inspired by https://gist.github.com/chad-m/6be98ed6cf1c4f17d09b7f6e5ca2978f - - data_as_bytes: bytes - if isinstance(data, str): - data_as_bytes = data.encode() - elif isinstance(data, io.TextIOWrapper): - string_data = data.read() - data_as_bytes = string_data.encode() - # mimetype = mimetype or "text/plain" - # Assume bytes; try methods until we run out. - elif isinstance(data, bytes): - data_as_bytes = data - elif isinstance(data, io.BytesIO): - data.seek(0) - data_as_bytes = data.getvalue() - elif isinstance(data, io.BufferedReader): - data.seek(0) - data_as_bytes = data.read() - elif isinstance(data, io.RawIOBase): - data.seek(0) - data_as_bytes = data.read() or b"" - elif HAS_PD and hasattr(data, "to_excel"): - bio = io.BytesIO() - data.to_excel(bio) - bio.seek(0) - data_as_bytes = bio.read() - else: - raise RuntimeError("Invalid binary data format: %s" % type(data)) - - b64 = base64.b64encode(data_as_bytes).decode() - button_uuid = str(uuid.uuid4()).replace("-", "") - button_id = re.sub(r"\d+", "", button_uuid) - - custom_css = f""" - """ - - dl_link = ( - custom_css - + f'{label}

    ' - ) - - div_dl_link = f"""
    {dl_link}
    """ - st.markdown(div_dl_link, unsafe_allow_html=True) - return dl_link diff --git a/spaces/abdvl/datahub_qa_bot/docs/managed-datahub/release-notes/v_0_1_70.md b/spaces/abdvl/datahub_qa_bot/docs/managed-datahub/release-notes/v_0_1_70.md deleted file mode 100644 index 4e779fae4e303cccafe7ad9d927f50024531e9e9..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/managed-datahub/release-notes/v_0_1_70.md +++ /dev/null @@ -1,12 +0,0 @@ -# v0.1.70 ---- -This is a scheduled release which contains all changes from OSS DataHub upto commit `70659711a841bcce4bb1e0350027704b3783f6a5`. In addition to all the features added in OSS DataHub below are Managed DataHub specific release notes. - -Release Availability Date ---- -30 Dec 2022 - -## Release Changlog ---- -- Improvements in Caching implementation to fix search consistency problems -- We have heard many organisations ask for metrics for the SaaS product. We have made good progress towards this goal which allows us to share Grafana dashboards. We will be testing it selectively. Expect more updates in coming month on this. diff --git a/spaces/aiEDUcurriculum/introtoAI-pets-project/info.md b/spaces/aiEDUcurriculum/introtoAI-pets-project/info.md deleted file mode 100644 index a75aa9a1e59cd40fae325ef5a3cc70b1054e6559..0000000000000000000000000000000000000000 --- a/spaces/aiEDUcurriculum/introtoAI-pets-project/info.md +++ /dev/null @@ -1,16 +0,0 @@ -# 😌 Get the Pet That's Best for Your Lifestyle - -### 🧐 Problem Statement and Research Summary -If you're like any of us, you have probably never had a scorpion for a pet. But what if you would really like it? One of our team members had a tarantula for a pet, but she accidentally killed it. Anyway, the point is, you might not know what pet is best for you! This survey is supposed to help you figure out the best pet for your lifestyle. We didn't want to tell you what kind of pet to get because we're obviously not experts, so instead we asked people what kind of pet was best for them and some other information about themselves. Take this as just a recommendation for a pet that might be good for you. Remember that all pets require a lot of care, even tarantulas, which need live crickets, water, and a lot of soil to burrow in. Don't find out the hard way. Anyway, that's not what our project is about. The point is, take the survey to see what kind of pet might be best. Enjoy! - -### 🎣 Data Collection Plan -The data for this model was collected on Amazon's Mechanical Turk! Mechanical Turk allows you to get a lot of survey responses quickly by paying people for their time. People can make a bit of extra money by completing short tasks while they ride the bus to work or wait for a friend, or even just to avoid boredom. You must be 18 years or older to be a worker. - -### 💥 Ethical Considerations (Data Privacy and Bias) -* Data privacy: we used a google form that was collected anonymously, since you honestly never know when someone will think some of their data is personal to them. Even though this is just a survey about pets, we tried to respect people's privacy. Also, it has been shown that you can identify a person based on just a few things about them like their date of birth, gender, and zip code. So it's just one more step for people to be figured out based on their pet preferences and how much free time they have. We just wanted to make sure that people who were giving us their time to take this survey felt safe. 
-* Bias: we were only able to collect about 95 or so responses in our survey, so our data is pretty limited. You can tell by the accuracy of the model that we weren't able to get enough data because it's not very accurate. This could cause bias because the model might not reflect the real world. What if actually people really want to adopt scorpions, just not the people we surveyed? We may never know tbh. However, people aren't using our data to make life-changing decisions, probably, so just keep informed about the possibility of bias and you won't base a big decision on a model that isn't perfect. Our project can be kind of like a suggestion for you. - -### 👻 Our Team -This app was designed and built by The Curriculum Team at [The AI Education Project](https://aiedu.org). - -![aiEDU logo](https://images.squarespace-cdn.com/content/v1/5e4efdef6d10420691f02bc1/5db5a8a3-1761-4fce-a096-bd5f2515162f/aiEDU+_black+logo+stacked.png?format=100w) diff --git a/spaces/akhaliq/Mask2Former/mask2former/maskformer_model.py b/spaces/akhaliq/Mask2Former/mask2former/maskformer_model.py deleted file mode 100644 index 88ce76d37adc678ed8c9c7df17271120c75512d3..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Mask2Former/mask2former/maskformer_model.py +++ /dev/null @@ -1,380 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from typing import Tuple - -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.config import configurable -from detectron2.data import MetadataCatalog -from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head -from detectron2.modeling.backbone import Backbone -from detectron2.modeling.postprocessing import sem_seg_postprocess -from detectron2.structures import Boxes, ImageList, Instances, BitMasks -from detectron2.utils.memory import retry_if_cuda_oom - -from .modeling.criterion import SetCriterion -from .modeling.matcher import HungarianMatcher - - -@META_ARCH_REGISTRY.register() -class MaskFormer(nn.Module): - """ - Main class for mask classification semantic segmentation architectures. - """ - - @configurable - def __init__( - self, - *, - backbone: Backbone, - sem_seg_head: nn.Module, - criterion: nn.Module, - num_queries: int, - object_mask_threshold: float, - overlap_threshold: float, - metadata, - size_divisibility: int, - sem_seg_postprocess_before_inference: bool, - pixel_mean: Tuple[float], - pixel_std: Tuple[float], - # inference - semantic_on: bool, - panoptic_on: bool, - instance_on: bool, - test_topk_per_image: int, - ): - """ - Args: - backbone: a backbone module, must follow detectron2's backbone interface - sem_seg_head: a module that predicts semantic segmentation from backbone features - criterion: a module that defines the loss - num_queries: int, number of queries - object_mask_threshold: float, threshold to filter query based on classification score - for panoptic segmentation inference - overlap_threshold: overlap threshold used in general inference for panoptic segmentation - metadata: dataset meta, get `thing` and `stuff` category names for panoptic - segmentation inference - size_divisibility: Some backbones require the input height and width to be divisible by a - specific integer. We can use this to override such requirement. - sem_seg_postprocess_before_inference: whether to resize the prediction back - to original input size before semantic segmentation inference or after. 
- For high-resolution dataset like Mapillary, resizing predictions before - inference will cause OOM error. - pixel_mean, pixel_std: list or tuple with #channels element, representing - the per-channel mean and std to be used to normalize the input image - semantic_on: bool, whether to output semantic segmentation prediction - instance_on: bool, whether to output instance segmentation prediction - panoptic_on: bool, whether to output panoptic segmentation prediction - test_topk_per_image: int, instance segmentation parameter, keep topk instances per image - """ - super().__init__() - self.backbone = backbone - self.sem_seg_head = sem_seg_head - self.criterion = criterion - self.num_queries = num_queries - self.overlap_threshold = overlap_threshold - self.object_mask_threshold = object_mask_threshold - self.metadata = metadata - if size_divisibility < 0: - # use backbone size_divisibility if not set - size_divisibility = self.backbone.size_divisibility - self.size_divisibility = size_divisibility - self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference - self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) - self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) - - # additional args - self.semantic_on = semantic_on - self.instance_on = instance_on - self.panoptic_on = panoptic_on - self.test_topk_per_image = test_topk_per_image - - if not self.semantic_on: - assert self.sem_seg_postprocess_before_inference - - @classmethod - def from_config(cls, cfg): - backbone = build_backbone(cfg) - sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) - - # Loss parameters: - deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION - no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT - - # loss weights - class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT - dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT - mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT - - # building criterion - matcher = HungarianMatcher( - cost_class=class_weight, - cost_mask=mask_weight, - cost_dice=dice_weight, - num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS, - ) - - weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight} - - if deep_supervision: - dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS - aux_weight_dict = {} - for i in range(dec_layers - 1): - aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) - weight_dict.update(aux_weight_dict) - - losses = ["labels", "masks"] - - criterion = SetCriterion( - sem_seg_head.num_classes, - matcher=matcher, - weight_dict=weight_dict, - eos_coef=no_object_weight, - losses=losses, - num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS, - oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO, - importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO, - ) - - return { - "backbone": backbone, - "sem_seg_head": sem_seg_head, - "criterion": criterion, - "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES, - "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD, - "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD, - "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), - "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY, - "sem_seg_postprocess_before_inference": ( - cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE - or cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON - or cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON - ), - "pixel_mean": cfg.MODEL.PIXEL_MEAN, - "pixel_std": 
cfg.MODEL.PIXEL_STD, - # inference - "semantic_on": cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON, - "instance_on": cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON, - "panoptic_on": cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON, - "test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, - } - - @property - def device(self): - return self.pixel_mean.device - - def forward(self, batched_inputs): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper`. - Each item in the list contains the inputs for one image. - For now, each item in the list is a dict that contains: - * "image": Tensor, image in (C, H, W) format. - * "instances": per-region ground truth - * Other information that's included in the original dicts, such as: - "height", "width" (int): the output resolution of the model (may be different - from input resolution), used in inference. - Returns: - list[dict]: - each dict has the results for one image. The dict contains the following keys: - - * "sem_seg": - A Tensor that represents the - per-pixel segmentation prediced by the head. - The prediction has shape KxHxW that represents the logits of - each class for each pixel. - * "panoptic_seg": - A tuple that represent panoptic output - panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. - segments_info (list[dict]): Describe each segment in `panoptic_seg`. - Each dict contains keys "id", "category_id", "isthing". - """ - images = [x["image"].to(self.device) for x in batched_inputs] - images = [(x - self.pixel_mean) / self.pixel_std for x in images] - images = ImageList.from_tensors(images, self.size_divisibility) - - features = self.backbone(images.tensor) - outputs = self.sem_seg_head(features) - - if self.training: - # mask classification target - if "instances" in batched_inputs[0]: - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - targets = self.prepare_targets(gt_instances, images) - else: - targets = None - - # bipartite matching-based loss - losses = self.criterion(outputs, targets) - - for k in list(losses.keys()): - if k in self.criterion.weight_dict: - losses[k] *= self.criterion.weight_dict[k] - else: - # remove this loss if not specified in `weight_dict` - losses.pop(k) - return losses - else: - mask_cls_results = outputs["pred_logits"] - mask_pred_results = outputs["pred_masks"] - # upsample masks - mask_pred_results = F.interpolate( - mask_pred_results, - size=(images.tensor.shape[-2], images.tensor.shape[-1]), - mode="bilinear", - align_corners=False, - ) - - del outputs - - processed_results = [] - for mask_cls_result, mask_pred_result, input_per_image, image_size in zip( - mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes - ): - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - processed_results.append({}) - - if self.sem_seg_postprocess_before_inference: - mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)( - mask_pred_result, image_size, height, width - ) - mask_cls_result = mask_cls_result.to(mask_pred_result) - - # semantic segmentation inference - if self.semantic_on: - r = retry_if_cuda_oom(self.semantic_inference)(mask_cls_result, mask_pred_result) - if not self.sem_seg_postprocess_before_inference: - r = retry_if_cuda_oom(sem_seg_postprocess)(r, image_size, height, width) - processed_results[-1]["sem_seg"] = r - - # panoptic segmentation inference - if self.panoptic_on: - panoptic_r = retry_if_cuda_oom(self.panoptic_inference)(mask_cls_result, 
mask_pred_result) - processed_results[-1]["panoptic_seg"] = panoptic_r - - # instance segmentation inference - if self.instance_on: - instance_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result) - processed_results[-1]["instances"] = instance_r - - return processed_results - - def prepare_targets(self, targets, images): - h_pad, w_pad = images.tensor.shape[-2:] - new_targets = [] - for targets_per_image in targets: - # pad gt - gt_masks = targets_per_image.gt_masks - padded_masks = torch.zeros((gt_masks.shape[0], h_pad, w_pad), dtype=gt_masks.dtype, device=gt_masks.device) - padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks - new_targets.append( - { - "labels": targets_per_image.gt_classes, - "masks": padded_masks, - } - ) - return new_targets - - def semantic_inference(self, mask_cls, mask_pred): - mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1] - mask_pred = mask_pred.sigmoid() - semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred) - return semseg - - def panoptic_inference(self, mask_cls, mask_pred): - scores, labels = F.softmax(mask_cls, dim=-1).max(-1) - mask_pred = mask_pred.sigmoid() - - keep = labels.ne(self.sem_seg_head.num_classes) & (scores > self.object_mask_threshold) - cur_scores = scores[keep] - cur_classes = labels[keep] - cur_masks = mask_pred[keep] - cur_mask_cls = mask_cls[keep] - cur_mask_cls = cur_mask_cls[:, :-1] - - cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks - - h, w = cur_masks.shape[-2:] - panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device) - segments_info = [] - - current_segment_id = 0 - - if cur_masks.shape[0] == 0: - # We didn't detect any mask :( - return panoptic_seg, segments_info - else: - # take argmax - cur_mask_ids = cur_prob_masks.argmax(0) - stuff_memory_list = {} - for k in range(cur_classes.shape[0]): - pred_class = cur_classes[k].item() - isthing = pred_class in self.metadata.thing_dataset_id_to_contiguous_id.values() - mask_area = (cur_mask_ids == k).sum().item() - original_area = (cur_masks[k] >= 0.5).sum().item() - mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5) - - if mask_area > 0 and original_area > 0 and mask.sum().item() > 0: - if mask_area / original_area < self.overlap_threshold: - continue - - # merge stuff regions - if not isthing: - if int(pred_class) in stuff_memory_list.keys(): - panoptic_seg[mask] = stuff_memory_list[int(pred_class)] - continue - else: - stuff_memory_list[int(pred_class)] = current_segment_id + 1 - - current_segment_id += 1 - panoptic_seg[mask] = current_segment_id - - segments_info.append( - { - "id": current_segment_id, - "isthing": bool(isthing), - "category_id": int(pred_class), - } - ) - - return panoptic_seg, segments_info - - def instance_inference(self, mask_cls, mask_pred): - # mask_pred is already processed to have the same shape as original input - image_size = mask_pred.shape[-2:] - - # [Q, K] - scores = F.softmax(mask_cls, dim=-1)[:, :-1] - labels = torch.arange(self.sem_seg_head.num_classes, device=self.device).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1) - # scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False) - scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.test_topk_per_image, sorted=False) - labels_per_image = labels[topk_indices] - - topk_indices = topk_indices // self.sem_seg_head.num_classes - # mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1) - mask_pred = mask_pred[topk_indices] - - # if this 
is panoptic segmentation, we only keep the "thing" classes - if self.panoptic_on: - keep = torch.zeros_like(scores_per_image).bool() - for i, lab in enumerate(labels_per_image): - keep[i] = lab in self.metadata.thing_dataset_id_to_contiguous_id.values() - - scores_per_image = scores_per_image[keep] - labels_per_image = labels_per_image[keep] - mask_pred = mask_pred[keep] - - result = Instances(image_size) - # mask (before sigmoid) - result.pred_masks = (mask_pred > 0).float() - result.pred_boxes = Boxes(torch.zeros(mask_pred.size(0), 4)) - # Uncomment the following to get boxes from masks (this is slow) - # result.pred_boxes = BitMasks(mask_pred > 0).get_bounding_boxes() - - # calculate average mask prob - mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * result.pred_masks.flatten(1)).sum(1) / (result.pred_masks.flatten(1).sum(1) + 1e-6) - result.scores = scores_per_image * mask_scores_per_image - result.pred_classes = labels_per_image - return result diff --git a/spaces/akhaliq/PaintTransformer/train/train.py b/spaces/akhaliq/PaintTransformer/train/train.py deleted file mode 100644 index d475e5f38030b7e009333ebcda370711897b1c03..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/PaintTransformer/train/train.py +++ /dev/null @@ -1,58 +0,0 @@ -import time -from options.train_options import TrainOptions -from data import create_dataset -from models import create_model -from util.visualizer import Visualizer - -if __name__ == '__main__': - opt = TrainOptions().parse() # get training options - dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options - dataset_size = len(dataset) # get the number of images in the dataset. - print('The number of training images = %d' % dataset_size) - - model = create_model(opt) # create a model given opt.model and other options - model.setup(opt) # regular setup: load and print networks; create schedulers - visualizer = Visualizer(opt) # create a visualizer that display/save images and plots - total_iters = 0 # the total number of training iterations - - for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1): - epoch_start_time = time.time() # timer for entire epoch - iter_data_time = time.time() # timer for data loading per iteration - epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch - visualizer.reset() # reset visualizer: make sure it saves results to HTML at least once every epoch - for i, data in enumerate(dataset): # inner loop within one epoch - iter_start_time = time.time() # timer for computation per iteration - if total_iters % opt.print_freq == 0: - t_data = iter_start_time - iter_data_time - - total_iters += opt.batch_size - epoch_iter += opt.batch_size - model.set_input(data) # unpack data from dataset and apply preprocessing - model.optimize_parameters() # calculate loss functions, get gradients, update network weights - - if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file - save_result = total_iters % opt.update_html_freq == 0 - model.compute_visuals() - visualizer.display_current_results(model.get_current_visuals(), epoch, save_result) - - if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk - losses = model.get_current_losses() - t_comp = (time.time() - iter_start_time) / opt.batch_size - visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data) - if opt.display_id > 0: - visualizer.plot_current_losses(epoch, 
float(epoch_iter) / dataset_size, losses) - - if total_iters % opt.save_latest_freq == 0: # cache our latest model every iterations - print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters)) - save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest' - model.save_networks(save_suffix) - - iter_data_time = time.time() - if epoch % opt.save_epoch_freq == 0: # cache our model every epochs - print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters)) - model.save_networks('latest') - model.save_networks(epoch) - - print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, - time.time() - epoch_start_time)) - model.update_learning_rate() # update learning rates in the beginning of every epoch. diff --git a/spaces/akhaliq/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/bin/block_randomize.py b/spaces/akhaliq/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/bin/block_randomize.py deleted file mode 100644 index d20c3583db347e51cb8407e8fc63ae92b1bec178..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/bin/block_randomize.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. - -#!/usr/bin/python3.6 - -# simple command-line wrapper around the chunked_dataset_iterator -# Example: -# block_randomize my_chunked_data_folder/ -# block_randomize --azure-storage-key $MY_KEY https://myaccount.blob.core.windows.net/mycontainer/my_chunked_data_folder - -import os, sys, inspect - -sys.path.insert( - 0, - os.path.dirname( - os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) - ), -) # find our imports - -from infinibatch.datasets import chunked_dataset_iterator - -from typing import Union, Iterator, Callable, Any, Optional, Dict -import os, sys, re -import gzip - - -# helper functions to abstract access to Azure blobs -# @TODO: These will be abstracted into a helper library in a future version. -def _try_parse_azure_blob_uri(path: str): - try: - m = re.compile("https://([a-z0-9]*).blob.core.windows.net/([^/]*)/(.*)").match( - path - ) - # print (m.group(1)) - # print (m.group(2)) - # print (m.group(3)) - return (m.group(1), m.group(2), m.group(3)) - except: - return None - - -def _get_azure_key( - storage_account: str, credentials: Optional[Union[str, Dict[str, str]]] -): - if not credentials: - return None - elif isinstance(credentials, str): - return credentials - else: - return credentials[storage_account] - - -def read_utf8_file( - path: str, credentials: Optional[Union[str, Dict[str, str]]] -) -> Iterator[str]: - blob_data = _try_parse_azure_blob_uri(path) - if blob_data is None: - with open(path, "rb") as f: - data = f.read() - else: - try: - # pip install azure-storage-blob - from azure.storage.blob import BlobClient - except: - print( - "Failed to import azure.storage.blob. 
Please pip install azure-storage-blob", - file=sys.stderr, - ) - raise - data = ( - BlobClient.from_blob_url( - path, - credential=_get_azure_key( - storage_account=blob_data[0], credentials=credentials - ), - ) - .download_blob() - .readall() - ) - if path.endswith(".gz"): - data = gzip.decompress(data) - # @TODO: auto-detect UCS-2 by BOM - return iter(data.decode(encoding="utf-8").splitlines()) - - -def enumerate_files( - dir: str, ext: str, credentials: Optional[Union[str, Dict[str, str]]] -): - blob_data = _try_parse_azure_blob_uri(dir) - if blob_data is None: - return [ - os.path.join(dir, path.name) - for path in os.scandir(dir) - if path.is_file() and (ext is None or path.name.endswith(ext)) - ] - else: - try: - # pip install azure-storage-blob - from azure.storage.blob import ContainerClient - except: - print( - "Failed to import azure.storage.blob. Please pip install azure-storage-blob", - file=sys.stderr, - ) - raise - account, container, blob_path = blob_data - - print("enumerate_files: enumerating blobs in", dir, file=sys.stderr, flush=True) - # @BUGBUG: The prefix does not seem to have to start; seems it can also be a substring - container_uri = "https://" + account + ".blob.core.windows.net/" + container - container_client = ContainerClient.from_container_url( - container_uri, credential=_get_azure_key(account, credentials) - ) - if not blob_path.endswith("/"): - blob_path += "/" - blob_uris = [ - container_uri + "/" + blob["name"] - for blob in container_client.walk_blobs(blob_path, delimiter="") - if (ext is None or blob["name"].endswith(ext)) - ] - print( - "enumerate_files:", - len(blob_uris), - "blobs found", - file=sys.stderr, - flush=True, - ) - for blob_name in blob_uris[:10]: - print(blob_name, file=sys.stderr, flush=True) - return blob_uris - - -if sys.argv[1] == "--azure-storage-key": - credential = sys.argv[2] - paths = sys.argv[3:] -else: - credential = None - paths = sys.argv[1:] - -chunk_file_paths = [ # enumerate all .gz files in the given paths - subpath for path in paths for subpath in enumerate_files(path, ".gz", credential) -] -chunk_file_paths.sort() # make sure file order is always the same, independent of OS -print( - "block_randomize: reading from", - len(chunk_file_paths), - "chunk files", - file=sys.stderr, -) - -ds = chunked_dataset_iterator( - chunk_refs=chunk_file_paths, - read_chunk_fn=lambda path: read_utf8_file(path, credential), - shuffle=True, - buffer_size=1000000, - seed=1, - use_windowed=True, -) -for line in ds: - print(line) diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/index/sources.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/index/sources.py deleted file mode 100644 index eec3f12f7e394a9eba2ebc43cf754a0040cdebf3..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/index/sources.py +++ /dev/null @@ -1,224 +0,0 @@ -import logging -import mimetypes -import os -import pathlib -from typing import Callable, Iterable, Optional, Tuple - -from pip._internal.models.candidate import InstallationCandidate -from pip._internal.models.link import Link -from pip._internal.utils.urls import path_to_url, url_to_path -from pip._internal.vcs import is_url - -logger = logging.getLogger(__name__) - -FoundCandidates = Iterable[InstallationCandidate] -FoundLinks = Iterable[Link] -CandidatesFromPage = Callable[[Link], Iterable[InstallationCandidate]] -PageValidator = Callable[[Link], bool] - - -class 
LinkSource: - @property - def link(self) -> Optional[Link]: - """Returns the underlying link, if there's one.""" - raise NotImplementedError() - - def page_candidates(self) -> FoundCandidates: - """Candidates found by parsing an archive listing HTML file.""" - raise NotImplementedError() - - def file_links(self) -> FoundLinks: - """Links found by specifying archives directly.""" - raise NotImplementedError() - - -def _is_html_file(file_url: str) -> bool: - return mimetypes.guess_type(file_url, strict=False)[0] == "text/html" - - -class _FlatDirectorySource(LinkSource): - """Link source specified by ``--find-links=``. - - This looks the content of the directory, and returns: - - * ``page_candidates``: Links listed on each HTML file in the directory. - * ``file_candidates``: Archives in the directory. - """ - - def __init__( - self, - candidates_from_page: CandidatesFromPage, - path: str, - ) -> None: - self._candidates_from_page = candidates_from_page - self._path = pathlib.Path(os.path.realpath(path)) - - @property - def link(self) -> Optional[Link]: - return None - - def page_candidates(self) -> FoundCandidates: - for path in self._path.iterdir(): - url = path_to_url(str(path)) - if not _is_html_file(url): - continue - yield from self._candidates_from_page(Link(url)) - - def file_links(self) -> FoundLinks: - for path in self._path.iterdir(): - url = path_to_url(str(path)) - if _is_html_file(url): - continue - yield Link(url) - - -class _LocalFileSource(LinkSource): - """``--find-links=`` or ``--[extra-]index-url=``. - - If a URL is supplied, it must be a ``file:`` URL. If a path is supplied to - the option, it is converted to a URL first. This returns: - - * ``page_candidates``: Links listed on an HTML file. - * ``file_candidates``: The non-HTML file. - """ - - def __init__( - self, - candidates_from_page: CandidatesFromPage, - link: Link, - ) -> None: - self._candidates_from_page = candidates_from_page - self._link = link - - @property - def link(self) -> Optional[Link]: - return self._link - - def page_candidates(self) -> FoundCandidates: - if not _is_html_file(self._link.url): - return - yield from self._candidates_from_page(self._link) - - def file_links(self) -> FoundLinks: - if _is_html_file(self._link.url): - return - yield self._link - - -class _RemoteFileSource(LinkSource): - """``--find-links=`` or ``--[extra-]index-url=``. - - This returns: - - * ``page_candidates``: Links listed on an HTML file. - * ``file_candidates``: The non-HTML file. - """ - - def __init__( - self, - candidates_from_page: CandidatesFromPage, - page_validator: PageValidator, - link: Link, - ) -> None: - self._candidates_from_page = candidates_from_page - self._page_validator = page_validator - self._link = link - - @property - def link(self) -> Optional[Link]: - return self._link - - def page_candidates(self) -> FoundCandidates: - if not self._page_validator(self._link): - return - yield from self._candidates_from_page(self._link) - - def file_links(self) -> FoundLinks: - yield self._link - - -class _IndexDirectorySource(LinkSource): - """``--[extra-]index-url=``. - - This is treated like a remote URL; ``candidates_from_page`` contains logic - for this by appending ``index.html`` to the link. 
- """ - - def __init__( - self, - candidates_from_page: CandidatesFromPage, - link: Link, - ) -> None: - self._candidates_from_page = candidates_from_page - self._link = link - - @property - def link(self) -> Optional[Link]: - return self._link - - def page_candidates(self) -> FoundCandidates: - yield from self._candidates_from_page(self._link) - - def file_links(self) -> FoundLinks: - return () - - -def build_source( - location: str, - *, - candidates_from_page: CandidatesFromPage, - page_validator: PageValidator, - expand_dir: bool, - cache_link_parsing: bool, -) -> Tuple[Optional[str], Optional[LinkSource]]: - - path: Optional[str] = None - url: Optional[str] = None - if os.path.exists(location): # Is a local path. - url = path_to_url(location) - path = location - elif location.startswith("file:"): # A file: URL. - url = location - path = url_to_path(location) - elif is_url(location): - url = location - - if url is None: - msg = ( - "Location '%s' is ignored: " - "it is either a non-existing path or lacks a specific scheme." - ) - logger.warning(msg, location) - return (None, None) - - if path is None: - source: LinkSource = _RemoteFileSource( - candidates_from_page=candidates_from_page, - page_validator=page_validator, - link=Link(url, cache_link_parsing=cache_link_parsing), - ) - return (url, source) - - if os.path.isdir(path): - if expand_dir: - source = _FlatDirectorySource( - candidates_from_page=candidates_from_page, - path=path, - ) - else: - source = _IndexDirectorySource( - candidates_from_page=candidates_from_page, - link=Link(url, cache_link_parsing=cache_link_parsing), - ) - return (url, source) - elif os.path.isfile(path): - source = _LocalFileSource( - candidates_from_page=candidates_from_page, - link=Link(url, cache_link_parsing=cache_link_parsing), - ) - return (url, source) - logger.warning( - "Location '%s' is ignored: it is neither a file nor a directory.", - location, - ) - return (url, None) diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/token.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/token.py deleted file mode 100644 index 9013acb709c231380df66186c40247a88fcb6dc9..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/token.py +++ /dev/null @@ -1,212 +0,0 @@ -""" - pygments.token - ~~~~~~~~~~~~~~ - - Basic token types and the standard tokens. - - :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - - -class _TokenType(tuple): - parent = None - - def split(self): - buf = [] - node = self - while node is not None: - buf.append(node) - node = node.parent - buf.reverse() - return buf - - def __init__(self, *args): - # no need to call super.__init__ - self.subtypes = set() - - def __contains__(self, val): - return self is val or ( - type(val) is self.__class__ and - val[:len(self)] == self - ) - - def __getattr__(self, val): - if not val or not val[0].isupper(): - return tuple.__getattribute__(self, val) - new = _TokenType(self + (val,)) - setattr(self, val, new) - self.subtypes.add(new) - new.parent = self - return new - - def __repr__(self): - return 'Token' + (self and '.' 
or '') + '.'.join(self) - - def __copy__(self): - # These instances are supposed to be singletons - return self - - def __deepcopy__(self, memo): - # These instances are supposed to be singletons - return self - - -Token = _TokenType() - -# Special token types -Text = Token.Text -Whitespace = Text.Whitespace -Escape = Token.Escape -Error = Token.Error -# Text that doesn't belong to this lexer (e.g. HTML in PHP) -Other = Token.Other - -# Common token types for source code -Keyword = Token.Keyword -Name = Token.Name -Literal = Token.Literal -String = Literal.String -Number = Literal.Number -Punctuation = Token.Punctuation -Operator = Token.Operator -Comment = Token.Comment - -# Generic types for non-source code -Generic = Token.Generic - -# String and some others are not direct children of Token. -# alias them: -Token.Token = Token -Token.String = String -Token.Number = Number - - -def is_token_subtype(ttype, other): - """ - Return True if ``ttype`` is a subtype of ``other``. - - exists for backwards compatibility. use ``ttype in other`` now. - """ - return ttype in other - - -def string_to_tokentype(s): - """ - Convert a string into a token type:: - - >>> string_to_token('String.Double') - Token.Literal.String.Double - >>> string_to_token('Token.Literal.Number') - Token.Literal.Number - >>> string_to_token('') - Token - - Tokens that are already tokens are returned unchanged: - - >>> string_to_token(String) - Token.Literal.String - """ - if isinstance(s, _TokenType): - return s - if not s: - return Token - node = Token - for item in s.split('.'): - node = getattr(node, item) - return node - - -# Map standard token types to short names, used in CSS class naming. -# If you add a new item, please be sure to run this file to perform -# a consistency check for duplicate values. 
-STANDARD_TYPES = { - Token: '', - - Text: '', - Whitespace: 'w', - Escape: 'esc', - Error: 'err', - Other: 'x', - - Keyword: 'k', - Keyword.Constant: 'kc', - Keyword.Declaration: 'kd', - Keyword.Namespace: 'kn', - Keyword.Pseudo: 'kp', - Keyword.Reserved: 'kr', - Keyword.Type: 'kt', - - Name: 'n', - Name.Attribute: 'na', - Name.Builtin: 'nb', - Name.Builtin.Pseudo: 'bp', - Name.Class: 'nc', - Name.Constant: 'no', - Name.Decorator: 'nd', - Name.Entity: 'ni', - Name.Exception: 'ne', - Name.Function: 'nf', - Name.Function.Magic: 'fm', - Name.Property: 'py', - Name.Label: 'nl', - Name.Namespace: 'nn', - Name.Other: 'nx', - Name.Tag: 'nt', - Name.Variable: 'nv', - Name.Variable.Class: 'vc', - Name.Variable.Global: 'vg', - Name.Variable.Instance: 'vi', - Name.Variable.Magic: 'vm', - - Literal: 'l', - Literal.Date: 'ld', - - String: 's', - String.Affix: 'sa', - String.Backtick: 'sb', - String.Char: 'sc', - String.Delimiter: 'dl', - String.Doc: 'sd', - String.Double: 's2', - String.Escape: 'se', - String.Heredoc: 'sh', - String.Interpol: 'si', - String.Other: 'sx', - String.Regex: 'sr', - String.Single: 's1', - String.Symbol: 'ss', - - Number: 'm', - Number.Bin: 'mb', - Number.Float: 'mf', - Number.Hex: 'mh', - Number.Integer: 'mi', - Number.Integer.Long: 'il', - Number.Oct: 'mo', - - Operator: 'o', - Operator.Word: 'ow', - - Punctuation: 'p', - - Comment: 'c', - Comment.Hashbang: 'ch', - Comment.Multiline: 'cm', - Comment.Preproc: 'cp', - Comment.PreprocFile: 'cpf', - Comment.Single: 'c1', - Comment.Special: 'cs', - - Generic: 'g', - Generic.Deleted: 'gd', - Generic.Emph: 'ge', - Generic.Error: 'gr', - Generic.Heading: 'gh', - Generic.Inserted: 'gi', - Generic.Output: 'go', - Generic.Prompt: 'gp', - Generic.Strong: 'gs', - Generic.Subheading: 'gu', - Generic.Traceback: 'gt', -} diff --git a/spaces/ali-ghamdan/deoldify/fastai/data_block.py b/spaces/ali-ghamdan/deoldify/fastai/data_block.py deleted file mode 100644 index d630b10cb4f6fd27d84aa2ba4419d975b1b2f2cf..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/deoldify/fastai/data_block.py +++ /dev/null @@ -1,804 +0,0 @@ -from .torch_core import * -from .basic_data import * -from .layers import * -from numbers import Integral - -__all__ = ['ItemList', 'CategoryList', 'MultiCategoryList', 'MultiCategoryProcessor', 'LabelList', 'ItemLists', 'get_files', - 'PreProcessor', 'LabelLists', 'FloatList', 'CategoryProcessor', 'EmptyLabelList', 'MixedItem', 'MixedProcessor', - 'MixedItemList'] - -def _decode(df): - return np.array([[df.columns[i] for i,t in enumerate(x) if t==1] for x in df.values], dtype=np.object) - -def _maybe_squeeze(arr): return (arr if is1d(arr) else np.squeeze(arr)) - -def _path_to_same_str(p_fn): - "path -> str, but same on nt+posix, for alpha-sort only" - s_fn = str(p_fn) - s_fn = s_fn.replace('\\','.') - s_fn = s_fn.replace('/','.') - return s_fn - -def _get_files(parent, p, f, extensions): - p = Path(p)#.relative_to(parent) - if isinstance(extensions,str): extensions = [extensions] - low_extensions = [e.lower() for e in extensions] if extensions is not None else None - res = [p/o for o in f if not o.startswith('.') - and (extensions is None or f'.{o.split(".")[-1].lower()}' in low_extensions)] - return res - -def get_files(path:PathOrStr, extensions:Collection[str]=None, recurse:bool=False, - include:Optional[Collection[str]]=None, presort:bool=False)->FilePathList: - "Return list of files in `path` that have a suffix in `extensions`; optionally `recurse`." 
- if recurse: - res = [] - for i,(p,d,f) in enumerate(os.walk(path)): - # skip hidden dirs - if include is not None and i==0: d[:] = [o for o in d if o in include] - else: d[:] = [o for o in d if not o.startswith('.')] - res += _get_files(path, p, f, extensions) - if presort: res = sorted(res, key=lambda p: _path_to_same_str(p), reverse=False) - return res - else: - f = [o.name for o in os.scandir(path) if o.is_file()] - res = _get_files(path, path, f, extensions) - if presort: res = sorted(res, key=lambda p: _path_to_same_str(p), reverse=False) - return res - -class PreProcessor(): - "Basic class for a processor that will be applied to items at the end of the data block API." - def __init__(self, ds:Collection=None): self.ref_ds = ds - def process_one(self, item:Any): return item - def process(self, ds:Collection): ds.items = array([self.process_one(item) for item in ds.items]) - -PreProcessors = Union[PreProcessor, Collection[PreProcessor]] -fastai_types[PreProcessors] = 'PreProcessors' - -class ItemList(): - "A collection of items with `__len__` and `__getitem__` with `ndarray` indexing semantics." - _bunch,_processor,_label_cls,_square_show,_square_show_res = DataBunch,None,None,False,False - - def __init__(self, items:Iterator, path:PathOrStr='.', label_cls:Callable=None, inner_df:Any=None, - processor:PreProcessors=None, x:'ItemList'=None, ignore_empty:bool=False): - self.path = Path(path) - self.num_parts = len(self.path.parts) - self.items,self.x,self.ignore_empty = items,x,ignore_empty - if not isinstance(self.items,np.ndarray): self.items = array(self.items, dtype=object) - self.label_cls,self.inner_df,self.processor = ifnone(label_cls,self._label_cls),inner_df,processor - self._label_list,self._split = LabelList,ItemLists - self.copy_new = ['x', 'label_cls', 'path'] - - def __len__(self)->int: return len(self.items) or 1 - def get(self, i)->Any: - "Subclass if you want to customize how to create item `i` from `self.items`." - return self.items[i] - def __repr__(self)->str: - items = [self[i] for i in range(min(5,len(self.items)))] - return f'{self.__class__.__name__} ({len(self.items)} items)\n{show_some(items)}\nPath: {self.path}' - - def process(self, processor:PreProcessors=None): - "Apply `processor` or `self.processor` to `self`." - if processor is not None: self.processor = processor - self.processor = listify(self.processor) - for p in self.processor: p.process(self) - return self - - def process_one(self, item:ItemBase, processor:PreProcessors=None): - "Apply `processor` or `self.processor` to `item`." - if processor is not None: self.processor = processor - self.processor = listify(self.processor) - for p in self.processor: item = p.process_one(item) - return item - - def analyze_pred(self, pred:Tensor): - "Called on `pred` before `reconstruct` for additional preprocessing." - return pred - - def reconstruct(self, t:Tensor, x:Tensor=None): - "Reconstruct one of the underlying item for its data `t`." - return self[0].reconstruct(t,x) if has_arg(self[0].reconstruct, 'x') else self[0].reconstruct(t) - - def new(self, items:Iterator, processor:PreProcessors=None, **kwargs)->'ItemList': - "Create a new `ItemList` from `items`, keeping the same attributes." 
- processor = ifnone(processor, self.processor) - copy_d = {o:getattr(self,o) for o in self.copy_new} - kwargs = {**copy_d, **kwargs} - return self.__class__(items=items, processor=processor, **kwargs) - - def add(self, items:'ItemList'): - self.items = np.concatenate([self.items, items.items], 0) - if self.inner_df is not None and items.inner_df is not None: - self.inner_df = pd.concat([self.inner_df, items.inner_df]) - else: self.inner_df = self.inner_df or items.inner_df - return self - - def __getitem__(self,idxs:int)->Any: - "returns a single item based if `idxs` is an integer or a new `ItemList` object if `idxs` is a range." - idxs = try_int(idxs) - if isinstance(idxs, Integral): return self.get(idxs) - else: return self.new(self.items[idxs], inner_df=index_row(self.inner_df, idxs)) - - @classmethod - def from_folder(cls, path:PathOrStr, extensions:Collection[str]=None, recurse:bool=True, - include:Optional[Collection[str]]=None, processor:PreProcessors=None, presort:Optional[bool]=False, **kwargs)->'ItemList': - """Create an `ItemList` in `path` from the filenames that have a suffix in `extensions`. - `recurse` determines if we search subfolders.""" - path = Path(path) - return cls(get_files(path, extensions, recurse=recurse, include=include, presort=presort), path=path, processor=processor, **kwargs) - - @classmethod - def from_df(cls, df:DataFrame, path:PathOrStr='.', cols:IntsOrStrs=0, processor:PreProcessors=None, **kwargs)->'ItemList': - "Create an `ItemList` in `path` from the inputs in the `cols` of `df`." - inputs = df.iloc[:,df_names_to_idx(cols, df)] - assert not inputs.isna().any().any(), f"You have NaN values in column(s) {cols} of your dataframe, please fix it." - res = cls(items=_maybe_squeeze(inputs.values), path=path, inner_df=df, processor=processor, **kwargs) - return res - - @classmethod - def from_csv(cls, path:PathOrStr, csv_name:str, cols:IntsOrStrs=0, delimiter:str=None, header:str='infer', - processor:PreProcessors=None, **kwargs)->'ItemList': - """Create an `ItemList` in `path` from the inputs in the `cols` of `path/csv_name`""" - df = pd.read_csv(Path(path)/csv_name, delimiter=delimiter, header=header) - return cls.from_df(df, path=path, cols=cols, processor=processor, **kwargs) - - def _relative_item_path(self, i): return self.items[i].relative_to(self.path) - def _relative_item_paths(self): return [self._relative_item_path(i) for i in range_of(self.items)] - - def use_partial_data(self, sample_pct:float=0.01, seed:int=None)->'ItemList': - "Use only a sample of `sample_pct`of the full dataset and an optional `seed`." - if seed is not None: np.random.seed(seed) - rand_idx = np.random.permutation(range_of(self)) - cut = int(sample_pct * len(self)) - return self[rand_idx[:cut]] - - def to_text(self, fn:str): - "Save `self.items` to `fn` in `self.path`." - with open(self.path/fn, 'w') as f: f.writelines([f'{o}\n' for o in self._relative_item_paths()]) - - def filter_by_func(self, func:Callable)->'ItemList': - "Only keep elements for which `func` returns `True`." - self.items = array([o for o in self.items if func(o)]) - return self - - def filter_by_folder(self, include=None, exclude=None): - "Only keep filenames in `include` folder or reject the ones in `exclude`." 
- include,exclude = listify(include),listify(exclude) - def _inner(o): - if isinstance(o, Path): n = o.relative_to(self.path).parts[0] - else: n = o.split(os.path.sep)[len(str(self.path).split(os.path.sep))] - if include and not n in include: return False - if exclude and n in exclude: return False - return True - return self.filter_by_func(_inner) - - def filter_by_rand(self, p:float, seed:int=None): - "Keep random sample of `items` with probability `p` and an optional `seed`." - if seed is not None: set_all_seed(seed) - return self.filter_by_func(lambda o: rand_bool(p)) - - def no_split(self): - warn("`no_split` is deprecated, please use `split_none`.") - return self.split_none() - - def split_none(self): - "Don't split the data and create an empty validation set." - val = self[[]] - val.ignore_empty = True - return self._split(self.path, self, val) - - def split_by_list(self, train, valid): - "Split the data between `train` and `valid`." - return self._split(self.path, train, valid) - - def split_by_idxs(self, train_idx, valid_idx): - "Split the data between `train_idx` and `valid_idx`." - return self.split_by_list(self[train_idx], self[valid_idx]) - - def split_by_idx(self, valid_idx:Collection[int])->'ItemLists': - "Split the data according to the indexes in `valid_idx`." - #train_idx = [i for i in range_of(self.items) if i not in valid_idx] - train_idx = np.setdiff1d(arange_of(self.items), valid_idx) - return self.split_by_idxs(train_idx, valid_idx) - - def _get_by_folder(self, name): - return [i for i in range_of(self) if (self.items[i].parts[self.num_parts] if isinstance(self.items[i], Path) - else self.items[i].split(os.path.sep)[0]) == name ] - - def split_by_folder(self, train:str='train', valid:str='valid')->'ItemLists': - "Split the data depending on the folder (`train` or `valid`) in which the filenames are." - return self.split_by_idxs(self._get_by_folder(train), self._get_by_folder(valid)) - - def random_split_by_pct(self, valid_pct:float=0.2, seed:int=None): - warn("`random_split_by_pct` is deprecated, please use `split_by_rand_pct`.") - return self.split_by_rand_pct(valid_pct=valid_pct, seed=seed) - - def split_by_rand_pct(self, valid_pct:float=0.2, seed:int=None)->'ItemLists': - "Split the items randomly by putting `valid_pct` in the validation set, optional `seed` can be passed." - if valid_pct==0.: return self.split_none() - if seed is not None: np.random.seed(seed) - rand_idx = np.random.permutation(range_of(self)) - cut = int(valid_pct * len(self)) - return self.split_by_idx(rand_idx[:cut]) - - def split_subsets(self, train_size:float, valid_size:float, seed=None) -> 'ItemLists': - "Split the items into train set with size `train_size * n` and valid set with size `valid_size * n`." - assert 0 < train_size < 1 - assert 0 < valid_size < 1 - assert train_size + valid_size <= 1. - if seed is not None: np.random.seed(seed) - n = len(self.items) - rand_idx = np.random.permutation(range(n)) - train_cut, valid_cut = int(train_size * n), int(valid_size * n) - return self.split_by_idxs(rand_idx[:train_cut], rand_idx[-valid_cut:]) - - def split_by_valid_func(self, func:Callable)->'ItemLists': - "Split the data by result of `func` (which returns `True` for validation set)." - valid_idx = [i for i,o in enumerate(self.items) if func(o)] - return self.split_by_idx(valid_idx) - - def split_by_files(self, valid_names:'ItemList')->'ItemLists': - "Split the data by using the names in `valid_names` for validation." 
- if isinstance(self.items[0], Path): return self.split_by_valid_func(lambda o: o.name in valid_names) - else: return self.split_by_valid_func(lambda o: os.path.basename(o) in valid_names) - - def split_by_fname_file(self, fname:PathOrStr, path:PathOrStr=None)->'ItemLists': - "Split the data by using the names in `fname` for the validation set. `path` will override `self.path`." - path = Path(ifnone(path, self.path)) - valid_names = loadtxt_str(path/fname) - return self.split_by_files(valid_names) - - def split_from_df(self, col:IntsOrStrs=2): - "Split the data from the `col` in the dataframe in `self.inner_df`." - valid_idx = np.where(self.inner_df.iloc[:,df_names_to_idx(col, self.inner_df)])[0] - return self.split_by_idx(valid_idx) - - def get_label_cls(self, labels, label_cls:Callable=None, label_delim:str=None, **kwargs): - "Return `label_cls` or guess one from the first element of `labels`." - if label_cls is not None: return label_cls - if self.label_cls is not None: return self.label_cls - if label_delim is not None: return MultiCategoryList - it = index_row(labels,0) - if isinstance(it, (float, np.float32)): return FloatList - if isinstance(try_int(it), (str, Integral)): return CategoryList - if isinstance(it, Collection): return MultiCategoryList - return ItemList #self.__class__ - - def _label_from_list(self, labels:Iterator, label_cls:Callable=None, from_item_lists:bool=False, **kwargs)->'LabelList': - "Label `self.items` with `labels`." - if not from_item_lists: - raise Exception("Your data isn't split, if you don't want a validation set, please use `split_none`.") - labels = array(labels, dtype=object) - label_cls = self.get_label_cls(labels, label_cls=label_cls, **kwargs) - y = label_cls(labels, path=self.path, **kwargs) - res = self._label_list(x=self, y=y) - return res - - def label_from_df(self, cols:IntsOrStrs=1, label_cls:Callable=None, **kwargs): - "Label `self.items` from the values in `cols` in `self.inner_df`." - labels = self.inner_df.iloc[:,df_names_to_idx(cols, self.inner_df)] - assert labels.isna().sum().sum() == 0, f"You have NaN values in column(s) {cols} of your dataframe, please fix it." - if is_listy(cols) and len(cols) > 1 and (label_cls is None or label_cls == MultiCategoryList): - new_kwargs,label_cls = dict(one_hot=True, classes= cols),MultiCategoryList - kwargs = {**new_kwargs, **kwargs} - return self._label_from_list(_maybe_squeeze(labels), label_cls=label_cls, **kwargs) - - def label_const(self, const:Any=0, label_cls:Callable=None, **kwargs)->'LabelList': - "Label every item with `const`." - return self.label_from_func(func=lambda o: const, label_cls=label_cls, **kwargs) - - def label_empty(self, **kwargs): - "Label every item with an `EmptyLabel`." - kwargs['label_cls'] = EmptyLabelList - return self.label_from_func(func=lambda o: 0., **kwargs) - - def label_from_func(self, func:Callable, label_cls:Callable=None, **kwargs)->'LabelList': - "Apply `func` to every input to get its label." - return self._label_from_list([func(o) for o in self.items], label_cls=label_cls, **kwargs) - - def label_from_folder(self, label_cls:Callable=None, **kwargs)->'LabelList': - "Give a label to each filename depending on its folder." - return self.label_from_func(func=lambda o: (o.parts if isinstance(o, Path) else o.split(os.path.sep))[-2], - label_cls=label_cls, **kwargs) - - def label_from_re(self, pat:str, full_path:bool=False, label_cls:Callable=None, **kwargs)->'LabelList': - "Apply the re in `pat` to determine the label of every filename. 
If `full_path`, search in the full name." - pat = re.compile(pat) - def _inner(o): - s = str((os.path.join(self.path,o) if full_path else o).as_posix()) - res = pat.search(s) - assert res,f'Failed to find "{pat}" in "{s}"' - return res.group(1) - return self.label_from_func(_inner, label_cls=label_cls, **kwargs) - - def databunch(self, **kwargs): - "To throw a clear error message when the data wasn't split and labeled." - raise Exception("Your data is neither split nor labeled, can't turn it into a `DataBunch` yet.") - -class EmptyLabelList(ItemList): - "Basic `ItemList` for dummy labels." - def get(self, i): return EmptyLabel() - def reconstruct(self, t:Tensor, x:Tensor=None): - if len(t.size()) == 0: return EmptyLabel() - return self.x.reconstruct(t,x) if has_arg(self.x.reconstruct, 'x') else self.x.reconstruct(t) - -class CategoryProcessor(PreProcessor): - "`PreProcessor` that create `classes` from `ds.items` and handle the mapping." - def __init__(self, ds:ItemList): - self.create_classes(ds.classes) - self.state_attrs,self.warns = ['classes'],[] - - def create_classes(self, classes): - self.classes = classes - if classes is not None: self.c2i = {v:k for k,v in enumerate(classes)} - - def generate_classes(self, items): - "Generate classes from `items` by taking the sorted unique values." - return uniqueify(items, sort=True) - - def process_one(self,item): - if isinstance(item, EmptyLabel): return item - res = self.c2i.get(item,None) - if res is None: self.warns.append(str(item)) - return res - - def process(self, ds): - if self.classes is None: self.create_classes(self.generate_classes(ds.items)) - ds.classes = self.classes - ds.c2i = self.c2i - super().process(ds) - - def __getstate__(self): return {n:getattr(self,n) for n in self.state_attrs} - def __setstate__(self, state:dict): - self.create_classes(state['classes']) - self.state_attrs = state.keys() - for n in state.keys(): - if n!='classes': setattr(self, n, state[n]) - -class CategoryListBase(ItemList): - "Basic `ItemList` for classification." - def __init__(self, items:Iterator, classes:Collection=None, **kwargs): - self.classes=classes - self.filter_missing_y = True - super().__init__(items, **kwargs) - self.copy_new.append('classes') - - @property - def c(self): return len(self.classes) - -class CategoryList(CategoryListBase): - "Basic `ItemList` for single classification labels." - _processor=CategoryProcessor - def __init__(self, items:Iterator, classes:Collection=None, label_delim:str=None, **kwargs): - super().__init__(items, classes=classes, **kwargs) - self.loss_func = CrossEntropyFlat() - - def get(self, i): - o = self.items[i] - if o is None: return None - return Category(o, self.classes[o]) - - def analyze_pred(self, pred, thresh:float=0.5): return pred.argmax() - - def reconstruct(self, t): - return Category(t, self.classes[t]) - -class MultiCategoryProcessor(CategoryProcessor): - "`PreProcessor` that create `classes` from `ds.items` and handle the mapping." - def __init__(self, ds:ItemList, one_hot:bool=False): - super().__init__(ds) - self.one_hot = one_hot - self.state_attrs.append('one_hot') - - def process_one(self,item): - if self.one_hot or isinstance(item, EmptyLabel): return item - res = [super(MultiCategoryProcessor, self).process_one(o) for o in item] - return [r for r in res if r is not None] - - def generate_classes(self, items): - "Generate classes from `items` by taking the sorted unique values." 
- classes = set() - for c in items: classes = classes.union(set(c)) - classes = list(classes) - classes.sort() - return classes - -class MultiCategoryList(CategoryListBase): - "Basic `ItemList` for multi-classification labels." - _processor=MultiCategoryProcessor - def __init__(self, items:Iterator, classes:Collection=None, label_delim:str=None, one_hot:bool=False, **kwargs): - if label_delim is not None: items = array(csv.reader(items.astype(str), delimiter=label_delim)) - super().__init__(items, classes=classes, **kwargs) - if one_hot: - assert classes is not None, "Please provide class names with `classes=...`" - self.processor = [MultiCategoryProcessor(self, one_hot=True)] - self.loss_func = BCEWithLogitsFlat() - self.one_hot = one_hot - self.copy_new += ['one_hot'] - - def get(self, i): - o = self.items[i] - if o is None: return None - if self.one_hot: return self.reconstruct(o.astype(np.float32)) - return MultiCategory(one_hot(o, self.c), [self.classes[p] for p in o], o) - - def analyze_pred(self, pred, thresh:float=0.5): - return (pred >= thresh).float() - - def reconstruct(self, t): - o = [i for i in range(self.c) if t[i] == 1.] - return MultiCategory(t, [self.classes[p] for p in o], o) - -class FloatList(ItemList): - "`ItemList` suitable for storing the floats in items for regression. Will add a `log` if this flag is `True`." - def __init__(self, items:Iterator, log:bool=False, classes:Collection=None, **kwargs): - super().__init__(np.array(items, dtype=np.float32), **kwargs) - self.log = log - self.copy_new.append('log') - self.c = self.items.shape[1] if len(self.items.shape) > 1 else 1 - self.loss_func = MSELossFlat() - - def get(self, i): - o = super().get(i) - return FloatItem(np.log(o) if self.log else o) - - def reconstruct(self,t): return FloatItem(t.numpy()) - -class ItemLists(): - "An `ItemList` for each of `train` and `valid` (optional `test`)." - def __init__(self, path:PathOrStr, train:ItemList, valid:ItemList): - self.path,self.train,self.valid,self.test = Path(path),train,valid,None - if not self.train.ignore_empty and len(self.train.items) == 0: - warn("Your training set is empty. If this is by design, pass `ignore_empty=True` to remove this warning.") - if not self.valid.ignore_empty and len(self.valid.items) == 0: - warn("""Your validation set is empty. 
If this is by design, use `split_none()` - or pass `ignore_empty=True` when labelling to remove this warning.""") - if isinstance(self.train, LabelList): self.__class__ = LabelLists - - def __dir__(self)->List[str]: - default_dir = dir(type(self)) + list(self.__dict__.keys()) - add_ons = ['label_const', 'label_empty', 'label_from_df', 'label_from_folder', 'label_from_func', - 'label_from_list', 'label_from_re'] - return default_dir + add_ons - - def __repr__(self)->str: - return f'{self.__class__.__name__};\n\nTrain: {self.train};\n\nValid: {self.valid};\n\nTest: {self.test}' - - def __getattr__(self, k): - ft = getattr(self.train, k) - if not isinstance(ft, Callable): return ft - fv = getattr(self.valid, k) - assert isinstance(fv, Callable) - def _inner(*args, **kwargs): - self.train = ft(*args, from_item_lists=True, **kwargs) - assert isinstance(self.train, LabelList) - kwargs['label_cls'] = self.train.y.__class__ - self.valid = fv(*args, from_item_lists=True, **kwargs) - self.__class__ = LabelLists - self.process() - return self - return _inner - - def __setstate__(self,data:Any): self.__dict__.update(data) - - @property - def lists(self): - res = [self.train,self.valid] - if self.test is not None: res.append(self.test) - return res - - def label_from_lists(self, train_labels:Iterator, valid_labels:Iterator, label_cls:Callable=None, **kwargs)->'LabelList': - "Use the labels in `train_labels` and `valid_labels` to label the data. `label_cls` will overwrite the default." - label_cls = self.train.get_label_cls(train_labels, label_cls) - self.train = self.train._label_list(x=self.train, y=label_cls(train_labels, **kwargs)) - self.valid = self.valid._label_list(x=self.valid, y=self.train.y.new(valid_labels, **kwargs)) - self.__class__ = LabelLists - self.process() - return self - - def transform(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs): - "Set `tfms` to be applied to the xs of the train and validation set." - if not tfms: tfms=(None,None) - assert is_listy(tfms) and len(tfms) == 2, "Please pass a list of two lists of transforms (train and valid)." - self.train.transform(tfms[0], **kwargs) - self.valid.transform(tfms[1], **kwargs) - if self.test: self.test.transform(tfms[1], **kwargs) - return self - - def transform_y(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs): - "Set `tfms` to be applied to the ys of the train and validation set." - if not tfms: tfms=(None,None) - self.train.transform_y(tfms[0], **kwargs) - self.valid.transform_y(tfms[1], **kwargs) - if self.test: self.test.transform_y(tfms[1], **kwargs) - return self - - def databunch(self, **kwargs): - "To throw a clear error message when the data wasn't labeled." - raise Exception("Your data isn't labeled, can't turn it into a `DataBunch` yet!") - -class LabelLists(ItemLists): - "A `LabelList` for each of `train` and `valid` (optional `test`)." - def get_processors(self): - "Read the default class processors if none have been set." - procs_x,procs_y = listify(self.train.x._processor),listify(self.train.y._processor) - xp = ifnone(self.train.x.processor, [p(ds=self.train.x) for p in procs_x]) - yp = ifnone(self.train.y.processor, [p(ds=self.train.y) for p in procs_y]) - return xp,yp - - def process(self): - "Process the inner datasets." - xp,yp = self.get_processors() - for ds,n in zip(self.lists, ['train','valid','test']): ds.process(xp, yp, name=n) - #progress_bar clear the outputs so in some case warnings issued during processing disappear. 
- for ds in self.lists: - if getattr(ds, 'warn', False): warn(ds.warn) - return self - - def filter_by_func(self, func:Callable): - for ds in self.lists: ds.filter_by_func(func) - return self - - def databunch(self, path:PathOrStr=None, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus, - dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None, collate_fn:Callable=data_collate, - no_check:bool=False, **kwargs)->'DataBunch': - "Create an `DataBunch` from self, `path` will override `self.path`, `kwargs` are passed to `DataBunch.create`." - path = Path(ifnone(path, self.path)) - data = self.x._bunch.create(self.train, self.valid, test_ds=self.test, path=path, bs=bs, val_bs=val_bs, - num_workers=num_workers, dl_tfms=dl_tfms, device=device, collate_fn=collate_fn, no_check=no_check, **kwargs) - if getattr(self, 'normalize', False):#In case a normalization was serialized - norm = self.normalize - data.normalize((norm['mean'], norm['std']), do_x=norm['do_x'], do_y=norm['do_y']) - data.label_list = self - return data - - def add_test(self, items:Iterator, label:Any=None, tfms=None, tfm_y=None): - "Add test set containing `items` with an arbitrary `label`." - # if no label passed, use label of first training item - if label is None: labels = EmptyLabelList([0] * len(items)) - else: labels = self.valid.y.new([label] * len(items)).process() - if isinstance(items, MixedItemList): items = self.valid.x.new(items.item_lists, inner_df=items.inner_df).process() - elif isinstance(items, ItemList): items = self.valid.x.new(items.items, inner_df=items.inner_df).process() - else: items = self.valid.x.new(items).process() - self.test = self.valid.new(items, labels, tfms=tfms, tfm_y=tfm_y) - return self - - def add_test_folder(self, test_folder:str='test', label:Any=None, tfms=None, tfm_y=None): - "Add test set containing items from `test_folder` and an arbitrary `label`." - # note: labels will be ignored if available in the test dataset - items = self.x.__class__.from_folder(self.path/test_folder) - return self.add_test(items.items, label=label, tfms=tfms, tfm_y=tfm_y) - - @classmethod - def load_state(cls, path:PathOrStr, state:dict): - "Create a `LabelLists` with empty sets from the serialized `state`." - path = Path(path) - train_ds = LabelList.load_state(path, state) - valid_ds = LabelList.load_state(path, state) - return LabelLists(path, train=train_ds, valid=valid_ds) - - @classmethod - def load_empty(cls, path:PathOrStr, fn:PathOrStr='export.pkl'): - "Create a `LabelLists` with empty sets from the serialized file in `path/fn`." - path = Path(path) - state = torch.load(open(path/fn, 'rb')) - return LabelLists.load_state(path, state) - -def _check_kwargs(ds:ItemList, tfms:TfmList, **kwargs): - tfms = listify(tfms) - if (tfms is None or len(tfms) == 0) and len(kwargs) == 0: return - if len(ds.items) >= 1: - x = ds[0] - try: x.apply_tfms(tfms, **kwargs) - except Exception as e: - raise Exception(f"It's not possible to apply those transforms to your dataset:\n {e}") - -class LabelList(Dataset): - "A list of inputs `x` and labels `y` with optional `tfms`." - def __init__(self, x:ItemList, y:ItemList, tfms:TfmList=None, tfm_y:bool=False, **kwargs): - self.x,self.y,self.tfm_y = x,y,tfm_y - self.y.x = x - self.item=None - self.transform(tfms, **kwargs) - - def __len__(self)->int: return len(self.x) if self.item is None else 1 - - @contextmanager - def set_item(self,item): - "For inference, will briefly replace the dataset with one that only contains `item`." 
- self.item = self.x.process_one(item) - yield None - self.item = None - - def __repr__(self)->str: - items = [self[i] for i in range(min(5,len(self.items)))] - res = f'{self.__class__.__name__} ({len(self.items)} items)\n' - res += f'x: {self.x.__class__.__name__}\n{show_some([i[0] for i in items])}\n' - res += f'y: {self.y.__class__.__name__}\n{show_some([i[1] for i in items])}\n' - return res + f'Path: {self.path}' - - def predict(self, res): - "Delegates predict call on `res` to `self.y`." - return self.y.predict(res) - - @property - def c(self): return self.y.c - - def new(self, x, y, tfms=None, tfm_y=None, **kwargs)->'LabelList': - tfms,tfm_y = ifnone(tfms, self.tfms),ifnone(tfm_y, self.tfm_y) - if isinstance(x, ItemList): - return self.__class__(x, y, tfms=tfms, tfm_y=tfm_y, **self.tfmargs) - else: - return self.new(self.x.new(x, **kwargs), self.y.new(y, **kwargs), tfms=tfms, tfm_y=tfm_y).process() - - def __getattr__(self,k:str)->Any: - x = super().__getattribute__('x') - res = getattr(x, k, None) - if res is not None and k not in ['classes', 'c']: return res - y = super().__getattribute__('y') - res = getattr(y, k, None) - if res is not None: return res - raise AttributeError(k) - - def __setstate__(self,data:Any): self.__dict__.update(data) - - def __getitem__(self,idxs:Union[int,np.ndarray])->'LabelList': - "return a single (x, y) if `idxs` is an integer or a new `LabelList` object if `idxs` is a range." - idxs = try_int(idxs) - if isinstance(idxs, Integral): - if self.item is None: x,y = self.x[idxs],self.y[idxs] - else: x,y = self.item ,0 - if self.tfms or self.tfmargs: - x = x.apply_tfms(self.tfms, is_x=True, **self.tfmargs) - if hasattr(self, 'tfms_y') and self.tfm_y and self.item is None: - y = y.apply_tfms(self.tfms_y, is_x=False, **{**self.tfmargs_y, 'do_resolve':False}) - if y is None: y=0 - return x,y - else: return self.new(self.x[idxs], self.y[idxs]) - - def to_df(self)->None: - "Create `pd.DataFrame` containing `items` from `self.x` and `self.y`." - return pd.DataFrame(dict(x=self.x._relative_item_paths(), y=[str(o) for o in self.y])) - - def to_csv(self, dest:str)->None: - "Save `self.to_df()` to a CSV file in `self.path`/`dest`." - self.to_df().to_csv(self.path/dest, index=False) - - def get_state(self, **kwargs): - "Return the minimal state for export." - state = {'x_cls':self.x.__class__, 'x_proc':self.x.processor, - 'y_cls':self.y.__class__, 'y_proc':self.y.processor, - 'tfms':self.tfms, 'tfm_y':self.tfm_y, 'tfmargs':self.tfmargs} - if hasattr(self, 'tfms_y'): state['tfms_y'] = self.tfms_y - if hasattr(self, 'tfmargs_y'): state['tfmargs_y'] = self.tfmargs_y - return {**state, **kwargs} - - def export(self, fn:PathOrStr, **kwargs): - "Export the minimal state and save it in `fn` to load an empty version for inference." - pickle.dump(self.get_state(**kwargs), open(fn, 'wb')) - - @classmethod - def load_empty(cls, path:PathOrStr, fn:PathOrStr): - "Load the state in `fn` to create an empty `LabelList` for inference." - return cls.load_state(path, pickle.load(open(Path(path)/fn, 'rb'))) - - @classmethod - def load_state(cls, path:PathOrStr, state:dict) -> 'LabelList': - "Create a `LabelList` from `state`." 
- x = state['x_cls']([], path=path, processor=state['x_proc'], ignore_empty=True) - y = state['y_cls']([], path=path, processor=state['y_proc'], ignore_empty=True) - res = cls(x, y, tfms=state['tfms'], tfm_y=state['tfm_y'], **state['tfmargs']).process() - if state.get('tfms_y', False): res.tfms_y = state['tfms_y'] - if state.get('tfmargs_y', False): res.tfmargs_y = state['tfmargs_y'] - if state.get('normalize', False): res.normalize = state['normalize'] - return res - - def process(self, xp:PreProcessor=None, yp:PreProcessor=None, name:str=None): - "Launch the processing on `self.x` and `self.y` with `xp` and `yp`." - self.y.process(yp) - if getattr(self.y, 'filter_missing_y', False): - filt = array([o is None for o in self.y.items]) - if filt.sum()>0: - #Warnings are given later since progress_bar might make them disappear. - self.warn = f"You are labelling your items with {self.y.__class__.__name__}.\n" - self.warn += f"Your {name} set contained the following unknown labels, the corresponding items have been discarded.\n" - for p in self.y.processor: - if len(getattr(p, 'warns', [])) > 0: - warnings = list(set(p.warns)) - self.warn += ', '.join(warnings[:5]) - if len(warnings) > 5: self.warn += "..." - p.warns = [] - self.x,self.y = self.x[~filt],self.y[~filt] - self.x.process(xp) - return self - - def filter_by_func(self, func:Callable): - filt = array([func(x,y) for x,y in zip(self.x.items, self.y.items)]) - self.x,self.y = self.x[~filt],self.y[~filt] - return self - - def transform(self, tfms:TfmList, tfm_y:bool=None, **kwargs): - "Set the `tfms` and `tfm_y` value to be applied to the inputs and targets." - _check_kwargs(self.x, tfms, **kwargs) - if tfm_y is None: tfm_y = self.tfm_y - tfms_y = None if tfms is None else list(filter(lambda t: getattr(t, 'use_on_y', True), listify(tfms))) - if tfm_y: _check_kwargs(self.y, tfms_y, **kwargs) - self.tfms,self.tfmargs = tfms,kwargs - self.tfm_y,self.tfms_y,self.tfmargs_y = tfm_y,tfms_y,kwargs - return self - - def transform_y(self, tfms:TfmList=None, **kwargs): - "Set `tfms` to be applied to the targets only." - tfms_y = list(filter(lambda t: getattr(t, 'use_on_y', True), listify(self.tfms if tfms is None else tfms))) - tfmargs_y = {**self.tfmargs, **kwargs} if tfms is None else kwargs - _check_kwargs(self.y, tfms_y, **tfmargs_y) - self.tfm_y,self.tfms_y,self.tfmargs_y=True,tfms_y,tfmargs_y - return self - - def databunch(self, **kwargs): - "To throw a clear error message when the data wasn't split." - raise Exception("Your data isn't split, if you don't want a validation set, please use `split_none`") - -@classmethod -def _databunch_load_empty(cls, path, fname:str='export.pkl'): - "Load an empty `DataBunch` from the exported file in `path/fname` with optional `tfms`." 
- sd = LabelLists.load_empty(path, fn=fname) - return sd.databunch() - -DataBunch.load_empty = _databunch_load_empty - -class MixedProcessor(PreProcessor): - def __init__(self, procs:Collection[Union[PreProcessor, Collection[PreProcessor]]]): - self.procs = procs - - def process_one(self, item:Any): - res = [] - for procs, i in zip(self.procs, item): - for p in procs: i = p.process_one(i) - res.append(i) - return res - - def process(self, ds:Collection): - for procs, il in zip(self.procs, ds.item_lists): - for p in procs: p.process(il) - -class MixedItem(ItemBase): - def __init__(self, items): - self.obj = items - self.data = [item.data for item in items] - - def __repr__(self): return '\n'.join([f'{self.__class__.__name__}'] + [repr(item) for item in self.obj]) - - def apply_tfms(self, tfms:Collection, **kwargs): - self.obj = [item.apply_tfms(t, **kwargs) for item,t in zip(self.obj, tfms)] - self.data = [item.data for item in self.obj] - return self - -class MixedItemList(ItemList): - - def __init__(self, item_lists, path:PathOrStr=None, label_cls:Callable=None, inner_df:Any=None, - x:'ItemList'=None, ignore_empty:bool=False, processor=None): - self.item_lists = item_lists - if processor is None: - default_procs = [[p(ds=il) for p in listify(il._processor)] for il in item_lists] - processor = MixedProcessor([ifnone(il.processor, dp) for il,dp in zip(item_lists, default_procs)]) - items = range_of(item_lists[0]) if len(item_lists) >= 1 else [] - if path is None and len(item_lists) >= 1: path = item_lists[0].path - super().__init__(items, processor=processor, path=path, - label_cls=label_cls, inner_df=inner_df, x=x, ignore_empty=ignore_empty) - - def new(self, item_lists, processor:PreProcessor=None, **kwargs)->'ItemList': - "Create a new `ItemList` from `items`, keeping the same attributes." - processor = ifnone(processor, self.processor) - copy_d = {o:getattr(self,o) for o in self.copy_new} - kwargs = {**copy_d, **kwargs} - return self.__class__(item_lists, processor=processor, **kwargs) - - def get(self, i): - return MixedItem([il.get(i) for il in self.item_lists]) - - def __getitem__(self,idxs:int)->Any: - idxs = try_int(idxs) - if isinstance(idxs, Integral): return self.get(idxs) - else: - item_lists = [il.new(il.items[idxs], inner_df=index_row(il.inner_df, idxs)) for il in self.item_lists] - return self.new(item_lists, inner_df=index_row(self.inner_df, idxs)) diff --git a/spaces/ali-ghamdan/deoldify/fastai/utils/mem.py b/spaces/ali-ghamdan/deoldify/fastai/utils/mem.py deleted file mode 100644 index 1514def40cfd4176a0f3dd3187b1aff57ec94106..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/deoldify/fastai/utils/mem.py +++ /dev/null @@ -1,218 +0,0 @@ -"Utility functions for memory management" - -from ..imports.torch import * -from ..core import * -from ..script import * -import functools, threading, time -from .pynvml_gate import * -from collections import namedtuple - -#is_osx = platform.system() == "Darwin" -use_gpu = torch.cuda.is_available() - -GPUMemory = namedtuple('GPUMemory', ['total', 'free', 'used']) - -if use_gpu: - pynvml = load_pynvml_env() - -def preload_pytorch(): - torch.ones((1, 1)).cuda() - -def b2mb(num): - """ convert Bs to MBs and round down """ - return int(num/2**20) - -def gpu_mem_get(id=None): - "get total, used and free memory (in MBs) for gpu `id`. 
if `id` is not passed, currently selected torch device is used" - if not use_gpu: return GPUMemory(0, 0, 0) - if id is None: id = torch.cuda.current_device() - try: - handle = pynvml.nvmlDeviceGetHandleByIndex(id) - info = pynvml.nvmlDeviceGetMemoryInfo(handle) - return GPUMemory(*(map(b2mb, [info.total, info.free, info.used]))) - except: - return GPUMemory(0, 0, 0) - -def gpu_mem_get_all(): - "get total, used and free memory (in MBs) for each available gpu" - if not use_gpu: return [] - return list(map(gpu_mem_get, range(pynvml.nvmlDeviceGetCount()))) - -def gpu_mem_get_free(): - "get free memory (in MBs) for the currently selected gpu id, w/o emptying the cache" - return gpu_mem_get().free - -def gpu_mem_get_free_no_cache(): - "get free memory (in MBs) for the currently selected gpu id, after emptying the cache" - torch.cuda.empty_cache() - return gpu_mem_get().free - -def gpu_mem_get_used(): - "get used memory (in MBs) for the currently selected gpu id, w/o emptying the cache" - return gpu_mem_get().used - -def gpu_mem_get_used_fast(gpu_handle): - "get used memory (in MBs) for the currently selected gpu id, w/o emptying the cache, and needing the `gpu_handle` arg" - info = pynvml.nvmlDeviceGetMemoryInfo(gpu_handle) - return b2mb(info.used) - -def gpu_mem_get_used_no_cache(): - "get used memory (in MBs) for the currently selected gpu id, after emptying the cache" - torch.cuda.empty_cache() - return gpu_mem_get().used - -def gpu_with_max_free_mem(): - "get [gpu_id, its_free_ram] for the first gpu with highest available RAM" - mem_all = gpu_mem_get_all() - if not len(mem_all): return None, 0 - free_all = np.array([x.free for x in mem_all]) - id = np.argmax(free_all) - return id, free_all[id] - -class GPUMemTrace(): - "Trace allocated and peaked GPU memory usage (deltas)." - def __init__(self, silent=False, ctx=None, on_exit_report=True): - assert torch.cuda.is_available(), "pytorch CUDA is required" - self.silent = silent # shortcut to turn off all reports from constructor - self.ctx = ctx # default context note in report - self.on_exit_report = on_exit_report # auto-report on ctx manager exit (default: True) - self.start() - - def reset(self): - self.used_start = gpu_mem_get_used_no_cache() - self.used_peak = self.used_start - - def data_set(self): - # delta_used is the difference between current used mem and used mem at the start - self.delta_used = gpu_mem_get_used_no_cache() - self.used_start - - # delta_peaked is the overhead if any. It is calculated as follows: - # - # 1. The difference between the peak memory and the used memory at the - # start is measured: - # 2a. If it's negative, then delta_peaked is 0 - # 2b. 
Otherwise, if used_delta is positive it gets subtracted from delta_peaked - # XXX: 2a shouldn't be needed once we have a reliable peak counter - self.delta_peaked = self.used_peak - self.used_start - if self.delta_peaked < 0: self.delta_peaked = 0 - elif self.delta_used > 0: self.delta_peaked -= self.delta_used - - def data(self): - if self.is_running: self.data_set() - return self.delta_used, self.delta_peaked - - def start(self): - self.is_running = True - self.reset() - self.peak_monitor_start() - - def stop(self): - self.peak_monitor_stop() - self.data_set() - self.is_running = False - - def __enter__(self): - self.start() - return self - - def __exit__(self, *exc): - self.stop() - if self.on_exit_report: self.report('exit') - - def __del__(self): - self.stop() - - def __repr__(self): - delta_used, delta_peaked = self.data() - return f"△Used Peaked MB: {delta_used:6,.0f} {delta_peaked:6,.0f}" - - def _get_ctx(self, subctx=None): - "Return ' (ctx: subctx)' or ' (ctx)' or ' (subctx)' or '' depending on this and constructor arguments" - l = [] - if self.ctx is not None: l.append(self.ctx) - if subctx is not None: l.append(subctx) - return '' if len(l) == 0 else f" ({': '.join(l)})" - - def silent(self, silent=True): - self.silent = silent - - def report(self, subctx=None): - "Print delta used+peaked, and an optional context note, which can also be preset in constructor" - if self.silent: return - print(f"{ self.__repr__() }{ self._get_ctx(subctx) }") - - def report_n_reset(self, subctx=None): - "Print delta used+peaked, and an optional context note. Then reset counters" - self.report(subctx) - self.reset() - - def peak_monitor_start(self): - self.peak_monitoring = True - - # continually sample GPU RAM usage - peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) - peak_monitor_thread.daemon = True - peak_monitor_thread.start() - - def peak_monitor_stop(self): - self.peak_monitoring = False - - # XXX: this is an unreliable function, since there is no thread priority - # control and it may not run enough or not run at all - def peak_monitor_func(self): - gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(torch.cuda.current_device()) - while True: - self.used_peak = max(gpu_mem_get_used_fast(gpu_handle), self.used_peak) - if not self.peak_monitoring: break - time.sleep(0.001) # 1msec - -def gpu_mem_trace(func): - "A decorator that runs `GPUMemTrace` w/ report on func" - @functools.wraps(func) - def wrapper(*args, **kwargs): - with GPUMemTrace(ctx=func.__qualname__, on_exit_report=True): - return func(*args, **kwargs) - return wrapper - -def reduce_mem_usage(df): - """ iterate through all the columns of a dataframe and modify the data type - to reduce memory usage. 
- """ - start_mem = df.memory_usage().sum() / 1024**2 - print('Memory usage of dataframe is {:.2f} MB'.format(start_mem)) - - #Removed from debugging - columns = df.columns - #.drop('index') - - for col in columns: - col_type = df[col].dtype - if str(col_type) != 'category' and col_type != 'datetime64[ns]' and col_type != bool: - if col_type != object: - c_min = df[col].min() - c_max = df[col].max() - if str(col_type)[:3] == 'int': - if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: - df[col] = df[col].astype(np.int8) - elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: - df[col] = df[col].astype(np.int16) - elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: - df[col] = df[col].astype(np.int32) - elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: - df[col] = df[col].astype(np.int64) - else: - #if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: - #df[col] = df[col].astype(np.float16) - #Sometimes causes and error and had to remove - if c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: - df[col] = df[col].astype(np.float32) - else: - print('Error '+col+' Value would be a float64. Disregarding.') - else: - df[col] = df[col].astype('category') - - end_mem = df.memory_usage().sum() / 1024**2 - print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) - print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) - - return df diff --git a/spaces/ali-ghamdan/deoldify/fastai/vision/__init__.py b/spaces/ali-ghamdan/deoldify/fastai/vision/__init__.py deleted file mode 100644 index a6c8edb61c0e86e687b299e6965bff726183a648..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/deoldify/fastai/vision/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .. import basics -from ..basics import * -from .learner import * -from .image import * -from .data import * -from .transform import * -from .tta import * -from . import models - -from .. 
import vision - -__all__ = [*basics.__all__, *learner.__all__, *data.__all__, *image.__all__, *transform.__all__, *tta.__all__, 'models', 'vision'] - diff --git a/spaces/allknowingroger/Image-Models-Test204/app.py b/spaces/allknowingroger/Image-Models-Test204/app.py deleted file mode 100644 index e4b2baa69e7d7268538294eb68f42932babee6cd..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test204/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "Adalwolf10/profile_ST", - "ostris/super-cereal-sdxl-lora", - "shakti08/my-pet-dog", - "shikari2917/mypic5", - "jwhedbee/lora-trained-xl-telnyx-banner-ad-poc", - "digiplay/BeenReal_diffusers", - "Daniil-plotnikov/deepvision-v2-1", - "livingbox/model-test-10-oct-with-ext-cap", - "Yntec/3DKXv11", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], 
inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/amulyaprasanth/car_price_prediction/app.py b/spaces/amulyaprasanth/car_price_prediction/app.py deleted file mode 100644 index 32d6a4bcbb87ee5bf787a5e8bfa3e8a5039f6037..0000000000000000000000000000000000000000 --- a/spaces/amulyaprasanth/car_price_prediction/app.py +++ /dev/null @@ -1,82 +0,0 @@ -import gradio as gr -import numpy as np -from sklearn.preprocessing import LabelEncoder -import tensorflow as tf -from helper_functions import build_model, convert - -# Loading in the Label Encoders -car_name_encoder = LabelEncoder() -car_name_encoder.classes_ = np.load('car_name.npy', allow_pickle=True) -car_names_list = car_name_encoder.classes_.tolist() - -fuel_type_encoder = LabelEncoder() -fuel_type_encoder.classes_ = np.load('fuel_type.npy', allow_pickle=True) -fuel_type_list = fuel_type_encoder.classes_.tolist() - -seller_type_encoder = LabelEncoder() -seller_type_encoder.classes_ = np.load('seller_type.npy', allow_pickle=True) -seller_type_list = seller_type_encoder.classes_.tolist() - -transmission_type_encoder = LabelEncoder() -transmission_type_encoder.classes_ = np.load( - 'transmission_type.npy', allow_pickle=True) -transmission_type_list = transmission_type_encoder.classes_.tolist() - -# Function to predict -# # Index(['car_name', 'vehicle_age', 'km_driven', 'seller_type', 'fuel_type', -# 'transmission_type', 'mileage', 'engine', 'max_power', 'seats', -# 'avg_selling_price'], -# dtype='object') - - -def predict(car_name, vehicle_age, km_driven, seller_type, fuel_type, transmission_type, mileage, engine, max_power, seats): - - model = build_model() - model.load_weights("./checkpoints/model_checkpoint") - - car_name = car_name_encoder.transform([car_name]) - seller_type = seller_type_encoder.transform([seller_type]) - fuel_type = fuel_type_encoder.transform([fuel_type]) - transmission_type = transmission_type_encoder.transform( - [transmission_type]) - features = np.array([car_name, vehicle_age, km_driven, seller_type, fuel_type, - transmission_type, mileage, engine, max_power, seats]).astype('float32') - prediction = model.predict(features.reshape(1, -1)) - return tf.round(prediction) - - -# Creating input list -car_name_dropdown = gr.inputs.Dropdown(car_names_list, label="Car Name") -vehicle_age_input = gr.Number(label="Vehicle Age") -km_driven_input = gr.Number(label="Kilometers Driven") -seller_type_dropdown = gr.inputs.Dropdown( - seller_type_list, label="Seller Type") -fuel_type_dropdown = gr.inputs.Dropdown(fuel_type_list, label="Fuel Type") -transmission_type_dropdown = gr.inputs.Dropdown( - transmission_type_list, label="Transmission Type") -mileage_input = gr.Number(label="Mileage") -engine_input = gr.Number(label="Engine (in cc) (1 L or litre = 1000cc") -max_power_input = gr.Number(label="Max Power (in bhp)") -seats_input = gr.Number(label="Number of Seats") - -input_list = [car_name_dropdown, vehicle_age_input, km_driven_input, 
seller_type_dropdown, - fuel_type_dropdown, transmission_type_dropdown, mileage_input, engine_input, max_power_input, seats_input] - -# Creating output list -output_list = gr.Number( - label="Selling price of used car in rupees (eg. 1.1 lakh = 110000)") - -title = "Car Price Prediction Application🚘🚙🏎️📊📈" -description = """ - -This application predicts the selling price of a used car based on the input parameters. The model is trained on data from Cardekho.com. The model is a Random Forest Regressor with 1000 estimators. The model is trained on 80% of the data and tested on 20% of the data. The model has an R2 score of 0.95. - -""" - -# Build the Gradio app -gr.Interface(fn=predict, - inputs=input_list, - outputs=output_list, - title=title, - description=description, - allow_flagging="never").launch() diff --git a/spaces/artba/SchoolStats1/app.py b/spaces/artba/SchoolStats1/app.py deleted file mode 100644 index 3035214828cc98742b8b86e4052597e9268d190c..0000000000000000000000000000000000000000 --- a/spaces/artba/SchoolStats1/app.py +++ /dev/null @@ -1,176 +0,0 @@ -import gradio as gr -import numpy as np -from mdutils import MdUtils -import matplotlib.pyplot as plt -import matplotlib.ticker as ticker -import base64 -import io -import json - -WIDTH = 10 -HEIGHT = 2 - - -def generate_charts(unique, counts, d, format="png", grayscale: bool = True) -> dict: - plot = io.BytesIO() - hist = io.BytesIO() - pie = io.BytesIO() - - # Generate Plot - fig = plt.figure(figsize=(WIDTH, HEIGHT)) - ax = fig.add_subplot() - ax.yaxis.set_major_locator(ticker.MultipleLocator(1)) - if grayscale: - ax.plot(unique.astype("str"), counts, marker="o", color="black") - else: - ax.plot(unique.astype("str"), counts, marker="o", color="blue") - plt.savefig(plot, bbox_inches="tight", orientation="landscape", format=format) - - # Generate Histogram - fig, ax = plt.subplots(figsize=(WIDTH, HEIGHT)) - if grayscale: - ax.bar(x=[str(i) for i in unique], height=counts, width=0.5, color=["black"]) - else: - ax.bar(x=[str(i) for i in unique], height=counts, width=0.5, color=["blue"]) - plt.savefig(hist, bbox_inches="tight", format=format) - - # Generate Pie Chart - fig, ax = plt.subplots(figsize=(WIDTH, HEIGHT)) - if grayscale: - ax.pie( - list(d.values()), - labels=list(d.keys()), - colors=["black", "grey"], - wedgeprops={ - "edgecolor": "white", - "linewidth": 0.7, - }, - ) - else: - ax.pie( - list(d.values()), - labels=list(d.keys()), - wedgeprops={ - "edgecolor": "white", - "linewidth": 0.7, - }, - ) - plt.savefig(pie, format=format) - - plot.seek(0) - hist.seek(0) - pie.seek(0) - - plot_content = base64.b64encode(plot.read()).decode() - hist_content = base64.b64encode(hist.read()).decode() - pie_content = base64.b64encode(pie.read()).decode() - - return {"plot": plot_content, "hist": hist_content, "pie": pie_content} - - -def add_image_b64(mdfile: MdUtils, image_content: str, format: str = "png"): - mdfile.new_paragraph(f"") - mdfile.new_line() - - -def make_all(numbers, grayscale: bool): - arr = np.array(numbers) - - unique, counts = np.unique(arr, return_counts=True) - d = dict(zip(unique, counts)) # Counts of number of occurences - mode = ", ".join( - [ - str(unique[i]) - for i in np.argwhere(counts == np.max(counts)).flatten().tolist() - ] - ) - mean = np.mean(arr) - rng = np.max(arr) - np.min(arr) - vrnc = np.var(arr) - - mdFile = MdUtils( - file_name="Практическая работа по статистике", - title="Практическая работа по статистике", - ) - - mdFile.new_paragraph(",".join([str(x) for x in arr])) - 
mdFile.new_paragraph(",".join([str(x) for x in sorted(arr)])) - mdFile.new_paragraph(f"Размах: {rng}") - mdFile.new_paragraph(f"Мода: {mode}") - mdFile.new_paragraph(f"А ср.: {mean:.2f}") - mdFile.new_paragraph(f"D = {vrnc:.2f}") - list_of_strings = ["Элемент"] - - for x in d: - list_of_strings.extend([f"{str(x)}"]) - list_of_strings.append("Кол-во") - for value in d.values(): - list_of_strings.extend([f"{str(value)}"]) - - mdFile.new_line() - mdFile.new_table( - columns=len(d) + 1, rows=2, text=list_of_strings, text_align="center" - ) - mdFile.new_line() - - # Insert Images - charts = generate_charts(unique, counts, d, "png", grayscale) - - mdFile.new_paragraph("
    ") - - add_image_b64(mdFile, charts["plot"], "png") - add_image_b64(mdFile, charts["hist"], "png") - add_image_b64(mdFile, charts["pie"], "png") - - mdFile.new_paragraph("
    ") - mdFile.new_paragraph("
    \n> Created by @Quielan\n
    ") - - # mdFile.create_md_file() - - mdFile2 = MdUtils(file_name="Практическая работа по статистике_2") - mdFile2.new_paragraph("## Расчет дисперсии по отклонениям и их квадратам") - mdFile2.new_line() - - mdFile2.new_paragraph("

    \n\n
    ") - - list_of_strings = ["Элемент", "Отклонение", "Квадрат отклонения"] - - for x in sorted(arr): - list_of_strings.extend([f"{x}", f"{(mean - x):.2f}", f"{(mean - x) ** 2:.2f}"]) - - mdFile2.new_line() - mdFile2.new_table( - columns=3, - rows=len(arr) + 1, - text=list_of_strings, - ) - mdFile2.new_line() - mdFile2.new_paragraph("
    ") - mdFile2.new_line() - mdFile2.new_paragraph("
    \n> Created by @Quielan\n
    ") - - mdfile_enc = base64.b64encode(mdFile.get_md_text().encode("utf-8")) - mdfile_var_enc = base64.b64encode(mdFile2.get_md_text().encode("utf-8")) - - return mdfile_enc.decode(), mdfile_var_enc.decode() - - -def getints(numbers: str, grayscale: bool = True): - ls = list(map(int, numbers.split(" "))) - return make_all(ls, grayscale) - - -with open("examples.json") as jf: - examples = json.load(jf) - -iface = gr.Interface( - fn=getints, - inputs=[gr.Textbox(show_copy_button=True, label="Numbers"), "checkbox"], - outputs=[ - gr.Textbox(show_copy_button=True, label="Chart"), - gr.Textbox(show_copy_button=True, label="Variance"), - ], - title="Tilted Calculator", - examples=examples, -) -iface.launch() diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/configs/parallel_wavegan_config.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/configs/parallel_wavegan_config.py deleted file mode 100644 index 7845dd6bf835ebab4cc5d8b65962b7347b7711cf..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/configs/parallel_wavegan_config.py +++ /dev/null @@ -1,133 +0,0 @@ -from dataclasses import dataclass, field - -from .shared_configs import BaseGANVocoderConfig - - -@dataclass -class ParallelWaveganConfig(BaseGANVocoderConfig): - """Defines parameters for ParallelWavegan vocoder. - - Args: - model (str): - Model name used for selecting the right configuration at initialization. Defaults to `gan`. - discriminator_model (str): One of the discriminators from `TTS.vocoder.models.*_discriminator`. Defaults to - 'parallel_wavegan_discriminator`. - discriminator_model_params (dict): The discriminator model kwargs. Defaults to - '{"num_layers": 10}` - generator_model (str): One of the generators from TTS.vocoder.models.*`. Every other non-GAN vocoder model is - considered as a generator too. Defaults to `parallel_wavegan_generator`. - generator_model_param (dict): - The generator model kwargs. Defaults to `{"upsample_factors": [4, 4, 4, 4], "stacks": 3, "num_res_blocks": 30}`. - batch_size (int): - Batch size used at training. Larger values use more memory. Defaults to 16. - seq_len (int): - Audio segment length used at training. Larger values use more memory. Defaults to 8192. - pad_short (int): - Additional padding applied to the audio samples shorter than `seq_len`. Defaults to 0. - use_noise_augment (bool): - enable / disable random noise added to the input waveform. The noise is added after computing the - features. Defaults to True. - use_cache (bool): - enable / disable in memory caching of the computed features. It can cause OOM error if the system RAM is - not large enough. Defaults to True. - steps_to_start_discriminator (int): - Number of steps required to start training the discriminator. Defaults to 0. - use_stft_loss (bool):` - enable / disable use of STFT loss originally used by ParallelWaveGAN model. Defaults to True. - use_subband_stft (bool): - enable / disable use of subband loss computation originally used by MultiBandMelgan model. Defaults to True. - use_mse_gan_loss (bool): - enable / disable using Mean Squeare Error GAN loss. Defaults to True. - use_hinge_gan_loss (bool): - enable / disable using Hinge GAN loss. You should choose either Hinge or MSE loss for training GAN models. - Defaults to False. - use_feat_match_loss (bool): - enable / disable using Feature Matching loss originally used by MelGAN model. Defaults to True. 
- use_l1_spec_loss (bool): - enable / disable using L1 spectrogram loss originally used by HifiGAN model. Defaults to False. - stft_loss_params (dict): STFT loss parameters. Defaults to - `{"n_ffts": [1024, 2048, 512], "hop_lengths": [120, 240, 50], "win_lengths": [600, 1200, 240]}` - stft_loss_weight (float): STFT loss weight that multiplies the computed loss before summing up the total - model loss. Defaults to 0.5. - subband_stft_loss_weight (float): - Subband STFT loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0. - mse_G_loss_weight (float): - MSE generator loss weight that multiplies the computed loss before summing up the total loss. Defaults to 2.5. - hinge_G_loss_weight (float): - Hinge generator loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0. - feat_match_loss_weight (float): - Feature matching loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0. - l1_spec_loss_weight (float): - L1 spectrogram loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0. - lr_gen (float): - Generator model initial learning rate. Defaults to 0.0002. - lr_disc (float): - Discriminator model initial learning rate. Defaults to 0.0002. - optimizer (torch.optim.Optimizer): - Optimizer used for the training. Defaults to `AdamW`. - optimizer_params (dict): - Optimizer kwargs. Defaults to `{"betas": [0.8, 0.99], "weight_decay": 0.0}` - lr_scheduler_gen (torch.optim.Scheduler): - Learning rate scheduler for the generator. Defaults to `ExponentialLR`. - lr_scheduler_gen_params (dict): - Parameters for the generator learning rate scheduler. Defaults to `{"gamma": 0.5, "step_size": 200000, "last_epoch": -1}`. - lr_scheduler_disc (torch.optim.Scheduler): - Learning rate scheduler for the discriminator. Defaults to `ExponentialLR`. - lr_scheduler_disc_params (dict): - Parameters for the discriminator learning rate scheduler. Defaults to `{"gamma": 0.5, "step_size": 200000, "last_epoch": -1}`. - """ - - model: str = "parallel_wavegan" - - # Model specific params - discriminator_model: str = "parallel_wavegan_discriminator" - discriminator_model_params: dict = field(default_factory=lambda: {"num_layers": 10}) - generator_model: str = "parallel_wavegan_generator" - generator_model_params: dict = field( - default_factory=lambda: {"upsample_factors": [4, 4, 4, 4], "stacks": 3, "num_res_blocks": 30} - ) - - # Training - overrides - batch_size: int = 6 - seq_len: int = 25600 - pad_short: int = 2000 - use_noise_augment: bool = False - use_cache: bool = True - steps_to_start_discriminator: int = 200000 - - # LOSS PARAMETERS - overrides - use_stft_loss: bool = True - use_subband_stft_loss: bool = False - use_mse_gan_loss: bool = True - use_hinge_gan_loss: bool = False - use_feat_match_loss: bool = False # requires MelGAN Discriminators (MelGAN and HifiGAN) - use_l1_spec_loss: bool = False - - stft_loss_params: dict = field( - default_factory=lambda: { - "n_ffts": [1024, 2048, 512], - "hop_lengths": [120, 240, 50], - "win_lengths": [600, 1200, 240], - } - ) - - # loss weights - overrides - stft_loss_weight: float = 0.5 - subband_stft_loss_weight: float = 0 - mse_G_loss_weight: float = 2.5 - hinge_G_loss_weight: float = 0 - feat_match_loss_weight: float = 0 - l1_spec_loss_weight: float = 0 - - # optimizer overrides - lr_gen: float = 0.0002 # Initial learning rate. - lr_disc: float = 0.0002 # Initial learning rate.
- optimizer: str = "AdamW" - optimizer_params: dict = field(default_factory=lambda: {"betas": [0.8, 0.99], "weight_decay": 0.0}) - lr_scheduler_gen: str = "StepLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html - lr_scheduler_gen_params: dict = field(default_factory=lambda: {"gamma": 0.5, "step_size": 200000, "last_epoch": -1}) - lr_scheduler_disc: str = "StepLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html - lr_scheduler_disc_params: dict = field( - default_factory=lambda: {"gamma": 0.5, "step_size": 200000, "last_epoch": -1} - ) - scheduler_after_epoch: bool = False diff --git a/spaces/artificialguybr/video-dubbing/TTS/recipes/vctk/yourtts/train_yourtts.py b/spaces/artificialguybr/video-dubbing/TTS/recipes/vctk/yourtts/train_yourtts.py deleted file mode 100644 index b9cf10fa8eb3c37451d1a95c95d17680b81642d7..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/recipes/vctk/yourtts/train_yourtts.py +++ /dev/null @@ -1,253 +0,0 @@ -import os - -import torch -from trainer import Trainer, TrainerArgs - -from TTS.bin.compute_embeddings import compute_embeddings -from TTS.bin.resample import resample_files -from TTS.config.shared_configs import BaseDatasetConfig -from TTS.tts.configs.vits_config import VitsConfig -from TTS.tts.datasets import load_tts_samples -from TTS.tts.models.vits import CharactersConfig, Vits, VitsArgs, VitsAudioConfig -from TTS.utils.downloaders import download_vctk - -torch.set_num_threads(24) - -# pylint: disable=W0105 -""" - This recipe replicates the first experiment proposed in the YourTTS paper (https://arxiv.org/abs/2112.02418). - YourTTS model is based on the VITS model however it uses external speaker embeddings extracted from a pre-trained speaker encoder and has small architecture changes. - In addition, YourTTS can be trained in multilingual data, however, this recipe replicates the single language training using the VCTK dataset. - If you are interested in multilingual training, we have commented on parameters on the VitsArgs class instance that should be enabled for multilingual training. - In addition, you will need to add the extra datasets following the VCTK as an example. -""" -CURRENT_PATH = os.path.dirname(os.path.abspath(__file__)) - -# Name of the run for the Trainer -RUN_NAME = "YourTTS-EN-VCTK" - -# Path where you want to save the models outputs (configs, checkpoints and tensorboard logs) -OUT_PATH = os.path.dirname(os.path.abspath(__file__)) # "/raid/coqui/Checkpoints/original-YourTTS/" - -# If you want to do transfer learning and speedup your training you can set here the path to the original YourTTS model -RESTORE_PATH = None # "/root/.local/share/tts/tts_models--multilingual--multi-dataset--your_tts/model_file.pth" - -# This paramter is useful to debug, it skips the training epochs and just do the evaluation and produce the test sentences -SKIP_TRAIN_EPOCH = False - -# Set here the batch size to be used in training and evaluation -BATCH_SIZE = 32 - -# Training Sampling rate and the target sampling rate for resampling the downloaded dataset (Note: If you change this you might need to redownload the dataset !!) 
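The comment above stresses that every training clip must match the 16 kHz sample rate set just below, or be resampled first. As a hedged aside (not part of the original recipe), a quick standalone check using only the standard-library `wave` module could look like this; the helper name is made up for illustration and only covers WAV files (the VCTK download ships FLAC, which the recipe resamples separately):

```python
import wave

def check_sample_rate(wav_path: str, expected: int = 16000) -> None:
    """Raise early if a clip does not match the training sample rate."""
    with wave.open(wav_path, "rb") as f:
        actual = f.getframerate()
    if actual != expected:
        raise ValueError(f"{wav_path}: expected {expected} Hz, got {actual} Hz")
```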
-# Note: If you add new datasets, please make sure that the dataset sampling rate and this parameter are matching, otherwise resample your audios -SAMPLE_RATE = 16000 - -# Max audio length in seconds to be used in training (every audio bigger than it will be ignored) -MAX_AUDIO_LEN_IN_SECONDS = 10 - -### Download VCTK dataset -VCTK_DOWNLOAD_PATH = os.path.join(CURRENT_PATH, "VCTK") -# Define the number of threads used during the audio resampling -NUM_RESAMPLE_THREADS = 10 -# Check if VCTK dataset is not already downloaded, if not download it -if not os.path.exists(VCTK_DOWNLOAD_PATH): - print(">>> Downloading VCTK dataset:") - download_vctk(VCTK_DOWNLOAD_PATH) - resample_files(VCTK_DOWNLOAD_PATH, SAMPLE_RATE, file_ext="flac", n_jobs=NUM_RESAMPLE_THREADS) - -# init configs -vctk_config = BaseDatasetConfig( - formatter="vctk", - dataset_name="vctk", - meta_file_train="", - meta_file_val="", - path=VCTK_DOWNLOAD_PATH, - language="en", - ignored_speakers=[ - "p261", - "p225", - "p294", - "p347", - "p238", - "p234", - "p248", - "p335", - "p245", - "p326", - "p302", - ], # Ignore the test speakers to full replicate the paper experiment -) - -# Add here all datasets configs, in our case we just want to train with the VCTK dataset then we need to add just VCTK. Note: If you want to add new datasets, just add them here and it will automatically compute the speaker embeddings (d-vectors) for this new dataset :) -DATASETS_CONFIG_LIST = [vctk_config] - -### Extract speaker embeddings -SPEAKER_ENCODER_CHECKPOINT_PATH = ( - "https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/model_se.pth.tar" -) -SPEAKER_ENCODER_CONFIG_PATH = "https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/config_se.json" - -D_VECTOR_FILES = [] # List of speaker embeddings/d-vectors to be used during the training - -# Iterates all the dataset configs checking if the speakers embeddings are already computated, if not compute it -for dataset_conf in DATASETS_CONFIG_LIST: - # Check if the embeddings weren't already computed, if not compute it - embeddings_file = os.path.join(dataset_conf.path, "speakers.pth") - if not os.path.isfile(embeddings_file): - print(f">>> Computing the speaker embeddings for the {dataset_conf.dataset_name} dataset") - compute_embeddings( - SPEAKER_ENCODER_CHECKPOINT_PATH, - SPEAKER_ENCODER_CONFIG_PATH, - embeddings_file, - old_speakers_file=None, - config_dataset_path=None, - formatter_name=dataset_conf.formatter, - dataset_name=dataset_conf.dataset_name, - dataset_path=dataset_conf.path, - meta_file_train=dataset_conf.meta_file_train, - meta_file_val=dataset_conf.meta_file_val, - disable_cuda=False, - no_eval=False, - ) - D_VECTOR_FILES.append(embeddings_file) - - -# Audio config used in training. 
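The loop above computes speaker d-vectors only when `speakers.pth` is missing, so repeated runs reuse the cached file instead of re-running the speaker encoder. A minimal, hedged sketch of that compute-once-and-cache pattern (the helper name and callback are invented for illustration):

```python
import os

def ensure_artifact(path: str, build_fn) -> str:
    """Build an expensive artifact only if it is not already on disk."""
    if not os.path.isfile(path):
        build_fn(path)
    return path

# e.g. ensure_artifact(os.path.join(dataset_conf.path, "speakers.pth"), build_embeddings)
```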
-audio_config = VitsAudioConfig( - sample_rate=SAMPLE_RATE, - hop_length=256, - win_length=1024, - fft_size=1024, - mel_fmin=0.0, - mel_fmax=None, - num_mels=80, -) - -# Init VITSArgs setting the arguments that are needed for the YourTTS model -model_args = VitsArgs( - d_vector_file=D_VECTOR_FILES, - use_d_vector_file=True, - d_vector_dim=512, - num_layers_text_encoder=10, - speaker_encoder_model_path=SPEAKER_ENCODER_CHECKPOINT_PATH, - speaker_encoder_config_path=SPEAKER_ENCODER_CONFIG_PATH, - resblock_type_decoder="2", # In the paper, we accidentally trained the YourTTS using ResNet blocks type 2, if you like you can use the ResNet blocks type 1 like the VITS model - # Useful parameters to enable the Speaker Consistency Loss (SCL) described in the paper - # use_speaker_encoder_as_loss=True, - # Useful parameters to enable multilingual training - # use_language_embedding=True, - # embedded_language_dim=4, -) - -# General training config, here you can change the batch size and others useful parameters -config = VitsConfig( - output_path=OUT_PATH, - model_args=model_args, - run_name=RUN_NAME, - project_name="YourTTS", - run_description=""" - - Original YourTTS trained using VCTK dataset - """, - dashboard_logger="tensorboard", - logger_uri=None, - audio=audio_config, - batch_size=BATCH_SIZE, - batch_group_size=48, - eval_batch_size=BATCH_SIZE, - num_loader_workers=8, - eval_split_max_size=256, - print_step=50, - plot_step=100, - log_model_step=1000, - save_step=5000, - save_n_checkpoints=2, - save_checkpoints=True, - target_loss="loss_1", - print_eval=False, - use_phonemes=False, - phonemizer="espeak", - phoneme_language="en", - compute_input_seq_cache=True, - add_blank=True, - text_cleaner="multilingual_cleaners", - characters=CharactersConfig( - characters_class="TTS.tts.models.vits.VitsCharacters", - pad="_", - eos="&", - bos="*", - blank=None, - characters="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\u00af\u00b7\u00df\u00e0\u00e1\u00e2\u00e3\u00e4\u00e6\u00e7\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef\u00f1\u00f2\u00f3\u00f4\u00f5\u00f6\u00f9\u00fa\u00fb\u00fc\u00ff\u0101\u0105\u0107\u0113\u0119\u011b\u012b\u0131\u0142\u0144\u014d\u0151\u0153\u015b\u016b\u0171\u017a\u017c\u01ce\u01d0\u01d2\u01d4\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437\u0438\u0439\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443\u0444\u0445\u0446\u0447\u0448\u0449\u044a\u044b\u044c\u044d\u044e\u044f\u0451\u0454\u0456\u0457\u0491\u2013!'(),-.:;? ", - punctuations="!'(),-.:;? ", - phonemes="", - is_unique=True, - is_sorted=True, - ), - phoneme_cache_path=None, - precompute_num_workers=12, - start_by_longest=True, - datasets=DATASETS_CONFIG_LIST, - cudnn_benchmark=False, - max_audio_len=SAMPLE_RATE * MAX_AUDIO_LEN_IN_SECONDS, - mixed_precision=False, - test_sentences=[ - [ - "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.", - "VCTK_p277", - None, - "en", - ], - [ - "Be a voice, not an echo.", - "VCTK_p239", - None, - "en", - ], - [ - "I'm sorry Dave. I'm afraid I can't do that.", - "VCTK_p258", - None, - "en", - ], - [ - "This cake is great. 
It's so delicious and moist.", - "VCTK_p244", - None, - "en", - ], - [ - "Prior to November 22, 1963.", - "VCTK_p305", - None, - "en", - ], - ], - # Enable the weighted sampler - use_weighted_sampler=True, - # Ensures that all speakers are seen in the training batch equally no matter how many samples each speaker has - weighted_sampler_attrs={"speaker_name": 1.0}, - weighted_sampler_multipliers={}, - # It defines the Speaker Consistency Loss (SCL) α to 9 like the paper - speaker_encoder_loss_alpha=9.0, -) - -# Load all the datasets samples and split traning and evaluation sets -train_samples, eval_samples = load_tts_samples( - config.datasets, - eval_split=True, - eval_split_max_size=config.eval_split_max_size, - eval_split_size=config.eval_split_size, -) - -# Init the model -model = Vits.init_from_config(config) - -# Init the trainer and 🚀 -trainer = Trainer( - TrainerArgs(restore_path=RESTORE_PATH, skip_train_epoch=SKIP_TRAIN_EPOCH), - config, - output_path=OUT_PATH, - model=model, - train_samples=train_samples, - eval_samples=eval_samples, -) -trainer.fit() diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/airports_count.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/airports_count.py deleted file mode 100644 index be4c6b9c2905a60aa67870d97de81e50f77a89f6..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/airports_count.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Locations of US Airports -======================== -This is a layered geographic visualization that shows the positions of US -airports on a background of US states. -""" -# category: maps -import altair as alt -from vega_datasets import data - -airports = data.airports.url -states = alt.topo_feature(data.us_10m.url, feature='states') - -# US states background -background = alt.Chart(states).mark_geoshape( - fill='lightgray', - stroke='white' -).properties( - width=500, - height=300 -).project('albersUsa') - -# airport positions on background -points = alt.Chart(airports).transform_aggregate( - latitude='mean(latitude)', - longitude='mean(longitude)', - count='count()', - groupby=['state'] -).mark_circle().encode( - longitude='longitude:Q', - latitude='latitude:Q', - size=alt.Size('count:Q', title='Number of Airports'), - color=alt.value('steelblue'), - tooltip=['state:N','count:Q'] -).properties( - title='Number of airports in US' -) - -background + points diff --git a/spaces/ashutosh1919/quantum-perceptron/run_tests.sh b/spaces/ashutosh1919/quantum-perceptron/run_tests.sh deleted file mode 100644 index 3fce8a71c595805d3ce11e6c3549ec7e331df789..0000000000000000000000000000000000000000 --- a/spaces/ashutosh1919/quantum-perceptron/run_tests.sh +++ /dev/null @@ -1,8 +0,0 @@ -echo "#### RUNNING MYPY TESTS ####" -PYTHONDONTWRITEBYTECODE=1 mypy ./ - -echo "#### RUNNING PYCODESTYLE TESTS ####" -pycodestyle ./ - -echo "#### RUNNING PYTEST TESTS ####" -PYTHONDONTWRITEBYTECODE=1 python -m pytest -p no:cacheprovider \ No newline at end of file diff --git a/spaces/atimughal662/InfoFusion/src/iterators/__init__.py b/spaces/atimughal662/InfoFusion/src/iterators/__init__.py deleted file mode 100644 index d800eac15a042c02c0d8b31f086db83ade229a53..0000000000000000000000000000000000000000 --- a/spaces/atimughal662/InfoFusion/src/iterators/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .timeout_iterator import TimeoutIterator, AsyncTimeoutIterator -from .iterator_pipe import IteratorPipe, AsyncIteratorPipe - 
-__all__ = ["TimeoutIterator", "AsyncTimeoutIterator", "IteratorPipe", "AsyncIteratorPipe"] \ No newline at end of file diff --git a/spaces/awacke1/AGameForThat/README.md b/spaces/awacke1/AGameForThat/README.md deleted file mode 100644 index 538ef715788e418ed21cdf3ac3d09a1547f37c77..0000000000000000000000000000000000000000 --- a/spaces/awacke1/AGameForThat/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 🎨 A Game For That 🎮 Gradio -emoji: 🎮✍🎨 -colorFrom: indigo -colorTo: green -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/ASR-openai-whisper-base/README.md b/spaces/awacke1/ASR-openai-whisper-base/README.md deleted file mode 100644 index 69751a8dc98a6786150ac263dc28d04a8a72b979..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ASR-openai-whisper-base/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ASR Openai Whisper Base -emoji: ⚡ -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.20.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/CardWriterPro/README.md b/spaces/awacke1/CardWriterPro/README.md deleted file mode 100644 index 20da454a6aad2e5e3923ade0200934a45ff953b0..0000000000000000000000000000000000000000 --- a/spaces/awacke1/CardWriterPro/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Modelcard Creator -emoji: ⚡ -colorFrom: red -colorTo: yellow -sdk: streamlit -sdk_version: 1.10.0 -app_file: 1_📝_form.py -pinned: false -license: mit -duplicated_from: huggingface/Model_Cards_Writing_Tool ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/Tensorflow-AI-Driven-Personalization/app.py b/spaces/awacke1/Tensorflow-AI-Driven-Personalization/app.py deleted file mode 100644 index 00034fb714d9f79dcddc4825390247b0f6d49ded..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Tensorflow-AI-Driven-Personalization/app.py +++ /dev/null @@ -1,106 +0,0 @@ -import streamlit as st -import pandas as pd -import numpy as np -import tensorflow as tf -import json -import os - -# Dummy TensorFlow model for demonstration purposes -def create_model(): - model = tf.keras.Sequential([ - tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)), - tf.keras.layers.Dense(4, activation='relu'), - tf.keras.layers.Dense(1, activation='sigmoid') - ]) - model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) - return model - -model = create_model() - -# Function to get user preferences -def get_user_preferences(): - st.sidebar.write("## User Preferences") - username = st.sidebar.text_input("Username", value="Default") - - if "username" not in st.session_state: - st.session_state.username = username - - st.session_state.age = st.sidebar.number_input("Age", min_value=0, max_value=120, value=st.session_state.get("age", 30)) - st.session_state.gender = st.sidebar.selectbox("Gender", options=["Male", "Female", "Other"], index=["Male", "Female", "Other"].index(st.session_state.get("gender", "Male"))) - st.session_state.hobbies = st.sidebar.multiselect("Hobbies", options=["Sports", "Reading", "Travel", "Cooking", "Gaming"], default=st.session_state.get("hobbies", [])) - st.session_state.occupation = st.sidebar.selectbox("Occupation", options=["Student", "Employed", "Unemployed", "Retired"], 
index=["Student", "Employed", "Unemployed", "Retired"].index(st.session_state.get("occupation", "Student"))) - - preferences = { - "username": username, - "age": st.session_state.age, - "gender": st.session_state.gender, - "hobbies": st.session_state.hobbies, - "occupation": st.session_state.occupation - } - return preferences - -# Function to preprocess user preferences for TensorFlow model -def preprocess_user_preferences(preferences): - # Preprocess the user data as needed for your specific model - user_data = np.array([preferences['age'], len(preferences['hobbies']), int(preferences['gender'] == "Male"), int(preferences['occupation'] == "Employed")]) - return user_data.reshape(1, -1) - -# Function to save user preferences to a text file -def save_user_preferences(preferences): - file_path = f"{preferences['username']}.txt" - with open(file_path, 'w') as outfile: - json.dump(preferences, outfile) - -# Function to load user preferences from a text file -def load_user_preferences(username): - file_path = f"{username}.txt" - if os.path.exists(file_path): - with open(file_path, 'r') as infile: - preferences = json.load(infile) - return preferences - return None - -def main(): - st.title("AI-driven Personalized Experience") - - preferences = get_user_preferences() - - # Load button - if st.sidebar.button("Load"): - loaded_preferences = load_user_preferences(preferences["username"]) - if loaded_preferences: - preferences.update(loaded_preferences) - for key, value in loaded_preferences.items(): - st.session_state[key] = value - - st.write("## User Preferences") - st.write(preferences) - - user_data = preprocess_user_preferences(preferences) - prediction = model.predict(user_data) - - st.write("## AI-driven Personalized Content") - - - st.markdown("### Recommendation Score") - st.write(f"{prediction[0][0] * 100:.2f}%") - - st.markdown("### Recommended Activities") - activities = pd.DataFrame([ - {"Activity": "Outdoor Adventure", "Score": np.random.rand()}, - {"Activity": "Book Club", "Score": np.random.rand()}, - {"Activity": "Cooking Class", "Score": np.random.rand()}, - {"Activity": "Gaming Tournament", "Score": np.random.rand()} - ]) - - # Sort activities by score in descending order and take the top 10 - activities = activities.sort_values(by="Score", ascending=False).head(10) - activities["Score"] = activities["Score"].apply(lambda x: f"{x * 100:.2f}%") - st.table(activities) - - # Save button - if st.sidebar.button("Save"): - save_user_preferences(preferences) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/src/core/Face3.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/core/Face3.d.ts deleted file mode 100644 index c6e7a03850996e293265fec88a940f5513e78d60..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/core/Face3.d.ts +++ /dev/null @@ -1,104 +0,0 @@ -import { Vector3 } from './../math/Vector3'; -import { Color } from './../math/Color'; - -export interface Event { - type: string; - target?: any; - [attachment: string]: any; -} - -/** - * Triangle face. - * - * # Example - * var normal = new THREE.Vector3( 0, 1, 0 ); - * var color = new THREE.Color( 0xffaa00 ); - * var face = new THREE.Face3( 0, 1, 2, normal, color, 0 ); - * - * @source https://github.com/mrdoob/three.js/blob/master/src/core/Face3.js - */ -export class Face3 { - /** - * @param a Vertex A index. - * @param b Vertex B index. - * @param c Vertex C index. 
- * @param normal Face normal or array of vertex normals. - * @param color Face color or array of vertex colors. - * @param materialIndex Material index. - */ - constructor( - a: number, - b: number, - c: number, - normal?: Vector3, - color?: Color, - materialIndex?: number - ); - constructor( - a: number, - b: number, - c: number, - normal?: Vector3, - vertexColors?: Color[], - materialIndex?: number - ); - constructor( - a: number, - b: number, - c: number, - vertexNormals?: Vector3[], - color?: Color, - materialIndex?: number - ); - constructor( - a: number, - b: number, - c: number, - vertexNormals?: Vector3[], - vertexColors?: Color[], - materialIndex?: number - ); - - /** - * Vertex A index. - */ - a: number; - - /** - * Vertex B index. - */ - b: number; - - /** - * Vertex C index. - */ - c: number; - - /** - * Face normal. - */ - normal: Vector3; - - /** - * Array of 4 vertex normals. - */ - vertexNormals: Vector3[]; - - /** - * Face color. - */ - color: Color; - - /** - * Array of 4 vertex normals. - */ - vertexColors: Color[]; - - /** - * Material index (points to {@link Geometry.materials}). - */ - materialIndex: number; - - clone(): this; - copy(source: Face3): this; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/core/InstancedBufferAttribute.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/core/InstancedBufferAttribute.d.ts deleted file mode 100644 index 05fa371db2e9fa34d162b70547db6bcfb364e3ec..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/core/InstancedBufferAttribute.d.ts +++ /dev/null @@ -1,47 +0,0 @@ -import { BufferGeometry } from './BufferGeometry'; -import { BufferAttribute } from './BufferAttribute'; - -/** - * @see examples/js/BufferGeometryUtils.js - */ -export namespace BufferGeometryUtils { - export function mergeBufferGeometries( - geometries: BufferGeometry[] - ): BufferGeometry; - export function computeTangents(geometry: BufferGeometry): null; - export function mergeBufferAttributes( - attributes: BufferAttribute[] - ): BufferAttribute; -} - -/** - * @deprecated - */ -export namespace GeometryUtils { - /** - * @deprecated Use {@link Geometry#merge geometry.merge( geometry2, matrix, materialIndexOffset )} instead. - */ - export function merge( - geometry1: any, - geometry2: any, - materialIndexOffset?: any - ): any; - /** - * @deprecated Use {@link Geometry#center geometry.center()} instead. 
- */ - export function center(geometry: any): any; -} - -/** - * @see src/core/InstancedBufferAttribute.js - */ -export class InstancedBufferAttribute extends BufferAttribute { - constructor( - array: ArrayLike, - itemSize: number, - normalized?: boolean, - meshPerAttribute?: number - ); - - meshPerAttribute: number; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLIndexedBufferRenderer.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLIndexedBufferRenderer.d.ts deleted file mode 100644 index 924b44504cfc53499ce568d45faafc3f509f58e3..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLIndexedBufferRenderer.d.ts +++ /dev/null @@ -1,8 +0,0 @@ -export class WebGLIndexedBufferRenderer { - constructor(gl: WebGLRenderingContext, properties: any, info: any); - - setMode(value: any): void; - setIndex(index: any): void; - render(start: any, count: number): void; - renderInstances(geometry: any, start: any, count: number): void; -} diff --git a/spaces/binery/Donut_Receipt_v2/app.py b/spaces/binery/Donut_Receipt_v2/app.py deleted file mode 100644 index ab7c5362f1bb9342de6086700aee3893f13993ed..0000000000000000000000000000000000000000 --- a/spaces/binery/Donut_Receipt_v2/app.py +++ /dev/null @@ -1,56 +0,0 @@ -import re -import gradio as gr - -import torch -from transformers import DonutProcessor, VisionEncoderDecoderModel - -processor = DonutProcessor.from_pretrained("debu-das/donut_receipt_v2.29") -model = VisionEncoderDecoderModel.from_pretrained("debu-das/donut_receipt_v2.29") - -device = "cuda" if torch.cuda.is_available() else "cpu" -model.to(device) - -def process_document(image): - # prepare encoder inputs - pixel_values = processor(image, return_tensors="pt").pixel_values - - # prepare decoder inputs - task_prompt = "" - decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids - - # generate answer - outputs = model.generate( - pixel_values.to(device), - decoder_input_ids=decoder_input_ids.to(device), - max_length=model.decoder.config.max_position_embeddings, - early_stopping=True, - pad_token_id=processor.tokenizer.pad_token_id, - eos_token_id=processor.tokenizer.eos_token_id, - use_cache=True, - num_beams=1, - bad_words_ids=[[processor.tokenizer.unk_token_id]], - return_dict_in_generate=True, - ) - - # postprocess - sequence = processor.batch_decode(outputs.sequences)[0] - sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") - sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token - - return processor.token2json(sequence) - -description = "Gradio Demo for Donut, an instance of `VisionEncoderDecoderModel` fine-tuned on CORD (document parsing). To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below." -article = "

    Donut: OCR-free Document Understanding Transformer | Github Repo

    " - -demo = gr.Interface( - fn=process_document, - inputs="image", - outputs="json", - title="Demo: Donut 🍩 for Document Parsing", - description=description, - article=article, - enable_queue=True, - examples=[["example.png"], ["example_1.png"],["example_2.png"], ["example_3.png"],["example_4.png"]], - cache_examples=False) - -demo.launch() \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Crane Girder Design To Bs5950 Pdf Comparison of Different Types of Sections.md b/spaces/bioriAsaeru/text-to-voice/Crane Girder Design To Bs5950 Pdf Comparison of Different Types of Sections.md deleted file mode 100644 index 3aa330b710027643f66592a1a5bafde93a242df9..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Crane Girder Design To Bs5950 Pdf Comparison of Different Types of Sections.md +++ /dev/null @@ -1,6 +0,0 @@ -

-Crane Girder Design To Bs5950 Pdf
-Download Zip: https://urloso.com/2uyRyu
-
- aaccfb2cb3
-
-

    diff --git a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/evaluatoin/cal_lpips.py b/spaces/birsardar/stable-diffusion-mat-outpainting-primer/evaluatoin/cal_lpips.py deleted file mode 100644 index a66d53b6de3ed0af6441d633990aa0d16c49b7e4..0000000000000000000000000000000000000000 --- a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/evaluatoin/cal_lpips.py +++ /dev/null @@ -1,71 +0,0 @@ -import cv2 -import os -import sys -import numpy as np -import math -import glob -import pyspng -import PIL.Image - -import torch -import lpips - - -def read_image(image_path): - with open(image_path, 'rb') as f: - if pyspng is not None and image_path.endswith('.png'): - image = pyspng.load(f.read()) - else: - image = np.array(PIL.Image.open(f)) - if image.ndim == 2: - image = image[:, :, np.newaxis] # HW => HWC - if image.shape[2] == 1: - image = np.repeat(image, 3, axis=2) - image = image.transpose(2, 0, 1) # HWC => CHW - image = torch.from_numpy(image).float().unsqueeze(0) - image = image / 127.5 - 1 - - return image - - -def calculate_metrics(folder1, folder2): - l1 = sorted(glob.glob(folder1 + '/*.png') + glob.glob(folder1 + '/*.jpg')) - l2 = sorted(glob.glob(folder2 + '/*.png') + glob.glob(folder2 + '/*.jpg')) - assert(len(l1) == len(l2)) - print('length:', len(l1)) - - # l1 = l1[:3]; l2 = l2[:3]; - - device = torch.device('cuda:0') - loss_fn = lpips.LPIPS(net='alex').to(device) - loss_fn.eval() - # loss_fn = lpips.LPIPS(net='vgg').to(device) - - lpips_l = [] - with torch.no_grad(): - for i, (fpath1, fpath2) in enumerate(zip(l1, l2)): - print(i) - _, name1 = os.path.split(fpath1) - _, name2 = os.path.split(fpath2) - name1 = name1.split('.')[0] - name2 = name2.split('.')[0] - assert name1 == name2, 'Illegal mapping: %s, %s' % (name1, name2) - - img1 = read_image(fpath1).to(device) - img2 = read_image(fpath2).to(device) - assert img1.shape == img2.shape, 'Illegal shape' - lpips_l.append(loss_fn(img1, img2).mean().cpu().numpy()) - - res = sum(lpips_l) / len(lpips_l) - - return res - - -if __name__ == '__main__': - folder1 = 'path to the inpainted result' - folder2 = 'path to the gt' - - res = calculate_metrics(folder1, folder2) - print('lpips: %.4f' % res) - with open('lpips.txt', 'w') as f: - f.write('lpips: %.4f' % res) diff --git a/spaces/bobathetheft/webui/README.md b/spaces/bobathetheft/webui/README.md deleted file mode 100644 index 013d12c9f3a56698056ae1bdbbfb0ec009805237..0000000000000000000000000000000000000000 --- a/spaces/bobathetheft/webui/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Stable Diffusion Web UI -emoji: 🚧 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -duplicated_from: camenduru/webui ---- - -## Stable Diffusion Web UI -[https://github.com/AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - -## Documentation -[https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki) - -## Models License -https://huggingface.co/spaces/CompVis/stable-diffusion-license \ No newline at end of file diff --git a/spaces/bradarrML/stablediffusion-infinity/postprocess.py b/spaces/bradarrML/stablediffusion-infinity/postprocess.py deleted file mode 100644 index 90c7f535c568fa46b6433390459d82e7967bb1fd..0000000000000000000000000000000000000000 --- a/spaces/bradarrML/stablediffusion-infinity/postprocess.py +++ /dev/null @@ -1,249 +0,0 @@ -""" -https://github.com/Trinkle23897/Fast-Poisson-Image-Editing -MIT License - 
-Copyright (c) 2022 Jiayi Weng - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -""" - -import time -import argparse -import os -import fpie -from process import ALL_BACKEND, CPU_COUNT, DEFAULT_BACKEND -from fpie.io import read_images, write_image -from process import BaseProcessor, EquProcessor, GridProcessor - -from PIL import Image -import numpy as np -import skimage -import skimage.measure -import scipy -import scipy.signal - - -class PhotometricCorrection: - def __init__(self,quite=False): - self.get_parser("cli") - args=self.parser.parse_args(["--method","grid","-g","src","-s","a","-t","a","-o","a"]) - args.mpi_sync_interval = getattr(args, "mpi_sync_interval", 0) - self.backend=args.backend - self.args=args - self.quite=quite - proc: BaseProcessor - proc = GridProcessor( - args.gradient, - args.backend, - args.cpu, - args.mpi_sync_interval, - args.block_size, - args.grid_x, - args.grid_y, - ) - print( - f"[PIE]Successfully initialize PIE {args.method} solver " - f"with {args.backend} backend" - ) - self.proc=proc - - def run(self, original_image, inpainted_image, mode="mask_mode"): - print(f"[PIE] start") - if mode=="disabled": - return inpainted_image - input_arr=np.array(original_image) - if input_arr[:,:,-1].sum()<1: - return inpainted_image - output_arr=np.array(inpainted_image) - mask=input_arr[:,:,-1] - mask=255-mask - if mask.sum()<1 and mode=="mask_mode": - mode="" - if mode=="mask_mode": - mask = skimage.measure.block_reduce(mask, (8, 8), np.max) - mask = mask.repeat(8, axis=0).repeat(8, axis=1) - else: - mask[8:-9,8:-9]=255 - mask = mask[:,:,np.newaxis].repeat(3,axis=2) - nmask=mask.copy() - output_arr2=output_arr[:,:,0:3].copy() - input_arr2=input_arr[:,:,0:3].copy() - output_arr2[nmask<128]=0 - input_arr2[nmask>=128]=0 - output_arr2+=input_arr2 - src = output_arr2[:,:,0:3] - tgt = src.copy() - proc=self.proc - args=self.args - if proc.root: - n = proc.reset(src, mask, tgt, (args.h0, args.w0), (args.h1, args.w1)) - proc.sync() - if proc.root: - result = tgt - t = time.time() - if args.p == 0: - args.p = args.n - - for i in range(0, args.n, args.p): - if proc.root: - result, err = proc.step(args.p) # type: ignore - print(f"[PIE] Iter {i + args.p}, abs_err {err}") - else: - proc.step(args.p) - - if proc.root: - dt = time.time() - t - print(f"[PIE] Time elapsed: {dt:.4f}s") - # make sure consistent with dummy process - return Image.fromarray(result) - - - def get_parser(self,gen_type: str) -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument( - "-v", "--version", 
action="store_true", help="show the version and exit" - ) - parser.add_argument( - "--check-backend", action="store_true", help="print all available backends" - ) - if gen_type == "gui" and "mpi" in ALL_BACKEND: - # gui doesn't support MPI backend - ALL_BACKEND.remove("mpi") - parser.add_argument( - "-b", - "--backend", - type=str, - choices=ALL_BACKEND, - default=DEFAULT_BACKEND, - help="backend choice", - ) - parser.add_argument( - "-c", - "--cpu", - type=int, - default=CPU_COUNT, - help="number of CPU used", - ) - parser.add_argument( - "-z", - "--block-size", - type=int, - default=1024, - help="cuda block size (only for equ solver)", - ) - parser.add_argument( - "--method", - type=str, - choices=["equ", "grid"], - default="equ", - help="how to parallelize computation", - ) - parser.add_argument("-s", "--source", type=str, help="source image filename") - if gen_type == "cli": - parser.add_argument( - "-m", - "--mask", - type=str, - help="mask image filename (default is to use the whole source image)", - default="", - ) - parser.add_argument("-t", "--target", type=str, help="target image filename") - parser.add_argument("-o", "--output", type=str, help="output image filename") - if gen_type == "cli": - parser.add_argument( - "-h0", type=int, help="mask position (height) on source image", default=0 - ) - parser.add_argument( - "-w0", type=int, help="mask position (width) on source image", default=0 - ) - parser.add_argument( - "-h1", type=int, help="mask position (height) on target image", default=0 - ) - parser.add_argument( - "-w1", type=int, help="mask position (width) on target image", default=0 - ) - parser.add_argument( - "-g", - "--gradient", - type=str, - choices=["max", "src", "avg"], - default="max", - help="how to calculate gradient for PIE", - ) - parser.add_argument( - "-n", - type=int, - help="how many iteration would you perfer, the more the better", - default=5000, - ) - if gen_type == "cli": - parser.add_argument( - "-p", type=int, help="output result every P iteration", default=0 - ) - if "mpi" in ALL_BACKEND: - parser.add_argument( - "--mpi-sync-interval", - type=int, - help="MPI sync iteration interval", - default=100, - ) - parser.add_argument( - "--grid-x", type=int, help="x axis stride for grid solver", default=8 - ) - parser.add_argument( - "--grid-y", type=int, help="y axis stride for grid solver", default=8 - ) - self.parser=parser - -if __name__ =="__main__": - import sys - import io - import base64 - from PIL import Image - def base64_to_pil(base64_str): - data = base64.b64decode(str(base64_str)) - pil = Image.open(io.BytesIO(data)) - return pil - - def pil_to_base64(out_pil): - out_buffer = io.BytesIO() - out_pil.save(out_buffer, format="PNG") - out_buffer.seek(0) - base64_bytes = base64.b64encode(out_buffer.read()) - base64_str = base64_bytes.decode("ascii") - return base64_str - correction_func=PhotometricCorrection(quite=True) - while True: - buffer = sys.stdin.readline() - print(f"[PIE] suprocess {len(buffer)} {type(buffer)} ") - if len(buffer)==0: - break - if isinstance(buffer,str): - lst=buffer.strip().split(",") - else: - lst=buffer.decode("ascii").strip().split(",") - img0=base64_to_pil(lst[0]) - img1=base64_to_pil(lst[1]) - ret=correction_func.run(img0,img1,mode=lst[2]) - ret_base64=pil_to_base64(ret) - if isinstance(buffer,str): - sys.stdout.write(f"{ret_base64}\n") - else: - sys.stdout.write(f"{ret_base64}\n".encode()) - sys.stdout.flush() \ No newline at end of file diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/MODEL_ZOO.md 
b/spaces/brjathu/HMR2.0/vendor/detectron2/MODEL_ZOO.md deleted file mode 100644 index 69db2728563c680e89a0d5d3e6ba272b8d78bdbd..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/MODEL_ZOO.md +++ /dev/null @@ -1,1052 +0,0 @@ -# Detectron2 Model Zoo and Baselines - -## Introduction - -This file documents a large collection of baselines trained -with detectron2 in Sep-Oct, 2019. -All numbers were obtained on [Big Basin](https://engineering.fb.com/data-center-engineering/introducing-big-basin-our-next-generation-ai-hardware/) -servers with 8 NVIDIA V100 GPUs & NVLink. The speed numbers are periodically updated with latest PyTorch/CUDA/cuDNN versions. -You can access these models from code using [detectron2.model_zoo](https://detectron2.readthedocs.io/modules/model_zoo.html) APIs. - -In addition to these official baseline models, you can find more models in [projects/](projects/). - -#### How to Read the Tables -* The "Name" column contains a link to the config file. Models can be reproduced using `tools/train_net.py` with the corresponding yaml config file, - or `tools/lazyconfig_train_net.py` for python config files. -* Training speed is averaged across the entire training. - We keep updating the speed with latest version of detectron2/pytorch/etc., - so they might be different from the `metrics` file. - Training speed for multi-machine jobs is not provided. -* Inference speed is measured by `tools/train_net.py --eval-only`, or [inference_on_dataset()](https://detectron2.readthedocs.io/modules/evaluation.html#detectron2.evaluation.inference_on_dataset), - with batch size 1 in detectron2 directly. - Measuring it with custom code may introduce other overhead. - Actual deployment in production should in general be faster than the given inference - speed due to more optimizations. -* The *model id* column is provided for ease of reference. - To check downloaded file integrity, any model on this page contains its md5 prefix in its file name. -* Training curves and other statistics can be found in `metrics` for each model. - -#### Common Settings for COCO Models -* All COCO models were trained on `train2017` and evaluated on `val2017`. -* The default settings are __not directly comparable__ with Detectron's standard settings. - For example, our default training data augmentation uses scale jittering in addition to horizontal flipping. - - To make fair comparisons with Detectron's settings, see - [Detectron1-Comparisons](configs/Detectron1-Comparisons/) for accuracy comparison, - and [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html) - for speed comparison. -* For Faster/Mask R-CNN, we provide baselines based on __3 different backbone combinations__: - * __FPN__: Use a ResNet+FPN backbone with standard conv and FC heads for mask and box prediction, - respectively. It obtains the best - speed/accuracy tradeoff, but the other two are still useful for research. - * __C4__: Use a ResNet conv4 backbone with conv5 head. The original baseline in the Faster R-CNN paper. - * __DC5__ (Dilated-C5): Use a ResNet conv5 backbone with dilations in conv5, and standard conv and FC heads - for mask and box prediction, respectively. - This is used by the Deformable ConvNet paper. -* Most models are trained with the 3x schedule (~37 COCO epochs). - Although 1x models are heavily under-trained, we provide some ResNet-50 models with the 1x (~12 COCO epochs) - training schedule for comparison when doing quick research iteration. 
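Since the introduction notes that every baseline can be loaded from code through the `detectron2.model_zoo` APIs, here is a minimal sketch of pulling one of the Faster R-CNN entries below for inference; it assumes detectron2 is installed and uses a standard model-zoo config path:

```python
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor

cfg_path = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
cfg = model_zoo.get_config(cfg_path)                        # config for the R50-FPN 3x baseline
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(cfg_path)  # URL of the pretrained weights
predictor = DefaultPredictor(cfg)                           # outputs = predictor(bgr_image_array)
```

As a rough sanity check of the schedule names: the standard 1x schedule is 90k iterations at a global batch of 16 images, i.e. about 90,000 × 16 / 118,000 ≈ 12 passes over `train2017`, which matches the "~12 COCO epochs" figure above; 3x triples the iteration count to reach roughly 37 epochs.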
- -#### ImageNet Pretrained Models - -It's common to initialize from backbone models pre-trained on ImageNet classification tasks. The following backbone models are available: - -* [R-50.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-50.pkl): converted copy of [MSRA's original ResNet-50](https://github.com/KaimingHe/deep-residual-networks) model. -* [R-101.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-101.pkl): converted copy of [MSRA's original ResNet-101](https://github.com/KaimingHe/deep-residual-networks) model. -* [X-101-32x8d.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/FAIR/X-101-32x8d.pkl): ResNeXt-101-32x8d model trained with Caffe2 at FB. -* [R-50.pkl (torchvision)](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/torchvision/R-50.pkl): converted copy of [torchvision's ResNet-50](https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.resnet50) model. - More details can be found in [the conversion script](tools/convert-torchvision-to-d2.py). - -Note that the above models have __different__ format from those provided in Detectron: we do not fuse BatchNorm into an affine layer. -Pretrained models in Detectron's format can still be used. For example: -* [X-152-32x8d-IN5k.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl): - ResNeXt-152-32x8d model trained on ImageNet-5k with Caffe2 at FB (see ResNeXt paper for details on ImageNet-5k). -* [R-50-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47261647/R-50-GN.pkl): - ResNet-50 with Group Normalization. -* [R-101-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47592356/R-101-GN.pkl): - ResNet-101 with Group Normalization. - -These models require slightly different settings regarding normalization and architecture. See the model zoo configs for reference. - -#### License - -All models available for download through this document are licensed under the -[Creative Commons Attribution-ShareAlike 3.0 license](https://creativecommons.org/licenses/by-sa/3.0/). - -### COCO Object Detection Baselines - -#### Faster R-CNN: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | model id |
| --- | --- | --- | --- | --- | --- | --- |
| R50-C4 | 1x | 0.551 | 0.102 | 4.8 | 35.7 | 137257644 |
| R50-DC5 | 1x | 0.380 | 0.068 | 5.0 | 37.3 | 137847829 |
| R50-FPN | 1x | 0.210 | 0.038 | 3.0 | 37.9 | 137257794 |
| R50-C4 | 3x | 0.543 | 0.104 | 4.8 | 38.4 | 137849393 |
| R50-DC5 | 3x | 0.378 | 0.070 | 5.0 | 39.0 | 137849425 |
| R50-FPN | 3x | 0.209 | 0.038 | 3.0 | 40.2 | 137849458 |
| R101-C4 | 3x | 0.619 | 0.139 | 5.9 | 41.1 | 138204752 |
| R101-DC5 | 3x | 0.452 | 0.086 | 6.1 | 40.6 | 138204841 |
| R101-FPN | 3x | 0.286 | 0.051 | 4.1 | 42.0 | 137851257 |
| X101-FPN | 3x | 0.638 | 0.098 | 6.7 | 43.0 | 139173657 |
    - -#### RetinaNet: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | model id |
| --- | --- | --- | --- | --- | --- | --- |
| R50 | 1x | 0.205 | 0.041 | 4.1 | 37.4 | 190397773 |
| R50 | 3x | 0.205 | 0.041 | 4.1 | 38.7 | 190397829 |
| R101 | 3x | 0.291 | 0.054 | 5.2 | 40.4 | 190397697 |
    - - -#### RPN & Fast R-CNN: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | prop. AR | model id |
| --- | --- | --- | --- | --- | --- | --- | --- |
| RPN R50-C4 | 1x | 0.130 | 0.034 | 1.5 | | 51.6 | 137258005 |
| RPN R50-FPN | 1x | 0.186 | 0.032 | 2.7 | | 58.0 | 137258492 |
| Fast R-CNN R50-FPN | 1x | 0.140 | 0.029 | 2.6 | 37.8 | | 137635226 |
    - -### COCO Instance Segmentation Baselines with Mask R-CNN - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id |
| --- | --- | --- | --- | --- | --- | --- | --- |
| R50-C4 | 1x | 0.584 | 0.110 | 5.2 | 36.8 | 32.2 | 137259246 |
| R50-DC5 | 1x | 0.471 | 0.076 | 6.5 | 38.3 | 34.2 | 137260150 |
| R50-FPN | 1x | 0.261 | 0.043 | 3.4 | 38.6 | 35.2 | 137260431 |
| R50-C4 | 3x | 0.575 | 0.111 | 5.2 | 39.8 | 34.4 | 137849525 |
| R50-DC5 | 3x | 0.470 | 0.076 | 6.5 | 40.0 | 35.9 | 137849551 |
| R50-FPN | 3x | 0.261 | 0.043 | 3.4 | 41.0 | 37.2 | 137849600 |
| R101-C4 | 3x | 0.652 | 0.145 | 6.3 | 42.6 | 36.7 | 138363239 |
| R101-DC5 | 3x | 0.545 | 0.092 | 7.6 | 41.9 | 37.3 | 138363294 |
| R101-FPN | 3x | 0.340 | 0.056 | 4.6 | 42.9 | 38.6 | 138205316 |
| X101-FPN | 3x | 0.690 | 0.103 | 7.2 | 44.3 | 39.5 | 139653917 |
    - - - -#### New baselines using Large-Scale Jitter and Longer Training Schedule - -The following baselines of COCO Instance Segmentation with Mask R-CNN are generated -using a longer training schedule and large-scale jitter as described in Google's -[Simple Copy-Paste Data Augmentation](https://arxiv.org/pdf/2012.07177.pdf) paper. These -models are trained from scratch using random initialization. These baselines exceed the -previous Mask R-CNN baselines. - -In the following table, one epoch consists of training on 118000 COCO images. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | epochs | train time (s/im) | inference time (s/im) | box AP | mask AP | model id |
| --- | --- | --- | --- | --- | --- | --- |
| R50-FPN | 100 | 0.376 | 0.069 | 44.6 | 40.3 | 42047764 |
| R50-FPN | 200 | 0.376 | 0.069 | 46.3 | 41.7 | 42047638 |
| R50-FPN | 400 | 0.376 | 0.069 | 47.4 | 42.5 | 42019571 |
| R101-FPN | 100 | 0.518 | 0.073 | 46.4 | 41.6 | 42025812 |
| R101-FPN | 200 | 0.518 | 0.073 | 48.0 | 43.1 | 42131867 |
| R101-FPN | 400 | 0.518 | 0.073 | 48.9 | 43.7 | 42073830 |
| regnetx_4gf_dds_FPN | 100 | 0.474 | 0.071 | 46.0 | 41.3 | 42047771 |
| regnetx_4gf_dds_FPN | 200 | 0.474 | 0.071 | 48.1 | 43.1 | 42132721 |
| regnetx_4gf_dds_FPN | 400 | 0.474 | 0.071 | 48.6 | 43.5 | 42025447 |
| regnety_4gf_dds_FPN | 100 | 0.487 | 0.073 | 46.1 | 41.6 | 42047784 |
| regnety_4gf_dds_FPN | 200 | 0.487 | 0.072 | 47.8 | 43.0 | 42047642 |
| regnety_4gf_dds_FPN | 400 | 0.487 | 0.072 | 48.2 | 43.3 | 42045954 |
    - -### COCO Person Keypoint Detection Baselines with Keypoint R-CNN - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | kp. AP | model id |
| --- | --- | --- | --- | --- | --- | --- | --- |
| R50-FPN | 1x | 0.315 | 0.072 | 5.0 | 53.6 | 64.0 | 137261548 |
| R50-FPN | 3x | 0.316 | 0.066 | 5.0 | 55.4 | 65.5 | 137849621 |
| R101-FPN | 3x | 0.390 | 0.076 | 6.1 | 56.4 | 66.1 | 138363331 |
| X101-FPN | 3x | 0.738 | 0.121 | 8.7 | 57.3 | 66.0 | 139686956 |
    - -### COCO Panoptic Segmentation Baselines with Panoptic FPN - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | PQ | model id |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R50-FPN | 1x | 0.304 | 0.053 | 4.8 | 37.6 | 34.7 | 39.4 | 139514544 |
| R50-FPN | 3x | 0.302 | 0.053 | 4.8 | 40.0 | 36.5 | 41.5 | 139514569 |
| R101-FPN | 3x | 0.392 | 0.066 | 6.0 | 42.4 | 38.5 | 43.0 | 139514519 |
    - - -### LVIS Instance Segmentation Baselines with Mask R-CNN - -Mask R-CNN baselines on the [LVIS dataset](https://lvisdataset.org), v0.5. -These baselines are described in Table 3(c) of the [LVIS paper](https://arxiv.org/abs/1908.03195). - -NOTE: the 1x schedule here has the same amount of __iterations__ as the COCO 1x baselines. -They are roughly 24 epochs of LVISv0.5 data. -The final results of these configs have large variance across different runs. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id |
| --- | --- | --- | --- | --- | --- | --- | --- |
| R50-FPN | 1x | 0.292 | 0.107 | 7.1 | 23.6 | 24.4 | 144219072 |
| R101-FPN | 1x | 0.371 | 0.114 | 7.8 | 25.6 | 25.9 | 144219035 |
| X101-FPN | 1x | 0.712 | 0.151 | 10.2 | 26.7 | 27.1 | 144219108 |
    - - - -### Cityscapes & Pascal VOC Baselines - -Simple baselines for -* Mask R-CNN on Cityscapes instance segmentation (initialized from COCO pre-training, then trained on Cityscapes fine annotations only) -* Faster R-CNN on PASCAL VOC object detection (trained on VOC 2007 train+val + VOC 2012 train+val, tested on VOC 2007 using 11-point interpolated AP) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | box AP50 | mask AP | model id |
| --- | --- | --- | --- | --- | --- | --- | --- |
| R50-FPN, Cityscapes | 0.240 | 0.078 | 4.4 | | | 36.5 | 142423278 |
| R50-C4, VOC | 0.537 | 0.081 | 4.8 | 51.9 | 80.3 | | 142202221 |
    - - - -### Other Settings - -Ablations for Deformable Conv and Cascade R-CNN: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Baseline R50-FPN | 1x | 0.261 | 0.043 | 3.4 | 38.6 | 35.2 | 137260431 |
| Deformable Conv | 1x | 0.342 | 0.048 | 3.5 | 41.5 | 37.5 | 138602867 |
| Cascade R-CNN | 1x | 0.317 | 0.052 | 4.0 | 42.1 | 36.4 | 138602847 |
| Baseline R50-FPN | 3x | 0.261 | 0.043 | 3.4 | 41.0 | 37.2 | 137849600 |
| Deformable Conv | 3x | 0.349 | 0.047 | 3.5 | 42.7 | 38.5 | 144998336 |
| Cascade R-CNN | 3x | 0.328 | 0.053 | 4.0 | 44.3 | 38.5 | 144998488 |
    - - -Ablations for normalization methods, and a few models trained from scratch following [Rethinking ImageNet Pre-training](https://arxiv.org/abs/1811.08883). -(Note: The baseline uses `2fc` head while the others use [`4conv1fc` head](https://arxiv.org/abs/1803.08494)) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Baseline R50-FPN | 3x | 0.261 | 0.043 | 3.4 | 41.0 | 37.2 | 137849600 |
| GN | 3x | 0.309 | 0.060 | 5.6 | 42.6 | 38.6 | 138602888 |
| SyncBN | 3x | 0.345 | 0.053 | 5.5 | 41.9 | 37.8 | 169527823 |
| GN (from scratch) | 3x | 0.338 | 0.061 | 7.2 | 39.9 | 36.6 | 138602908 |
| GN (from scratch) | 9x | N/A | 0.061 | 7.2 | 43.7 | 39.6 | 183808979 |
| SyncBN (from scratch) | 9x | N/A | 0.055 | 7.2 | 43.6 | 39.3 | 184226666 |
    - - -A few very large models trained for a long time, for demo purposes. They are trained using multiple machines: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | inference time (s/im) | train mem (GB) | box AP | mask AP | PQ | model id |
| --- | --- | --- | --- | --- | --- | --- |
| Panoptic FPN R101 | 0.098 | 11.4 | 47.4 | 41.3 | 46.1 | 139797668 |
| Mask R-CNN X152 | 0.234 | 15.1 | 50.2 | 44.0 | | 18131413 |
| above + test-time aug. | | | 51.9 | 45.9 | | |
    diff --git a/spaces/camenduru-com/webui/Dockerfile b/spaces/camenduru-com/webui/Dockerfile deleted file mode 100644 index 31e953f0e45c5da1b35a12c02b26f12555f4dc43..0000000000000000000000000000000000000000 --- a/spaces/camenduru-com/webui/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# Dockerfile Public A10G - -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.7.1/ubuntu2204/devel/cudnn8/Dockerfile -# FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04 -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.7.1/ubuntu2204/base/Dockerfile -FROM nvidia/cuda:11.7.1-base-ubuntu22.04 -ENV DEBIAN_FRONTEND noninteractive - -RUN apt-get update -y && apt-get upgrade -y && apt-get install -y libgl1 libglib2.0-0 wget git git-lfs python3-pip python-is-python3 && rm -rf /var/lib/apt/lists/* - -RUN adduser --disabled-password --gecos '' user -RUN mkdir /content && chown -R user:user /content -WORKDIR /content -USER user - -RUN pip3 install --upgrade pip -RUN pip install https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.16/xformers-0.0.16+814314d.d20230119.A10G-cp310-cp310-linux_x86_64.whl -RUN pip install --pre triton -RUN pip install numexpr torchmetrics==0.11.4 - -RUN git clone -b v1.6 https://github.com/camenduru/stable-diffusion-webui -RUN sed -i '$a fastapi==0.90.0' /content/stable-diffusion-webui/requirements_versions.txt -RUN sed -i -e '''/prepare_environment()/a\ os.system\(f\"""sed -i -e ''\"s/dict()))/dict())).cuda()/g\"'' /content/stable-diffusion-webui/repositories/stable-diffusion-stability-ai/ldm/util.py""")''' /content/stable-diffusion-webui/launch.py -RUN sed -i -e 's/ start()/ #start()/g' /content/stable-diffusion-webui/launch.py -RUN cd stable-diffusion-webui && python launch.py --skip-torch-cuda-test - -ADD --chown=user https://github.com/camenduru/webui/raw/main/env_patch.py /content/env_patch.py -RUN sed -i -e '/import image_from_url_text/r /content/env_patch.py' /content/stable-diffusion-webui/modules/ui.py -ADD --chown=user https://github.com/camenduru/webui/raw/main/header_patch.py /content/header_patch.py -RUN sed -i -e '/demo:/r /content/header_patch.py' /content/stable-diffusion-webui/modules/ui.py - -RUN sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /content/stable-diffusion-webui/modules/ui.py -RUN sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /content/stable-diffusion-webui/modules/ui.py -RUN sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /content/stable-diffusion-webui/modules/ui.py -RUN sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /content/stable-diffusion-webui/modules/ui.py -RUN sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? 
document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /content/stable-diffusion-webui/script.js -RUN sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /content/stable-diffusion-webui/modules/ui.py -RUN sed -i -e 's/default_enabled=False/default_enabled=True/g' /content/stable-diffusion-webui/webui.py -RUN sed -i -e 's/ outputs=\[/queue=False, &/g' /content/stable-diffusion-webui/modules/ui.py -RUN sed -i -e 's/ queue=False, / /g' /content/stable-diffusion-webui/modules/ui.py - -RUN rm -rfv /content/stable-diffusion-webui/scripts/ - -ADD --chown=user https://github.com/camenduru/webui-docker/raw/main/shared-config.json /content/shared-config.json -ADD --chown=user https://github.com/camenduru/webui-docker/raw/main/shared-ui-config.json /content/shared-ui-config.json - -ADD --chown=user https://huggingface.co/ckpt/anything-v3-vae-swapped/resolve/main/anything-v3-vae-swapped.ckpt /content/stable-diffusion-webui/models/Stable-diffusion/anything-v3-vae-swapped.ckpt - -EXPOSE 7860 - -CMD cd /content/stable-diffusion-webui && python webui.py --xformers --listen --disable-console-progressbars --enable-console-prompts --no-progressbar-hiding --ui-config-file /content/shared-ui-config.json --ui-settings-file /content/shared-config.json diff --git a/spaces/cancanasoyak/CropBased-TissueMasking/Deployment/webapp.py b/spaces/cancanasoyak/CropBased-TissueMasking/Deployment/webapp.py deleted file mode 100644 index 6e2505cd181d2ba2bfc53e8bd0b3dfb8e43949ab..0000000000000000000000000000000000000000 --- a/spaces/cancanasoyak/CropBased-TissueMasking/Deployment/webapp.py +++ /dev/null @@ -1,79 +0,0 @@ -import numpy as np -import pandas as pd -import os -import streamlit as st -import DepCNN -import DepDataloader -import DepCropping -from PIL import Image -import torch -import torch.nn as nn -from joblib import Parallel, delayed -from torch.utils.data import DataLoader -from model.LeNet5_different_inputSizes import LeNet5_64 -from DepCNN import test_model -from torch import no_grad - - - -@st.cache(suppress_st_warning=True) -def get_fvalue(val): - feature_dict = {"No":0,"Yes":1} - for key,value in feature_dict.items(): - if val == key: - return value - -def get_value(val,my_dict): - for key,value in my_dict.items(): - if val == key: - return value - -st.title('Predict Mask') - -with st.form(key='start_masking'): - model_str = st.selectbox("Select the Model",["LeNet5_64"], index=None, placeholder="Select Model Here...") - uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"]) - - submit_button = st.form_submit_button(label='Create Mask') -with st.expander("Advanced Settings", expanded=False): - with st.form(key="advanced_settings"): - st.write("Advanced Settings") - custom_threshold_str = st.selectbox("Custom Threshold",["No","Yes"], index=0) - threshold = st.number_input("Threshold",min_value=0.0,max_value=1.0,value=0.75,step=0.01) - advanced_submit_button = st.form_submit_button(label='Save Settings') - -if model_str != None and uploaded_image != None and submit_button == True: - - pb = st.progress(0,text="Cropping and Loading Model...") - - img = Image.open(uploaded_image) - img_arr = np.array(img) - device = "cpu" - - if custom_threshold_str == "Yes": - custom_threshold = True - else: - custom_threshold = False - - - if model_str == "LeNet5_64": - model = LeNet5_64() - model_path = r"Deployment/model/LeNet5_just_Student.pth" - cropsize = 64 - stride = 64 - - dataloader = DepCNN.LoadModelnData(img_arr, cropsize, stride, device, model, model_path) - - 
pb.progress(50,text="Predicting...") - - pred_df = DepCNN.TestToDataframe(model, device, dataloader) - - pb.progress(75,text="Saving Mask...") - - mask_path = DepCNN.SaveMask(pred_df, img.height, img.width, cropsize, custom_threshold, threshold) - - pb.progress(100,text="Done!") - - st.image(mask_path) - st.download_button(label="Download Mask",data=mask_path,file_name="predicted_mask.png",mime="image/png") - st.download_button(label="Download Predictions",data=pred_df.to_csv(index=False),file_name="predicted_mask.csv",mime="text/csv") \ No newline at end of file diff --git a/spaces/candlend/vits-hoshimi/sovits/mel_processing.py b/spaces/candlend/vits-hoshimi/sovits/mel_processing.py deleted file mode 100644 index 99c5b35beb83f3b288af0fac5b49ebf2c69f062c..0000000000000000000000000000000000000000 --- a/spaces/candlend/vits-hoshimi/sovits/mel_processing.py +++ /dev/null @@ -1,112 +0,0 @@ -import math -import os -import random -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 
1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/losses/segm.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/losses/segm.py deleted file mode 100644 index 1962b886e1946fa4896776da8a007ae0a9a4fab3..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/losses/segm.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -from typing import Any, List -import torch -from torch.nn import functional as F - -from detectron2.config import CfgNode -from detectron2.structures import Instances - -from .utils import resample_data - - -class SegmentationLoss: - """ - Segmentation loss as cross-entropy for raw unnormalized scores given ground truth - labels. Segmentation ground truth labels are defined for the bounding box of - interest at some fixed resolution [S, S], where - S = MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE. - """ - - def __init__(self, cfg: CfgNode): - """ - Initialize segmentation loss from configuration options - - Args: - cfg (CfgNode): configuration options - """ - self.heatmap_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE - self.n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS - - def __call__( - self, - proposals_with_gt: List[Instances], - densepose_predictor_outputs: Any, - packed_annotations: Any, - ) -> torch.Tensor: - """ - Compute segmentation loss as cross-entropy on aligned segmentation - ground truth and estimated scores. 
- - Args: - proposals_with_gt (list of Instances): detections with associated ground truth data - densepose_predictor_outputs: an object of a dataclass that contains predictor outputs - with estimated values; assumed to have the following attributes: - * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S] - packed_annotations: packed annotations for efficient loss computation; - the following attributes are used: - - coarse_segm_gt - - bbox_xywh_gt - - bbox_xywh_est - """ - if packed_annotations.coarse_segm_gt is None: - return self.fake_value(densepose_predictor_outputs) - coarse_segm_est = densepose_predictor_outputs.coarse_segm[packed_annotations.bbox_indices] - with torch.no_grad(): - coarse_segm_gt = resample_data( - packed_annotations.coarse_segm_gt.unsqueeze(1), - packed_annotations.bbox_xywh_gt, - packed_annotations.bbox_xywh_est, - self.heatmap_size, - self.heatmap_size, - mode="nearest", - padding_mode="zeros", - ).squeeze(1) - if self.n_segm_chan == 2: - coarse_segm_gt = coarse_segm_gt > 0 - return F.cross_entropy(coarse_segm_est, coarse_segm_gt.long()) - - def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor: - """ - Fake segmentation loss used when no suitable ground truth data - was found in a batch. The loss has a value 0 and is primarily used to - construct the computation graph, so that `DistributedDataParallel` - has similar graphs on all GPUs and can perform reduction properly. - - Args: - densepose_predictor_outputs: DensePose predictor outputs, an object - of a dataclass that is assumed to have `coarse_segm` - attribute - Return: - Zero value loss with proper computation graph - """ - return densepose_predictor_outputs.coarse_segm.sum() * 0 diff --git a/spaces/ccolas/TastyPiano/src/music/utilities/handcoded_rep_utilities/tht/tests/tracking_overtime_test.py b/spaces/ccolas/TastyPiano/src/music/utilities/handcoded_rep_utilities/tht/tests/tracking_overtime_test.py deleted file mode 100644 index a61b155608a492a125a13695222f3e73cc537e17..0000000000000000000000000000000000000000 --- a/spaces/ccolas/TastyPiano/src/music/utilities/handcoded_rep_utilities/tht/tests/tracking_overtime_test.py +++ /dev/null @@ -1,85 +0,0 @@ -import pytest -import addict -from m2.tht import tracking_overtime - - -@pytest.fixture -def basic_hts_mock(mocker): - '''A mock handcoded_rep_utilities result with two HypothesisTracker''' - m = mocker - - onset_times = [0, 100, 200, 300] - - h1 = m.MagicMock() - h1.confs = list(zip(range(1, 4), [1, 1, 4])) - h1.corr = list(zip(range(1, 4), [m.MagicMock() for _ in range(3)])) - h1.onset_times = onset_times - h1.__repr__ = m.Mock(return_value='h1') - - h2 = m.MagicMock() - h2.confs = list(zip(range(2, 4), [2, 3])) - h2.corr = list(zip(range(2, 4), [m.MagicMock() for _ in range(1, 3)])) - h2.onset_times = onset_times - h2.__repr__ = m.Mock(return_value='h2') - - hts = {'h1': h1, 'h2': h2} - return addict.Dict({ - 'hts': hts, - 'h1': h1, - 'h2': h2, - 'onset_times': onset_times - }) - - -def matchesHypothesisAtTime(hts=None, - onset_idx=None, corr=None, - ht_value=None, conf=None): - def match(hat): - if hts: - hat.hts == hts - if onset_idx: - assert hat.onset_idx == onset_idx - if corr: - assert hat.corr == corr - if ht_value: - assert ht_value == ht_value - if conf: - assert conf == conf - return True - return match - - -def equalsToMatchers(hats, matchers): - assert len(hats) == len(matchers) - assert all([m(h) for m, h in zip(matchers, hats)]) - - -def test_overtime_tracking_init(basic_hts_mock): - b = basic_hts_mock - 
hts_at_time = tracking_overtime.OvertimeTracking(b.hts) - assert hts_at_time.onset_times == b.onset_times - assert sorted(hts_at_time.time.keys()) == b.onset_times[1:] - hts_at_sorted_time = list(hts_at_time.hypothesis_by_time()) - print(hts_at_sorted_time) - equalsToMatchers(hts_at_sorted_time[0][1], - [matchesHypothesisAtTime(hts=b.h1, onset_idx=1, conf=1)]) - equalsToMatchers(hts_at_sorted_time[1][1], - [matchesHypothesisAtTime(hts=b.h1, onset_idx=2, conf=1), - matchesHypothesisAtTime(hts=b.h2, onset_idx=2, conf=2)]) - equalsToMatchers(hts_at_sorted_time[2][1], - [matchesHypothesisAtTime(hts=b.h1, onset_idx=3, conf=4), - matchesHypothesisAtTime(hts=b.h2, onset_idx=3, conf=3)]) - - -def test_conf_sorted_hats(basic_hts_mock): - b = basic_hts_mock - hts_at_time = tracking_overtime.OvertimeTracking(b.hts) - hts_at_sorted_time = list(hts_at_time.hypothesis_sorted_by_conf()) - equalsToMatchers(hts_at_sorted_time[0][1], - [matchesHypothesisAtTime(hts=b.h1, onset_idx=1, conf=1)]) - equalsToMatchers(hts_at_sorted_time[1][1], - [matchesHypothesisAtTime(hts=b.h2, onset_idx=2, conf=2), - matchesHypothesisAtTime(hts=b.h1, onset_idx=2, conf=1)]) - equalsToMatchers(hts_at_sorted_time[2][1], - [matchesHypothesisAtTime(hts=b.h1, onset_idx=3, conf=4), - matchesHypothesisAtTime(hts=b.h2, onset_idx=3, conf=3)]) diff --git a/spaces/chaninder/SmartWaste/app.py b/spaces/chaninder/SmartWaste/app.py deleted file mode 100644 index 75de6afc73ad1484f88b75367bd0188b50e87499..0000000000000000000000000000000000000000 --- a/spaces/chaninder/SmartWaste/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import gradio as gr -import tensorflow as tf -from tensorflow import keras - -import huggingface_hub -from huggingface_hub import from_pretrained_keras - - -# load custom pre-trained model from HuggingFace models -model_api_link = 'chaninder/waste-sorting-model-v4' #'chaninder/waste-sorting-model-updated', #'chaninder/waste-sorting-model' -pre_trained_model = from_pretrained_keras(model_api_link) - -# classification labels -labels = ['compost', 'e-waste', 'recycle', 'trash'] - - -def classify_image(inp): - inp = inp.reshape((-1, 224, 224, 3)) - #inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp) - prediction = pre_trained_model.predict(inp).flatten() - confidences = {labels[i]: float(prediction[i]) for i in range(4)} - return confidences - -# create Gradio interface - -iface = gr.Interface(fn=classify_image, - inputs=gr.Image(shape=(224, 224)), - outputs=gr.Label(num_top_classes=4), - examples=["banana.jpg", 'can.jpg', 'battery.jpg']) - -iface.launch(share=True) \ No newline at end of file diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/models/auto/modeling_auto.py b/spaces/chendl/compositional_test/transformers/src/transformers/models/auto/modeling_auto.py deleted file mode 100644 index 26c9a1becd9d41290e46b82f2cf66c3b180b8d97..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/src/transformers/models/auto/modeling_auto.py +++ /dev/null @@ -1,1328 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Auto Model class.""" - -import warnings -from collections import OrderedDict - -from ...utils import logging -from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update -from .configuration_auto import CONFIG_MAPPING_NAMES - - -logger = logging.get_logger(__name__) - - -MODEL_MAPPING_NAMES = OrderedDict( - [ - # Base model mapping - ("albert", "AlbertModel"), - ("align", "AlignModel"), - ("altclip", "AltCLIPModel"), - ("audio-spectrogram-transformer", "ASTModel"), - ("bart", "BartModel"), - ("beit", "BeitModel"), - ("bert", "BertModel"), - ("bert-generation", "BertGenerationEncoder"), - ("big_bird", "BigBirdModel"), - ("bigbird_pegasus", "BigBirdPegasusModel"), - ("biogpt", "BioGptModel"), - ("bit", "BitModel"), - ("blenderbot", "BlenderbotModel"), - ("blenderbot-small", "BlenderbotSmallModel"), - ("blip", "BlipModel"), - ("blip-2", "Blip2Model"), - ("bloom", "BloomModel"), - ("bridgetower", "BridgeTowerModel"), - ("camembert", "CamembertModel"), - ("canine", "CanineModel"), - ("chinese_clip", "ChineseCLIPModel"), - ("clap", "ClapModel"), - ("clip", "CLIPModel"), - ("clipseg", "CLIPSegModel"), - ("codegen", "CodeGenModel"), - ("conditional_detr", "ConditionalDetrModel"), - ("convbert", "ConvBertModel"), - ("convnext", "ConvNextModel"), - ("convnextv2", "ConvNextV2Model"), - ("cpmant", "CpmAntModel"), - ("ctrl", "CTRLModel"), - ("cvt", "CvtModel"), - ("data2vec-audio", "Data2VecAudioModel"), - ("data2vec-text", "Data2VecTextModel"), - ("data2vec-vision", "Data2VecVisionModel"), - ("deberta", "DebertaModel"), - ("deberta-v2", "DebertaV2Model"), - ("decision_transformer", "DecisionTransformerModel"), - ("deformable_detr", "DeformableDetrModel"), - ("deit", "DeiTModel"), - ("deta", "DetaModel"), - ("detr", "DetrModel"), - ("dinat", "DinatModel"), - ("distilbert", "DistilBertModel"), - ("donut-swin", "DonutSwinModel"), - ("dpr", "DPRQuestionEncoder"), - ("dpt", "DPTModel"), - ("efficientformer", "EfficientFormerModel"), - ("efficientnet", "EfficientNetModel"), - ("electra", "ElectraModel"), - ("ernie", "ErnieModel"), - ("ernie_m", "ErnieMModel"), - ("esm", "EsmModel"), - ("flaubert", "FlaubertModel"), - ("flava", "FlavaModel"), - ("fnet", "FNetModel"), - ("fsmt", "FSMTModel"), - ("funnel", ("FunnelModel", "FunnelBaseModel")), - ("git", "GitModel"), - ("glpn", "GLPNModel"), - ("gpt-sw3", "GPT2Model"), - ("gpt2", "GPT2Model"), - ("gpt_bigcode", "GPTBigCodeModel"), - ("gpt_neo", "GPTNeoModel"), - ("gpt_neox", "GPTNeoXModel"), - ("gpt_neox_japanese", "GPTNeoXJapaneseModel"), - ("gptj", "GPTJModel"), - ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), - ("graphormer", "GraphormerModel"), - ("groupvit", "GroupViTModel"), - ("hubert", "HubertModel"), - ("ibert", "IBertModel"), - ("imagegpt", "ImageGPTModel"), - ("informer", "InformerModel"), - ("jukebox", "JukeboxModel"), - ("layoutlm", "LayoutLMModel"), - ("layoutlmv2", "LayoutLMv2Model"), - ("layoutlmv3", "LayoutLMv3Model"), - ("led", "LEDModel"), - ("levit", "LevitModel"), - ("lilt", "LiltModel"), - ("llama", "LlamaModel"), - ("longformer", "LongformerModel"), - ("longt5", "LongT5Model"), - ("luke", 
"LukeModel"), - ("lxmert", "LxmertModel"), - ("m2m_100", "M2M100Model"), - ("marian", "MarianModel"), - ("markuplm", "MarkupLMModel"), - ("mask2former", "Mask2FormerModel"), - ("maskformer", "MaskFormerModel"), - ("maskformer-swin", "MaskFormerSwinModel"), - ("mbart", "MBartModel"), - ("mctct", "MCTCTModel"), - ("mega", "MegaModel"), - ("megatron-bert", "MegatronBertModel"), - ("mgp-str", "MgpstrForSceneTextRecognition"), - ("mobilebert", "MobileBertModel"), - ("mobilenet_v1", "MobileNetV1Model"), - ("mobilenet_v2", "MobileNetV2Model"), - ("mobilevit", "MobileViTModel"), - ("mpnet", "MPNetModel"), - ("mt5", "MT5Model"), - ("mvp", "MvpModel"), - ("nat", "NatModel"), - ("nezha", "NezhaModel"), - ("nllb-moe", "NllbMoeModel"), - ("nystromformer", "NystromformerModel"), - ("oneformer", "OneFormerModel"), - ("openai-gpt", "OpenAIGPTModel"), - ("opt", "OPTModel"), - ("owlvit", "OwlViTModel"), - ("pegasus", "PegasusModel"), - ("pegasus_x", "PegasusXModel"), - ("perceiver", "PerceiverModel"), - ("plbart", "PLBartModel"), - ("poolformer", "PoolFormerModel"), - ("prophetnet", "ProphetNetModel"), - ("qdqbert", "QDQBertModel"), - ("reformer", "ReformerModel"), - ("regnet", "RegNetModel"), - ("rembert", "RemBertModel"), - ("resnet", "ResNetModel"), - ("retribert", "RetriBertModel"), - ("roberta", "RobertaModel"), - ("roberta-prelayernorm", "RobertaPreLayerNormModel"), - ("roc_bert", "RoCBertModel"), - ("roformer", "RoFormerModel"), - ("segformer", "SegformerModel"), - ("sew", "SEWModel"), - ("sew-d", "SEWDModel"), - ("speech_to_text", "Speech2TextModel"), - ("speecht5", "SpeechT5Model"), - ("splinter", "SplinterModel"), - ("squeezebert", "SqueezeBertModel"), - ("swin", "SwinModel"), - ("swin2sr", "Swin2SRModel"), - ("swinv2", "Swinv2Model"), - ("switch_transformers", "SwitchTransformersModel"), - ("t5", "T5Model"), - ("table-transformer", "TableTransformerModel"), - ("tapas", "TapasModel"), - ("time_series_transformer", "TimeSeriesTransformerModel"), - ("timesformer", "TimesformerModel"), - ("trajectory_transformer", "TrajectoryTransformerModel"), - ("transfo-xl", "TransfoXLModel"), - ("tvlt", "TvltModel"), - ("unispeech", "UniSpeechModel"), - ("unispeech-sat", "UniSpeechSatModel"), - ("van", "VanModel"), - ("videomae", "VideoMAEModel"), - ("vilt", "ViltModel"), - ("vision-text-dual-encoder", "VisionTextDualEncoderModel"), - ("visual_bert", "VisualBertModel"), - ("vit", "ViTModel"), - ("vit_hybrid", "ViTHybridModel"), - ("vit_mae", "ViTMAEModel"), - ("vit_msn", "ViTMSNModel"), - ("wav2vec2", "Wav2Vec2Model"), - ("wav2vec2-conformer", "Wav2Vec2ConformerModel"), - ("wavlm", "WavLMModel"), - ("whisper", "WhisperModel"), - ("xclip", "XCLIPModel"), - ("xglm", "XGLMModel"), - ("xlm", "XLMModel"), - ("xlm-prophetnet", "XLMProphetNetModel"), - ("xlm-roberta", "XLMRobertaModel"), - ("xlm-roberta-xl", "XLMRobertaXLModel"), - ("xlnet", "XLNetModel"), - ("xmod", "XmodModel"), - ("yolos", "YolosModel"), - ("yoso", "YosoModel"), - ] -) - -MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict( - [ - # Model for pre-training mapping - ("albert", "AlbertForPreTraining"), - ("bart", "BartForConditionalGeneration"), - ("bert", "BertForPreTraining"), - ("big_bird", "BigBirdForPreTraining"), - ("bloom", "BloomForCausalLM"), - ("camembert", "CamembertForMaskedLM"), - ("ctrl", "CTRLLMHeadModel"), - ("data2vec-text", "Data2VecTextForMaskedLM"), - ("deberta", "DebertaForMaskedLM"), - ("deberta-v2", "DebertaV2ForMaskedLM"), - ("distilbert", "DistilBertForMaskedLM"), - ("electra", "ElectraForPreTraining"), - ("ernie", 
"ErnieForPreTraining"), - ("flaubert", "FlaubertWithLMHeadModel"), - ("flava", "FlavaForPreTraining"), - ("fnet", "FNetForPreTraining"), - ("fsmt", "FSMTForConditionalGeneration"), - ("funnel", "FunnelForPreTraining"), - ("gpt-sw3", "GPT2LMHeadModel"), - ("gpt2", "GPT2LMHeadModel"), - ("gpt_bigcode", "GPTBigCodeForCausalLM"), - ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), - ("ibert", "IBertForMaskedLM"), - ("layoutlm", "LayoutLMForMaskedLM"), - ("longformer", "LongformerForMaskedLM"), - ("luke", "LukeForMaskedLM"), - ("lxmert", "LxmertForPreTraining"), - ("mega", "MegaForMaskedLM"), - ("megatron-bert", "MegatronBertForPreTraining"), - ("mobilebert", "MobileBertForPreTraining"), - ("mpnet", "MPNetForMaskedLM"), - ("mvp", "MvpForConditionalGeneration"), - ("nezha", "NezhaForPreTraining"), - ("nllb-moe", "NllbMoeForConditionalGeneration"), - ("openai-gpt", "OpenAIGPTLMHeadModel"), - ("retribert", "RetriBertModel"), - ("roberta", "RobertaForMaskedLM"), - ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"), - ("roc_bert", "RoCBertForPreTraining"), - ("splinter", "SplinterForPreTraining"), - ("squeezebert", "SqueezeBertForMaskedLM"), - ("switch_transformers", "SwitchTransformersForConditionalGeneration"), - ("t5", "T5ForConditionalGeneration"), - ("tapas", "TapasForMaskedLM"), - ("transfo-xl", "TransfoXLLMHeadModel"), - ("tvlt", "TvltForPreTraining"), - ("unispeech", "UniSpeechForPreTraining"), - ("unispeech-sat", "UniSpeechSatForPreTraining"), - ("videomae", "VideoMAEForPreTraining"), - ("visual_bert", "VisualBertForPreTraining"), - ("vit_mae", "ViTMAEForPreTraining"), - ("wav2vec2", "Wav2Vec2ForPreTraining"), - ("wav2vec2-conformer", "Wav2Vec2ConformerForPreTraining"), - ("xlm", "XLMWithLMHeadModel"), - ("xlm-roberta", "XLMRobertaForMaskedLM"), - ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), - ("xlnet", "XLNetLMHeadModel"), - ("xmod", "XmodForMaskedLM"), - ] -) - -MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict( - [ - # Model with LM heads mapping - ("albert", "AlbertForMaskedLM"), - ("bart", "BartForConditionalGeneration"), - ("bert", "BertForMaskedLM"), - ("big_bird", "BigBirdForMaskedLM"), - ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"), - ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"), - ("bloom", "BloomForCausalLM"), - ("camembert", "CamembertForMaskedLM"), - ("codegen", "CodeGenForCausalLM"), - ("convbert", "ConvBertForMaskedLM"), - ("cpmant", "CpmAntForCausalLM"), - ("ctrl", "CTRLLMHeadModel"), - ("data2vec-text", "Data2VecTextForMaskedLM"), - ("deberta", "DebertaForMaskedLM"), - ("deberta-v2", "DebertaV2ForMaskedLM"), - ("distilbert", "DistilBertForMaskedLM"), - ("electra", "ElectraForMaskedLM"), - ("encoder-decoder", "EncoderDecoderModel"), - ("ernie", "ErnieForMaskedLM"), - ("esm", "EsmForMaskedLM"), - ("flaubert", "FlaubertWithLMHeadModel"), - ("fnet", "FNetForMaskedLM"), - ("fsmt", "FSMTForConditionalGeneration"), - ("funnel", "FunnelForMaskedLM"), - ("git", "GitForCausalLM"), - ("gpt-sw3", "GPT2LMHeadModel"), - ("gpt2", "GPT2LMHeadModel"), - ("gpt_bigcode", "GPTBigCodeForCausalLM"), - ("gpt_neo", "GPTNeoForCausalLM"), - ("gpt_neox", "GPTNeoXForCausalLM"), - ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"), - ("gptj", "GPTJForCausalLM"), - ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), - ("ibert", "IBertForMaskedLM"), - ("layoutlm", "LayoutLMForMaskedLM"), - ("led", "LEDForConditionalGeneration"), - ("longformer", "LongformerForMaskedLM"), - ("longt5", "LongT5ForConditionalGeneration"), - ("luke", 
"LukeForMaskedLM"), - ("m2m_100", "M2M100ForConditionalGeneration"), - ("marian", "MarianMTModel"), - ("mega", "MegaForMaskedLM"), - ("megatron-bert", "MegatronBertForCausalLM"), - ("mobilebert", "MobileBertForMaskedLM"), - ("mpnet", "MPNetForMaskedLM"), - ("mvp", "MvpForConditionalGeneration"), - ("nezha", "NezhaForMaskedLM"), - ("nllb-moe", "NllbMoeForConditionalGeneration"), - ("nystromformer", "NystromformerForMaskedLM"), - ("openai-gpt", "OpenAIGPTLMHeadModel"), - ("pegasus_x", "PegasusXForConditionalGeneration"), - ("plbart", "PLBartForConditionalGeneration"), - ("qdqbert", "QDQBertForMaskedLM"), - ("reformer", "ReformerModelWithLMHead"), - ("rembert", "RemBertForMaskedLM"), - ("roberta", "RobertaForMaskedLM"), - ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"), - ("roc_bert", "RoCBertForMaskedLM"), - ("roformer", "RoFormerForMaskedLM"), - ("speech_to_text", "Speech2TextForConditionalGeneration"), - ("squeezebert", "SqueezeBertForMaskedLM"), - ("switch_transformers", "SwitchTransformersForConditionalGeneration"), - ("t5", "T5ForConditionalGeneration"), - ("tapas", "TapasForMaskedLM"), - ("transfo-xl", "TransfoXLLMHeadModel"), - ("wav2vec2", "Wav2Vec2ForMaskedLM"), - ("whisper", "WhisperForConditionalGeneration"), - ("xlm", "XLMWithLMHeadModel"), - ("xlm-roberta", "XLMRobertaForMaskedLM"), - ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), - ("xlnet", "XLNetLMHeadModel"), - ("xmod", "XmodForMaskedLM"), - ("yoso", "YosoForMaskedLM"), - ] -) - -MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict( - [ - # Model for Causal LM mapping - ("bart", "BartForCausalLM"), - ("bert", "BertLMHeadModel"), - ("bert-generation", "BertGenerationDecoder"), - ("big_bird", "BigBirdForCausalLM"), - ("bigbird_pegasus", "BigBirdPegasusForCausalLM"), - ("biogpt", "BioGptForCausalLM"), - ("blenderbot", "BlenderbotForCausalLM"), - ("blenderbot-small", "BlenderbotSmallForCausalLM"), - ("bloom", "BloomForCausalLM"), - ("camembert", "CamembertForCausalLM"), - ("codegen", "CodeGenForCausalLM"), - ("cpmant", "CpmAntForCausalLM"), - ("ctrl", "CTRLLMHeadModel"), - ("data2vec-text", "Data2VecTextForCausalLM"), - ("electra", "ElectraForCausalLM"), - ("ernie", "ErnieForCausalLM"), - ("git", "GitForCausalLM"), - ("gpt-sw3", "GPT2LMHeadModel"), - ("gpt2", "GPT2LMHeadModel"), - ("gpt_bigcode", "GPTBigCodeForCausalLM"), - ("gpt_neo", "GPTNeoForCausalLM"), - ("gpt_neox", "GPTNeoXForCausalLM"), - ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"), - ("gptj", "GPTJForCausalLM"), - ("llama", "LlamaForCausalLM"), - ("marian", "MarianForCausalLM"), - ("mbart", "MBartForCausalLM"), - ("mega", "MegaForCausalLM"), - ("megatron-bert", "MegatronBertForCausalLM"), - ("mvp", "MvpForCausalLM"), - ("openai-gpt", "OpenAIGPTLMHeadModel"), - ("opt", "OPTForCausalLM"), - ("pegasus", "PegasusForCausalLM"), - ("plbart", "PLBartForCausalLM"), - ("prophetnet", "ProphetNetForCausalLM"), - ("qdqbert", "QDQBertLMHeadModel"), - ("reformer", "ReformerModelWithLMHead"), - ("rembert", "RemBertForCausalLM"), - ("roberta", "RobertaForCausalLM"), - ("roberta-prelayernorm", "RobertaPreLayerNormForCausalLM"), - ("roc_bert", "RoCBertForCausalLM"), - ("roformer", "RoFormerForCausalLM"), - ("speech_to_text_2", "Speech2Text2ForCausalLM"), - ("transfo-xl", "TransfoXLLMHeadModel"), - ("trocr", "TrOCRForCausalLM"), - ("xglm", "XGLMForCausalLM"), - ("xlm", "XLMWithLMHeadModel"), - ("xlm-prophetnet", "XLMProphetNetForCausalLM"), - ("xlm-roberta", "XLMRobertaForCausalLM"), - ("xlm-roberta-xl", "XLMRobertaXLForCausalLM"), - ("xlnet", "XLNetLMHeadModel"), - ("xmod", 
"XmodForCausalLM"), - ] -) - -MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict( - [ - ("deit", "DeiTForMaskedImageModeling"), - ("swin", "SwinForMaskedImageModeling"), - ("swinv2", "Swinv2ForMaskedImageModeling"), - ("vit", "ViTForMaskedImageModeling"), - ] -) - - -MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES = OrderedDict( - # Model for Causal Image Modeling mapping - [ - ("imagegpt", "ImageGPTForCausalImageModeling"), - ] -) - -MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Image Classification mapping - ("beit", "BeitForImageClassification"), - ("bit", "BitForImageClassification"), - ("convnext", "ConvNextForImageClassification"), - ("convnextv2", "ConvNextV2ForImageClassification"), - ("cvt", "CvtForImageClassification"), - ("data2vec-vision", "Data2VecVisionForImageClassification"), - ("deit", ("DeiTForImageClassification", "DeiTForImageClassificationWithTeacher")), - ("dinat", "DinatForImageClassification"), - ( - "efficientformer", - ( - "EfficientFormerForImageClassification", - "EfficientFormerForImageClassificationWithTeacher", - ), - ), - ("efficientnet", "EfficientNetForImageClassification"), - ("imagegpt", "ImageGPTForImageClassification"), - ("levit", ("LevitForImageClassification", "LevitForImageClassificationWithTeacher")), - ("mobilenet_v1", "MobileNetV1ForImageClassification"), - ("mobilenet_v2", "MobileNetV2ForImageClassification"), - ("mobilevit", "MobileViTForImageClassification"), - ("nat", "NatForImageClassification"), - ( - "perceiver", - ( - "PerceiverForImageClassificationLearned", - "PerceiverForImageClassificationFourier", - "PerceiverForImageClassificationConvProcessing", - ), - ), - ("poolformer", "PoolFormerForImageClassification"), - ("regnet", "RegNetForImageClassification"), - ("resnet", "ResNetForImageClassification"), - ("segformer", "SegformerForImageClassification"), - ("swin", "SwinForImageClassification"), - ("swinv2", "Swinv2ForImageClassification"), - ("van", "VanForImageClassification"), - ("vit", "ViTForImageClassification"), - ("vit_hybrid", "ViTHybridForImageClassification"), - ("vit_msn", "ViTMSNForImageClassification"), - ] -) - -MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = OrderedDict( - [ - # Do not add new models here, this class will be deprecated in the future. 
- # Model for Image Segmentation mapping - ("detr", "DetrForSegmentation"), - ] -) - -MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Semantic Segmentation mapping - ("beit", "BeitForSemanticSegmentation"), - ("data2vec-vision", "Data2VecVisionForSemanticSegmentation"), - ("dpt", "DPTForSemanticSegmentation"), - ("mobilenet_v2", "MobileNetV2ForSemanticSegmentation"), - ("mobilevit", "MobileViTForSemanticSegmentation"), - ("segformer", "SegformerForSemanticSegmentation"), - ("upernet", "UperNetForSemanticSegmentation"), - ] -) - -MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Instance Segmentation mapping - # MaskFormerForInstanceSegmentation can be removed from this mapping in v5 - ("maskformer", "MaskFormerForInstanceSegmentation"), - ] -) - -MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Universal Segmentation mapping - ("detr", "DetrForSegmentation"), - ("mask2former", "Mask2FormerForUniversalSegmentation"), - ("maskformer", "MaskFormerForInstanceSegmentation"), - ("oneformer", "OneFormerForUniversalSegmentation"), - ] -) - -MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - ("timesformer", "TimesformerForVideoClassification"), - ("videomae", "VideoMAEForVideoClassification"), - ] -) - -MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict( - [ - ("blip", "BlipForConditionalGeneration"), - ("blip-2", "Blip2ForConditionalGeneration"), - ("vision-encoder-decoder", "VisionEncoderDecoderModel"), - ] -) - -MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict( - [ - # Model for Masked LM mapping - ("albert", "AlbertForMaskedLM"), - ("bart", "BartForConditionalGeneration"), - ("bert", "BertForMaskedLM"), - ("big_bird", "BigBirdForMaskedLM"), - ("camembert", "CamembertForMaskedLM"), - ("convbert", "ConvBertForMaskedLM"), - ("data2vec-text", "Data2VecTextForMaskedLM"), - ("deberta", "DebertaForMaskedLM"), - ("deberta-v2", "DebertaV2ForMaskedLM"), - ("distilbert", "DistilBertForMaskedLM"), - ("electra", "ElectraForMaskedLM"), - ("ernie", "ErnieForMaskedLM"), - ("esm", "EsmForMaskedLM"), - ("flaubert", "FlaubertWithLMHeadModel"), - ("fnet", "FNetForMaskedLM"), - ("funnel", "FunnelForMaskedLM"), - ("ibert", "IBertForMaskedLM"), - ("layoutlm", "LayoutLMForMaskedLM"), - ("longformer", "LongformerForMaskedLM"), - ("luke", "LukeForMaskedLM"), - ("mbart", "MBartForConditionalGeneration"), - ("mega", "MegaForMaskedLM"), - ("megatron-bert", "MegatronBertForMaskedLM"), - ("mobilebert", "MobileBertForMaskedLM"), - ("mpnet", "MPNetForMaskedLM"), - ("mvp", "MvpForConditionalGeneration"), - ("nezha", "NezhaForMaskedLM"), - ("nystromformer", "NystromformerForMaskedLM"), - ("perceiver", "PerceiverForMaskedLM"), - ("qdqbert", "QDQBertForMaskedLM"), - ("reformer", "ReformerForMaskedLM"), - ("rembert", "RemBertForMaskedLM"), - ("roberta", "RobertaForMaskedLM"), - ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"), - ("roc_bert", "RoCBertForMaskedLM"), - ("roformer", "RoFormerForMaskedLM"), - ("squeezebert", "SqueezeBertForMaskedLM"), - ("tapas", "TapasForMaskedLM"), - ("wav2vec2", "Wav2Vec2ForMaskedLM"), - ("xlm", "XLMWithLMHeadModel"), - ("xlm-roberta", "XLMRobertaForMaskedLM"), - ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), - ("xmod", "XmodForMaskedLM"), - ("yoso", "YosoForMaskedLM"), - ] -) - -MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict( - [ - # Model for Object Detection mapping - ("conditional_detr", "ConditionalDetrForObjectDetection"), - ("deformable_detr", 
"DeformableDetrForObjectDetection"), - ("deta", "DetaForObjectDetection"), - ("detr", "DetrForObjectDetection"), - ("table-transformer", "TableTransformerForObjectDetection"), - ("yolos", "YolosForObjectDetection"), - ] -) - -MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict( - [ - # Model for Zero Shot Object Detection mapping - ("owlvit", "OwlViTForObjectDetection") - ] -) - -MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = OrderedDict( - [ - # Model for depth estimation mapping - ("dpt", "DPTForDepthEstimation"), - ("glpn", "GLPNForDepthEstimation"), - ] -) -MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict( - [ - # Model for Seq2Seq Causal LM mapping - ("bart", "BartForConditionalGeneration"), - ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"), - ("blenderbot", "BlenderbotForConditionalGeneration"), - ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"), - ("encoder-decoder", "EncoderDecoderModel"), - ("fsmt", "FSMTForConditionalGeneration"), - ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), - ("led", "LEDForConditionalGeneration"), - ("longt5", "LongT5ForConditionalGeneration"), - ("m2m_100", "M2M100ForConditionalGeneration"), - ("marian", "MarianMTModel"), - ("mbart", "MBartForConditionalGeneration"), - ("mt5", "MT5ForConditionalGeneration"), - ("mvp", "MvpForConditionalGeneration"), - ("nllb-moe", "NllbMoeForConditionalGeneration"), - ("pegasus", "PegasusForConditionalGeneration"), - ("pegasus_x", "PegasusXForConditionalGeneration"), - ("plbart", "PLBartForConditionalGeneration"), - ("prophetnet", "ProphetNetForConditionalGeneration"), - ("switch_transformers", "SwitchTransformersForConditionalGeneration"), - ("t5", "T5ForConditionalGeneration"), - ("xlm-prophetnet", "XLMProphetNetForConditionalGeneration"), - ] -) - -MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict( - [ - ("speech-encoder-decoder", "SpeechEncoderDecoderModel"), - ("speech_to_text", "Speech2TextForConditionalGeneration"), - ("speecht5", "SpeechT5ForSpeechToText"), - ("whisper", "WhisperForConditionalGeneration"), - ] -) - -MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Sequence Classification mapping - ("albert", "AlbertForSequenceClassification"), - ("bart", "BartForSequenceClassification"), - ("bert", "BertForSequenceClassification"), - ("big_bird", "BigBirdForSequenceClassification"), - ("bigbird_pegasus", "BigBirdPegasusForSequenceClassification"), - ("bloom", "BloomForSequenceClassification"), - ("camembert", "CamembertForSequenceClassification"), - ("canine", "CanineForSequenceClassification"), - ("convbert", "ConvBertForSequenceClassification"), - ("ctrl", "CTRLForSequenceClassification"), - ("data2vec-text", "Data2VecTextForSequenceClassification"), - ("deberta", "DebertaForSequenceClassification"), - ("deberta-v2", "DebertaV2ForSequenceClassification"), - ("distilbert", "DistilBertForSequenceClassification"), - ("electra", "ElectraForSequenceClassification"), - ("ernie", "ErnieForSequenceClassification"), - ("ernie_m", "ErnieMForSequenceClassification"), - ("esm", "EsmForSequenceClassification"), - ("flaubert", "FlaubertForSequenceClassification"), - ("fnet", "FNetForSequenceClassification"), - ("funnel", "FunnelForSequenceClassification"), - ("gpt-sw3", "GPT2ForSequenceClassification"), - ("gpt2", "GPT2ForSequenceClassification"), - ("gpt_bigcode", "GPTBigCodeForSequenceClassification"), - ("gpt_neo", "GPTNeoForSequenceClassification"), - ("gpt_neox", "GPTNeoXForSequenceClassification"), - ("gptj", 
"GPTJForSequenceClassification"), - ("ibert", "IBertForSequenceClassification"), - ("layoutlm", "LayoutLMForSequenceClassification"), - ("layoutlmv2", "LayoutLMv2ForSequenceClassification"), - ("layoutlmv3", "LayoutLMv3ForSequenceClassification"), - ("led", "LEDForSequenceClassification"), - ("lilt", "LiltForSequenceClassification"), - ("llama", "LlamaForSequenceClassification"), - ("longformer", "LongformerForSequenceClassification"), - ("luke", "LukeForSequenceClassification"), - ("markuplm", "MarkupLMForSequenceClassification"), - ("mbart", "MBartForSequenceClassification"), - ("mega", "MegaForSequenceClassification"), - ("megatron-bert", "MegatronBertForSequenceClassification"), - ("mobilebert", "MobileBertForSequenceClassification"), - ("mpnet", "MPNetForSequenceClassification"), - ("mvp", "MvpForSequenceClassification"), - ("nezha", "NezhaForSequenceClassification"), - ("nystromformer", "NystromformerForSequenceClassification"), - ("openai-gpt", "OpenAIGPTForSequenceClassification"), - ("opt", "OPTForSequenceClassification"), - ("perceiver", "PerceiverForSequenceClassification"), - ("plbart", "PLBartForSequenceClassification"), - ("qdqbert", "QDQBertForSequenceClassification"), - ("reformer", "ReformerForSequenceClassification"), - ("rembert", "RemBertForSequenceClassification"), - ("roberta", "RobertaForSequenceClassification"), - ("roberta-prelayernorm", "RobertaPreLayerNormForSequenceClassification"), - ("roc_bert", "RoCBertForSequenceClassification"), - ("roformer", "RoFormerForSequenceClassification"), - ("squeezebert", "SqueezeBertForSequenceClassification"), - ("tapas", "TapasForSequenceClassification"), - ("transfo-xl", "TransfoXLForSequenceClassification"), - ("xlm", "XLMForSequenceClassification"), - ("xlm-roberta", "XLMRobertaForSequenceClassification"), - ("xlm-roberta-xl", "XLMRobertaXLForSequenceClassification"), - ("xlnet", "XLNetForSequenceClassification"), - ("xmod", "XmodForSequenceClassification"), - ("yoso", "YosoForSequenceClassification"), - ] -) - -MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( - [ - # Model for Question Answering mapping - ("albert", "AlbertForQuestionAnswering"), - ("bart", "BartForQuestionAnswering"), - ("bert", "BertForQuestionAnswering"), - ("big_bird", "BigBirdForQuestionAnswering"), - ("bigbird_pegasus", "BigBirdPegasusForQuestionAnswering"), - ("bloom", "BloomForQuestionAnswering"), - ("camembert", "CamembertForQuestionAnswering"), - ("canine", "CanineForQuestionAnswering"), - ("convbert", "ConvBertForQuestionAnswering"), - ("data2vec-text", "Data2VecTextForQuestionAnswering"), - ("deberta", "DebertaForQuestionAnswering"), - ("deberta-v2", "DebertaV2ForQuestionAnswering"), - ("distilbert", "DistilBertForQuestionAnswering"), - ("electra", "ElectraForQuestionAnswering"), - ("ernie", "ErnieForQuestionAnswering"), - ("ernie_m", "ErnieMForQuestionAnswering"), - ("flaubert", "FlaubertForQuestionAnsweringSimple"), - ("fnet", "FNetForQuestionAnswering"), - ("funnel", "FunnelForQuestionAnswering"), - ("gptj", "GPTJForQuestionAnswering"), - ("ibert", "IBertForQuestionAnswering"), - ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), - ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), - ("led", "LEDForQuestionAnswering"), - ("lilt", "LiltForQuestionAnswering"), - ("longformer", "LongformerForQuestionAnswering"), - ("luke", "LukeForQuestionAnswering"), - ("lxmert", "LxmertForQuestionAnswering"), - ("markuplm", "MarkupLMForQuestionAnswering"), - ("mbart", "MBartForQuestionAnswering"), - ("mega", "MegaForQuestionAnswering"), - 
("megatron-bert", "MegatronBertForQuestionAnswering"), - ("mobilebert", "MobileBertForQuestionAnswering"), - ("mpnet", "MPNetForQuestionAnswering"), - ("mvp", "MvpForQuestionAnswering"), - ("nezha", "NezhaForQuestionAnswering"), - ("nystromformer", "NystromformerForQuestionAnswering"), - ("opt", "OPTForQuestionAnswering"), - ("qdqbert", "QDQBertForQuestionAnswering"), - ("reformer", "ReformerForQuestionAnswering"), - ("rembert", "RemBertForQuestionAnswering"), - ("roberta", "RobertaForQuestionAnswering"), - ("roberta-prelayernorm", "RobertaPreLayerNormForQuestionAnswering"), - ("roc_bert", "RoCBertForQuestionAnswering"), - ("roformer", "RoFormerForQuestionAnswering"), - ("splinter", "SplinterForQuestionAnswering"), - ("squeezebert", "SqueezeBertForQuestionAnswering"), - ("xlm", "XLMForQuestionAnsweringSimple"), - ("xlm-roberta", "XLMRobertaForQuestionAnswering"), - ("xlm-roberta-xl", "XLMRobertaXLForQuestionAnswering"), - ("xlnet", "XLNetForQuestionAnsweringSimple"), - ("xmod", "XmodForQuestionAnswering"), - ("yoso", "YosoForQuestionAnswering"), - ] -) - -MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( - [ - # Model for Table Question Answering mapping - ("tapas", "TapasForQuestionAnswering"), - ] -) - -MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( - [ - ("vilt", "ViltForQuestionAnswering"), - ] -) - -MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( - [ - ("layoutlm", "LayoutLMForQuestionAnswering"), - ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), - ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), - ] -) - -MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Token Classification mapping - ("albert", "AlbertForTokenClassification"), - ("bert", "BertForTokenClassification"), - ("big_bird", "BigBirdForTokenClassification"), - ("biogpt", "BioGptForTokenClassification"), - ("bloom", "BloomForTokenClassification"), - ("camembert", "CamembertForTokenClassification"), - ("canine", "CanineForTokenClassification"), - ("convbert", "ConvBertForTokenClassification"), - ("data2vec-text", "Data2VecTextForTokenClassification"), - ("deberta", "DebertaForTokenClassification"), - ("deberta-v2", "DebertaV2ForTokenClassification"), - ("distilbert", "DistilBertForTokenClassification"), - ("electra", "ElectraForTokenClassification"), - ("ernie", "ErnieForTokenClassification"), - ("ernie_m", "ErnieMForTokenClassification"), - ("esm", "EsmForTokenClassification"), - ("flaubert", "FlaubertForTokenClassification"), - ("fnet", "FNetForTokenClassification"), - ("funnel", "FunnelForTokenClassification"), - ("gpt-sw3", "GPT2ForTokenClassification"), - ("gpt2", "GPT2ForTokenClassification"), - ("gpt_bigcode", "GPTBigCodeForTokenClassification"), - ("ibert", "IBertForTokenClassification"), - ("layoutlm", "LayoutLMForTokenClassification"), - ("layoutlmv2", "LayoutLMv2ForTokenClassification"), - ("layoutlmv3", "LayoutLMv3ForTokenClassification"), - ("lilt", "LiltForTokenClassification"), - ("longformer", "LongformerForTokenClassification"), - ("luke", "LukeForTokenClassification"), - ("markuplm", "MarkupLMForTokenClassification"), - ("mega", "MegaForTokenClassification"), - ("megatron-bert", "MegatronBertForTokenClassification"), - ("mobilebert", "MobileBertForTokenClassification"), - ("mpnet", "MPNetForTokenClassification"), - ("nezha", "NezhaForTokenClassification"), - ("nystromformer", "NystromformerForTokenClassification"), - ("qdqbert", "QDQBertForTokenClassification"), - ("rembert", "RemBertForTokenClassification"), - ("roberta", 
"RobertaForTokenClassification"), - ("roberta-prelayernorm", "RobertaPreLayerNormForTokenClassification"), - ("roc_bert", "RoCBertForTokenClassification"), - ("roformer", "RoFormerForTokenClassification"), - ("squeezebert", "SqueezeBertForTokenClassification"), - ("xlm", "XLMForTokenClassification"), - ("xlm-roberta", "XLMRobertaForTokenClassification"), - ("xlm-roberta-xl", "XLMRobertaXLForTokenClassification"), - ("xlnet", "XLNetForTokenClassification"), - ("xmod", "XmodForTokenClassification"), - ("yoso", "YosoForTokenClassification"), - ] -) - -MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict( - [ - # Model for Multiple Choice mapping - ("albert", "AlbertForMultipleChoice"), - ("bert", "BertForMultipleChoice"), - ("big_bird", "BigBirdForMultipleChoice"), - ("camembert", "CamembertForMultipleChoice"), - ("canine", "CanineForMultipleChoice"), - ("convbert", "ConvBertForMultipleChoice"), - ("data2vec-text", "Data2VecTextForMultipleChoice"), - ("deberta-v2", "DebertaV2ForMultipleChoice"), - ("distilbert", "DistilBertForMultipleChoice"), - ("electra", "ElectraForMultipleChoice"), - ("ernie", "ErnieForMultipleChoice"), - ("ernie_m", "ErnieMForMultipleChoice"), - ("flaubert", "FlaubertForMultipleChoice"), - ("fnet", "FNetForMultipleChoice"), - ("funnel", "FunnelForMultipleChoice"), - ("ibert", "IBertForMultipleChoice"), - ("longformer", "LongformerForMultipleChoice"), - ("luke", "LukeForMultipleChoice"), - ("mega", "MegaForMultipleChoice"), - ("megatron-bert", "MegatronBertForMultipleChoice"), - ("mobilebert", "MobileBertForMultipleChoice"), - ("mpnet", "MPNetForMultipleChoice"), - ("nezha", "NezhaForMultipleChoice"), - ("nystromformer", "NystromformerForMultipleChoice"), - ("qdqbert", "QDQBertForMultipleChoice"), - ("rembert", "RemBertForMultipleChoice"), - ("roberta", "RobertaForMultipleChoice"), - ("roberta-prelayernorm", "RobertaPreLayerNormForMultipleChoice"), - ("roc_bert", "RoCBertForMultipleChoice"), - ("roformer", "RoFormerForMultipleChoice"), - ("squeezebert", "SqueezeBertForMultipleChoice"), - ("xlm", "XLMForMultipleChoice"), - ("xlm-roberta", "XLMRobertaForMultipleChoice"), - ("xlm-roberta-xl", "XLMRobertaXLForMultipleChoice"), - ("xlnet", "XLNetForMultipleChoice"), - ("xmod", "XmodForMultipleChoice"), - ("yoso", "YosoForMultipleChoice"), - ] -) - -MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict( - [ - ("bert", "BertForNextSentencePrediction"), - ("ernie", "ErnieForNextSentencePrediction"), - ("fnet", "FNetForNextSentencePrediction"), - ("megatron-bert", "MegatronBertForNextSentencePrediction"), - ("mobilebert", "MobileBertForNextSentencePrediction"), - ("nezha", "NezhaForNextSentencePrediction"), - ("qdqbert", "QDQBertForNextSentencePrediction"), - ] -) - -MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Audio Classification mapping - ("audio-spectrogram-transformer", "ASTForAudioClassification"), - ("data2vec-audio", "Data2VecAudioForSequenceClassification"), - ("hubert", "HubertForSequenceClassification"), - ("sew", "SEWForSequenceClassification"), - ("sew-d", "SEWDForSequenceClassification"), - ("unispeech", "UniSpeechForSequenceClassification"), - ("unispeech-sat", "UniSpeechSatForSequenceClassification"), - ("wav2vec2", "Wav2Vec2ForSequenceClassification"), - ("wav2vec2-conformer", "Wav2Vec2ConformerForSequenceClassification"), - ("wavlm", "WavLMForSequenceClassification"), - ("whisper", "WhisperForAudioClassification"), - ] -) - -MODEL_FOR_CTC_MAPPING_NAMES = OrderedDict( - [ - # Model for Connectionist temporal 
classification (CTC) mapping - ("data2vec-audio", "Data2VecAudioForCTC"), - ("hubert", "HubertForCTC"), - ("mctct", "MCTCTForCTC"), - ("sew", "SEWForCTC"), - ("sew-d", "SEWDForCTC"), - ("unispeech", "UniSpeechForCTC"), - ("unispeech-sat", "UniSpeechSatForCTC"), - ("wav2vec2", "Wav2Vec2ForCTC"), - ("wav2vec2-conformer", "Wav2Vec2ConformerForCTC"), - ("wavlm", "WavLMForCTC"), - ] -) - -MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Audio Classification mapping - ("data2vec-audio", "Data2VecAudioForAudioFrameClassification"), - ("unispeech-sat", "UniSpeechSatForAudioFrameClassification"), - ("wav2vec2", "Wav2Vec2ForAudioFrameClassification"), - ("wav2vec2-conformer", "Wav2Vec2ConformerForAudioFrameClassification"), - ("wavlm", "WavLMForAudioFrameClassification"), - ] -) - -MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES = OrderedDict( - [ - # Model for Audio Classification mapping - ("data2vec-audio", "Data2VecAudioForXVector"), - ("unispeech-sat", "UniSpeechSatForXVector"), - ("wav2vec2", "Wav2Vec2ForXVector"), - ("wav2vec2-conformer", "Wav2Vec2ConformerForXVector"), - ("wavlm", "WavLMForXVector"), - ] -) - -MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Zero Shot Image Classification mapping - ("align", "AlignModel"), - ("altclip", "AltCLIPModel"), - ("blip", "BlipModel"), - ("chinese_clip", "ChineseCLIPModel"), - ("clip", "CLIPModel"), - ("clipseg", "CLIPSegModel"), - ] -) - -MODEL_FOR_BACKBONE_MAPPING_NAMES = OrderedDict( - [ - # Backbone mapping - ("bit", "BitBackbone"), - ("convnext", "ConvNextBackbone"), - ("convnextv2", "ConvNextV2Backbone"), - ("dinat", "DinatBackbone"), - ("maskformer-swin", "MaskFormerSwinBackbone"), - ("nat", "NatBackbone"), - ("resnet", "ResNetBackbone"), - ("swin", "SwinBackbone"), - ] -) - -MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES) -MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES) -MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_LM_HEAD_MAPPING_NAMES) -MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) -MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES -) -MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES -) -MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES -) -MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES -) -MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES -) -MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES -) -MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES -) -MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES -) -MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) -MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES -) 
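Each `_LazyAutoMapping` built here pairs the config classes from `CONFIG_MAPPING_NAMES` with the architecture names in the matching `*_MAPPING_NAMES` dict, deferring the actual model-class import until the first lookup. A minimal sketch of how these mappings are consumed by the Auto classes defined further down (standard `transformers` usage; the checkpoint name is only an example, not something referenced by this file):

from transformers import AutoConfig, AutoModelForSequenceClassification

# The lazy mappings key on the *config class*; an Auto* class simply looks up
# type(config) in its mapping and instantiates the architecture registered for it.
config = AutoConfig.from_pretrained("bert-base-uncased")        # -> BertConfig
model = AutoModelForSequenceClassification.from_config(config)  # randomly initialized head
print(type(model).__name__)  # "BertForSequenceClassification", per the mapping above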
-MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES -) -MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES) -MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES -) -MODEL_FOR_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES) -MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES -) -MODEL_FOR_DEPTH_ESTIMATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES) -MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES -) -MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES -) -MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES -) -MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES -) -MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES -) -MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES) -MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES -) -MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES -) -MODEL_FOR_CTC_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES) -MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES) -MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES -) -MODEL_FOR_AUDIO_XVECTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES) - -MODEL_FOR_BACKBONE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES) - - -class AutoModel(_BaseAutoModelClass): - _model_mapping = MODEL_MAPPING - - -AutoModel = auto_class_update(AutoModel) - - -class AutoModelForPreTraining(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_PRETRAINING_MAPPING - - -AutoModelForPreTraining = auto_class_update(AutoModelForPreTraining, head_doc="pretraining") - - -# Private on purpose, the public class will add the deprecation warnings. 
-class _AutoModelWithLMHead(_BaseAutoModelClass): - _model_mapping = MODEL_WITH_LM_HEAD_MAPPING - - -_AutoModelWithLMHead = auto_class_update(_AutoModelWithLMHead, head_doc="language modeling") - - -class AutoModelForCausalLM(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING - - -AutoModelForCausalLM = auto_class_update(AutoModelForCausalLM, head_doc="causal language modeling") - - -class AutoModelForMaskedLM(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_MASKED_LM_MAPPING - - -AutoModelForMaskedLM = auto_class_update(AutoModelForMaskedLM, head_doc="masked language modeling") - - -class AutoModelForSeq2SeqLM(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING - - -AutoModelForSeq2SeqLM = auto_class_update( - AutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" -) - - -class AutoModelForSequenceClassification(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING - - -AutoModelForSequenceClassification = auto_class_update( - AutoModelForSequenceClassification, head_doc="sequence classification" -) - - -class AutoModelForQuestionAnswering(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING - - -AutoModelForQuestionAnswering = auto_class_update(AutoModelForQuestionAnswering, head_doc="question answering") - - -class AutoModelForTableQuestionAnswering(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING - - -AutoModelForTableQuestionAnswering = auto_class_update( - AutoModelForTableQuestionAnswering, - head_doc="table question answering", - checkpoint_for_example="google/tapas-base-finetuned-wtq", -) - - -class AutoModelForVisualQuestionAnswering(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING - - -AutoModelForVisualQuestionAnswering = auto_class_update( - AutoModelForVisualQuestionAnswering, - head_doc="visual question answering", - checkpoint_for_example="dandelin/vilt-b32-finetuned-vqa", -) - - -class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING - - -AutoModelForDocumentQuestionAnswering = auto_class_update( - AutoModelForDocumentQuestionAnswering, - head_doc="document question answering", - checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3', -) - - -class AutoModelForTokenClassification(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING - - -AutoModelForTokenClassification = auto_class_update(AutoModelForTokenClassification, head_doc="token classification") - - -class AutoModelForMultipleChoice(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_MULTIPLE_CHOICE_MAPPING - - -AutoModelForMultipleChoice = auto_class_update(AutoModelForMultipleChoice, head_doc="multiple choice") - - -class AutoModelForNextSentencePrediction(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING - - -AutoModelForNextSentencePrediction = auto_class_update( - AutoModelForNextSentencePrediction, head_doc="next sentence prediction" -) - - -class AutoModelForImageClassification(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING - - -AutoModelForImageClassification = auto_class_update(AutoModelForImageClassification, head_doc="image classification") - - -class AutoModelForZeroShotImageClassification(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING - - 
-AutoModelForZeroShotImageClassification = auto_class_update( - AutoModelForZeroShotImageClassification, head_doc="zero-shot image classification" -) - - -class AutoModelForImageSegmentation(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING - - -AutoModelForImageSegmentation = auto_class_update(AutoModelForImageSegmentation, head_doc="image segmentation") - - -class AutoModelForSemanticSegmentation(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING - - -AutoModelForSemanticSegmentation = auto_class_update( - AutoModelForSemanticSegmentation, head_doc="semantic segmentation" -) - - -class AutoModelForUniversalSegmentation(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING - - -AutoModelForUniversalSegmentation = auto_class_update( - AutoModelForUniversalSegmentation, head_doc="universal image segmentation" -) - - -class AutoModelForInstanceSegmentation(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING - - -AutoModelForInstanceSegmentation = auto_class_update( - AutoModelForInstanceSegmentation, head_doc="instance segmentation" -) - - -class AutoModelForObjectDetection(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING - - -AutoModelForObjectDetection = auto_class_update(AutoModelForObjectDetection, head_doc="object detection") - - -class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING - - -AutoModelForZeroShotObjectDetection = auto_class_update( - AutoModelForZeroShotObjectDetection, head_doc="zero-shot object detection" -) - - -class AutoModelForDepthEstimation(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING - - -AutoModelForDepthEstimation = auto_class_update(AutoModelForDepthEstimation, head_doc="depth estimation") - - -class AutoModelForVideoClassification(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING - - -AutoModelForVideoClassification = auto_class_update(AutoModelForVideoClassification, head_doc="video classification") - - -class AutoModelForVision2Seq(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING - - -AutoModelForVision2Seq = auto_class_update(AutoModelForVision2Seq, head_doc="vision-to-text modeling") - - -class AutoModelForAudioClassification(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING - - -AutoModelForAudioClassification = auto_class_update(AutoModelForAudioClassification, head_doc="audio classification") - - -class AutoModelForCTC(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_CTC_MAPPING - - -AutoModelForCTC = auto_class_update(AutoModelForCTC, head_doc="connectionist temporal classification") - - -class AutoModelForSpeechSeq2Seq(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING - - -AutoModelForSpeechSeq2Seq = auto_class_update( - AutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling" -) - - -class AutoModelForAudioFrameClassification(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING - - -AutoModelForAudioFrameClassification = auto_class_update( - AutoModelForAudioFrameClassification, head_doc="audio frame (token) classification" -) - - -class AutoModelForAudioXVector(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_AUDIO_XVECTOR_MAPPING - - -class AutoBackbone(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_BACKBONE_MAPPING - - 
-AutoModelForAudioXVector = auto_class_update(AutoModelForAudioXVector, head_doc="audio retrieval via x-vector") - - -class AutoModelForMaskedImageModeling(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING - - -AutoModelForMaskedImageModeling = auto_class_update(AutoModelForMaskedImageModeling, head_doc="masked image modeling") - - -class AutoModelWithLMHead(_AutoModelWithLMHead): - @classmethod - def from_config(cls, config): - warnings.warn( - "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use " - "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and " - "`AutoModelForSeq2SeqLM` for encoder-decoder models.", - FutureWarning, - ) - return super().from_config(config) - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): - warnings.warn( - "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use " - "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and " - "`AutoModelForSeq2SeqLM` for encoder-decoder models.", - FutureWarning, - ) - return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) diff --git a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/cleaner.py b/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/cleaner.py deleted file mode 100644 index be86d55e7f8716afd89fffdf51eb903743e32442..0000000000000000000000000000000000000000 --- a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/cleaner.py +++ /dev/null @@ -1,28 +0,0 @@ -from text import chinese, japanese, cleaned_text_to_sequence - - -language_module_map = {"ZH": chinese, "JP": japanese} - - -def clean_text(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - return norm_text, phones, tones, word2ph - - -def clean_text_bert(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - bert = language_module.get_bert_feature(norm_text, word2ph) - return phones, tones, bert - - -def text_to_sequence(text, language): - norm_text, phones, tones, word2ph = clean_text(text, language) - return cleaned_text_to_sequence(phones, tones, language) - - -if __name__ == "__main__": - pass \ No newline at end of file diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/_core/_fileio.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/_core/_fileio.py deleted file mode 100644 index 35e8e8af6c11dd6690a8382af6a23d1391fff9dc..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/_core/_fileio.py +++ /dev/null @@ -1,603 +0,0 @@ -from __future__ import annotations - -import os -import pathlib -import sys -from dataclasses import dataclass -from functools import partial -from os import PathLike -from typing import ( - IO, - TYPE_CHECKING, - Any, - AnyStr, - AsyncIterator, - Callable, - Generic, - Iterable, - Iterator, - Sequence, - cast, - overload, -) - -from .. 
import to_thread -from ..abc import AsyncResource - -if sys.version_info >= (3, 8): - from typing import Final -else: - from typing_extensions import Final - -if TYPE_CHECKING: - from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer -else: - ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object - - -class AsyncFile(AsyncResource, Generic[AnyStr]): - """ - An asynchronous file object. - - This class wraps a standard file object and provides async friendly versions of the following - blocking methods (where available on the original file object): - - * read - * read1 - * readline - * readlines - * readinto - * readinto1 - * write - * writelines - * truncate - * seek - * tell - * flush - - All other methods are directly passed through. - - This class supports the asynchronous context manager protocol which closes the underlying file - at the end of the context block. - - This class also supports asynchronous iteration:: - - async with await open_file(...) as f: - async for line in f: - print(line) - """ - - def __init__(self, fp: IO[AnyStr]) -> None: - self._fp: Any = fp - - def __getattr__(self, name: str) -> object: - return getattr(self._fp, name) - - @property - def wrapped(self) -> IO[AnyStr]: - """The wrapped file object.""" - return self._fp - - async def __aiter__(self) -> AsyncIterator[AnyStr]: - while True: - line = await self.readline() - if line: - yield line - else: - break - - async def aclose(self) -> None: - return await to_thread.run_sync(self._fp.close) - - async def read(self, size: int = -1) -> AnyStr: - return await to_thread.run_sync(self._fp.read, size) - - async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes: - return await to_thread.run_sync(self._fp.read1, size) - - async def readline(self) -> AnyStr: - return await to_thread.run_sync(self._fp.readline) - - async def readlines(self) -> list[AnyStr]: - return await to_thread.run_sync(self._fp.readlines) - - async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> bytes: - return await to_thread.run_sync(self._fp.readinto, b) - - async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> bytes: - return await to_thread.run_sync(self._fp.readinto1, b) - - @overload - async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int: - ... - - @overload - async def write(self: AsyncFile[str], b: str) -> int: - ... - - async def write(self, b: ReadableBuffer | str) -> int: - return await to_thread.run_sync(self._fp.write, b) - - @overload - async def writelines( - self: AsyncFile[bytes], lines: Iterable[ReadableBuffer] - ) -> None: - ... - - @overload - async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None: - ... 
- - async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None: - return await to_thread.run_sync(self._fp.writelines, lines) - - async def truncate(self, size: int | None = None) -> int: - return await to_thread.run_sync(self._fp.truncate, size) - - async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int: - return await to_thread.run_sync(self._fp.seek, offset, whence) - - async def tell(self) -> int: - return await to_thread.run_sync(self._fp.tell) - - async def flush(self) -> None: - return await to_thread.run_sync(self._fp.flush) - - -@overload -async def open_file( - file: str | PathLike[str] | int, - mode: OpenBinaryMode, - buffering: int = ..., - encoding: str | None = ..., - errors: str | None = ..., - newline: str | None = ..., - closefd: bool = ..., - opener: Callable[[str, int], int] | None = ..., -) -> AsyncFile[bytes]: - ... - - -@overload -async def open_file( - file: str | PathLike[str] | int, - mode: OpenTextMode = ..., - buffering: int = ..., - encoding: str | None = ..., - errors: str | None = ..., - newline: str | None = ..., - closefd: bool = ..., - opener: Callable[[str, int], int] | None = ..., -) -> AsyncFile[str]: - ... - - -async def open_file( - file: str | PathLike[str] | int, - mode: str = "r", - buffering: int = -1, - encoding: str | None = None, - errors: str | None = None, - newline: str | None = None, - closefd: bool = True, - opener: Callable[[str, int], int] | None = None, -) -> AsyncFile[Any]: - """ - Open a file asynchronously. - - The arguments are exactly the same as for the builtin :func:`open`. - - :return: an asynchronous file object - - """ - fp = await to_thread.run_sync( - open, file, mode, buffering, encoding, errors, newline, closefd, opener - ) - return AsyncFile(fp) - - -def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]: - """ - Wrap an existing file as an asynchronous file. - - :param file: an existing file-like object - :return: an asynchronous file object - - """ - return AsyncFile(file) - - -@dataclass(eq=False) -class _PathIterator(AsyncIterator["Path"]): - iterator: Iterator[PathLike[str]] - - async def __anext__(self) -> Path: - nextval = await to_thread.run_sync(next, self.iterator, None, cancellable=True) - if nextval is None: - raise StopAsyncIteration from None - - return Path(cast("PathLike[str]", nextval)) - - -class Path: - """ - An asynchronous version of :class:`pathlib.Path`. - - This class cannot be substituted for :class:`pathlib.Path` or :class:`pathlib.PurePath`, but - it is compatible with the :class:`os.PathLike` interface. - - It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for the - deprecated :meth:`~pathlib.Path.link_to` method. - - Any methods that do disk I/O need to be awaited on. 
These methods are: - - * :meth:`~pathlib.Path.absolute` - * :meth:`~pathlib.Path.chmod` - * :meth:`~pathlib.Path.cwd` - * :meth:`~pathlib.Path.exists` - * :meth:`~pathlib.Path.expanduser` - * :meth:`~pathlib.Path.group` - * :meth:`~pathlib.Path.hardlink_to` - * :meth:`~pathlib.Path.home` - * :meth:`~pathlib.Path.is_block_device` - * :meth:`~pathlib.Path.is_char_device` - * :meth:`~pathlib.Path.is_dir` - * :meth:`~pathlib.Path.is_fifo` - * :meth:`~pathlib.Path.is_file` - * :meth:`~pathlib.Path.is_mount` - * :meth:`~pathlib.Path.lchmod` - * :meth:`~pathlib.Path.lstat` - * :meth:`~pathlib.Path.mkdir` - * :meth:`~pathlib.Path.open` - * :meth:`~pathlib.Path.owner` - * :meth:`~pathlib.Path.read_bytes` - * :meth:`~pathlib.Path.read_text` - * :meth:`~pathlib.Path.readlink` - * :meth:`~pathlib.Path.rename` - * :meth:`~pathlib.Path.replace` - * :meth:`~pathlib.Path.rmdir` - * :meth:`~pathlib.Path.samefile` - * :meth:`~pathlib.Path.stat` - * :meth:`~pathlib.Path.touch` - * :meth:`~pathlib.Path.unlink` - * :meth:`~pathlib.Path.write_bytes` - * :meth:`~pathlib.Path.write_text` - - Additionally, the following methods return an async iterator yielding :class:`~.Path` objects: - - * :meth:`~pathlib.Path.glob` - * :meth:`~pathlib.Path.iterdir` - * :meth:`~pathlib.Path.rglob` - """ - - __slots__ = "_path", "__weakref__" - - __weakref__: Any - - def __init__(self, *args: str | PathLike[str]) -> None: - self._path: Final[pathlib.Path] = pathlib.Path(*args) - - def __fspath__(self) -> str: - return self._path.__fspath__() - - def __str__(self) -> str: - return self._path.__str__() - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.as_posix()!r})" - - def __bytes__(self) -> bytes: - return self._path.__bytes__() - - def __hash__(self) -> int: - return self._path.__hash__() - - def __eq__(self, other: object) -> bool: - target = other._path if isinstance(other, Path) else other - return self._path.__eq__(target) - - def __lt__(self, other: Path) -> bool: - target = other._path if isinstance(other, Path) else other - return self._path.__lt__(target) - - def __le__(self, other: Path) -> bool: - target = other._path if isinstance(other, Path) else other - return self._path.__le__(target) - - def __gt__(self, other: Path) -> bool: - target = other._path if isinstance(other, Path) else other - return self._path.__gt__(target) - - def __ge__(self, other: Path) -> bool: - target = other._path if isinstance(other, Path) else other - return self._path.__ge__(target) - - def __truediv__(self, other: Any) -> Path: - return Path(self._path / other) - - def __rtruediv__(self, other: Any) -> Path: - return Path(other) / self - - @property - def parts(self) -> tuple[str, ...]: - return self._path.parts - - @property - def drive(self) -> str: - return self._path.drive - - @property - def root(self) -> str: - return self._path.root - - @property - def anchor(self) -> str: - return self._path.anchor - - @property - def parents(self) -> Sequence[Path]: - return tuple(Path(p) for p in self._path.parents) - - @property - def parent(self) -> Path: - return Path(self._path.parent) - - @property - def name(self) -> str: - return self._path.name - - @property - def suffix(self) -> str: - return self._path.suffix - - @property - def suffixes(self) -> list[str]: - return self._path.suffixes - - @property - def stem(self) -> str: - return self._path.stem - - async def absolute(self) -> Path: - path = await to_thread.run_sync(self._path.absolute) - return Path(path) - - def as_posix(self) -> str: - return 
self._path.as_posix() - - def as_uri(self) -> str: - return self._path.as_uri() - - def match(self, path_pattern: str) -> bool: - return self._path.match(path_pattern) - - def is_relative_to(self, *other: str | PathLike[str]) -> bool: - try: - self.relative_to(*other) - return True - except ValueError: - return False - - async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None: - func = partial(os.chmod, follow_symlinks=follow_symlinks) - return await to_thread.run_sync(func, self._path, mode) - - @classmethod - async def cwd(cls) -> Path: - path = await to_thread.run_sync(pathlib.Path.cwd) - return cls(path) - - async def exists(self) -> bool: - return await to_thread.run_sync(self._path.exists, cancellable=True) - - async def expanduser(self) -> Path: - return Path(await to_thread.run_sync(self._path.expanduser, cancellable=True)) - - def glob(self, pattern: str) -> AsyncIterator[Path]: - gen = self._path.glob(pattern) - return _PathIterator(gen) - - async def group(self) -> str: - return await to_thread.run_sync(self._path.group, cancellable=True) - - async def hardlink_to(self, target: str | pathlib.Path | Path) -> None: - if isinstance(target, Path): - target = target._path - - await to_thread.run_sync(os.link, target, self) - - @classmethod - async def home(cls) -> Path: - home_path = await to_thread.run_sync(pathlib.Path.home) - return cls(home_path) - - def is_absolute(self) -> bool: - return self._path.is_absolute() - - async def is_block_device(self) -> bool: - return await to_thread.run_sync(self._path.is_block_device, cancellable=True) - - async def is_char_device(self) -> bool: - return await to_thread.run_sync(self._path.is_char_device, cancellable=True) - - async def is_dir(self) -> bool: - return await to_thread.run_sync(self._path.is_dir, cancellable=True) - - async def is_fifo(self) -> bool: - return await to_thread.run_sync(self._path.is_fifo, cancellable=True) - - async def is_file(self) -> bool: - return await to_thread.run_sync(self._path.is_file, cancellable=True) - - async def is_mount(self) -> bool: - return await to_thread.run_sync(os.path.ismount, self._path, cancellable=True) - - def is_reserved(self) -> bool: - return self._path.is_reserved() - - async def is_socket(self) -> bool: - return await to_thread.run_sync(self._path.is_socket, cancellable=True) - - async def is_symlink(self) -> bool: - return await to_thread.run_sync(self._path.is_symlink, cancellable=True) - - def iterdir(self) -> AsyncIterator[Path]: - gen = self._path.iterdir() - return _PathIterator(gen) - - def joinpath(self, *args: str | PathLike[str]) -> Path: - return Path(self._path.joinpath(*args)) - - async def lchmod(self, mode: int) -> None: - await to_thread.run_sync(self._path.lchmod, mode) - - async def lstat(self) -> os.stat_result: - return await to_thread.run_sync(self._path.lstat, cancellable=True) - - async def mkdir( - self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False - ) -> None: - await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok) - - @overload - async def open( - self, - mode: OpenBinaryMode, - buffering: int = ..., - encoding: str | None = ..., - errors: str | None = ..., - newline: str | None = ..., - ) -> AsyncFile[bytes]: - ... - - @overload - async def open( - self, - mode: OpenTextMode = ..., - buffering: int = ..., - encoding: str | None = ..., - errors: str | None = ..., - newline: str | None = ..., - ) -> AsyncFile[str]: - ... 
- - async def open( - self, - mode: str = "r", - buffering: int = -1, - encoding: str | None = None, - errors: str | None = None, - newline: str | None = None, - ) -> AsyncFile[Any]: - fp = await to_thread.run_sync( - self._path.open, mode, buffering, encoding, errors, newline - ) - return AsyncFile(fp) - - async def owner(self) -> str: - return await to_thread.run_sync(self._path.owner, cancellable=True) - - async def read_bytes(self) -> bytes: - return await to_thread.run_sync(self._path.read_bytes) - - async def read_text( - self, encoding: str | None = None, errors: str | None = None - ) -> str: - return await to_thread.run_sync(self._path.read_text, encoding, errors) - - def relative_to(self, *other: str | PathLike[str]) -> Path: - return Path(self._path.relative_to(*other)) - - async def readlink(self) -> Path: - target = await to_thread.run_sync(os.readlink, self._path) - return Path(cast(str, target)) - - async def rename(self, target: str | pathlib.PurePath | Path) -> Path: - if isinstance(target, Path): - target = target._path - - await to_thread.run_sync(self._path.rename, target) - return Path(target) - - async def replace(self, target: str | pathlib.PurePath | Path) -> Path: - if isinstance(target, Path): - target = target._path - - await to_thread.run_sync(self._path.replace, target) - return Path(target) - - async def resolve(self, strict: bool = False) -> Path: - func = partial(self._path.resolve, strict=strict) - return Path(await to_thread.run_sync(func, cancellable=True)) - - def rglob(self, pattern: str) -> AsyncIterator[Path]: - gen = self._path.rglob(pattern) - return _PathIterator(gen) - - async def rmdir(self) -> None: - await to_thread.run_sync(self._path.rmdir) - - async def samefile( - self, other_path: str | bytes | int | pathlib.Path | Path - ) -> bool: - if isinstance(other_path, Path): - other_path = other_path._path - - return await to_thread.run_sync( - self._path.samefile, other_path, cancellable=True - ) - - async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result: - func = partial(os.stat, follow_symlinks=follow_symlinks) - return await to_thread.run_sync(func, self._path, cancellable=True) - - async def symlink_to( - self, - target: str | pathlib.Path | Path, - target_is_directory: bool = False, - ) -> None: - if isinstance(target, Path): - target = target._path - - await to_thread.run_sync(self._path.symlink_to, target, target_is_directory) - - async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None: - await to_thread.run_sync(self._path.touch, mode, exist_ok) - - async def unlink(self, missing_ok: bool = False) -> None: - try: - await to_thread.run_sync(self._path.unlink) - except FileNotFoundError: - if not missing_ok: - raise - - def with_name(self, name: str) -> Path: - return Path(self._path.with_name(name)) - - def with_stem(self, stem: str) -> Path: - return Path(self._path.with_name(stem + self._path.suffix)) - - def with_suffix(self, suffix: str) -> Path: - return Path(self._path.with_suffix(suffix)) - - async def write_bytes(self, data: bytes) -> int: - return await to_thread.run_sync(self._path.write_bytes, data) - - async def write_text( - self, - data: str, - encoding: str | None = None, - errors: str | None = None, - newline: str | None = None, - ) -> int: - # Path.write_text() does not support the "newline" parameter before Python 3.10 - def sync_write_text() -> int: - with self._path.open( - "w", encoding=encoding, errors=errors, newline=newline - ) as fp: - return fp.write(data) - - return await 
to_thread.run_sync(sync_write_text) - - -PathLike.register(Path) diff --git a/spaces/cihyFjudo/fairness-paper-search/VERIFIED Brevent App V3.0.7 [Patched] [Latest] The Ultimate Solution for Android App Management.md b/spaces/cihyFjudo/fairness-paper-search/VERIFIED Brevent App V3.0.7 [Patched] [Latest] The Ultimate Solution for Android App Management.md deleted file mode 100644 index d1dc70b3c2b3a8b5944cf99e4278705ca3a70026..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/VERIFIED Brevent App V3.0.7 [Patched] [Latest] The Ultimate Solution for Android App Management.md +++ /dev/null @@ -1,6 +0,0 @@ -
    -

    However, our Dynamic Application Security Testing (DAST) analyzer included the vulnerable library, which we have patched in DAST v3.0.32. Self-managed customers using our built-in DAST CI template after 15.0 can get the latest release from registry.gitlab.com. If you use the always-pull policy, the update will occur automatically. GitLab.com is already running the updated DAST scanner.

    -

    |VERIFIED| Brevent App V3.0.7 [Patched] [Latest]


    Download File 🗸 https://tinurli.com/2uwj6Z



    -

    The service pack provides a mechanism to patch pristine 6.3.0 Fix Pack 7 install images so that you get the latest prerequisite scanner definitions. It also patches the IBM Tivoli Monitoring V6.3.0.7 Base, Linux on System z® media so that you can install the Tivoli Enterprise Portal Server on newer OS versions of Linux for System z (see the install steps below). In all cases, after doing a pristine installation from patched 6.3.0 Fix Pack 7 media, you still need to apply the service pack. Likewise, if you patch the Windows media, you must apply the service pack after a pristine installation from that media, because of the update to the Visual C++ runtimes.

    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Wep dx 400 printer driver download for windows 7 Compatible with all Windows versions.md b/spaces/cihyFjudo/fairness-paper-search/Wep dx 400 printer driver download for windows 7 Compatible with all Windows versions.md deleted file mode 100644 index 283bad1719da25ae618bbd150cead3dc55bd61c5..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Wep dx 400 printer driver download for windows 7 Compatible with all Windows versions.md +++ /dev/null @@ -1,21 +0,0 @@ - -

    Are you looking for the latest Wipro WEP BOUNTI DR-400 driver package? If so, you have come to the right place: here you will find not only the drivers for this printer, but also complete instructions for installing them properly.

    -

    On this page, we are sharing Wipro WEP BOUNTI DR-400 driver download links for Windows, Linux and Mac operating systems. The drivers shared below can be used as an alternative to the drivers on the printer's software CD.

    -

    wep dx 400 printer driver download for windows 7


    DOWNLOAD ⇒⇒⇒ https://tinurli.com/2uwhAu



    -

    Although this Wipro printer is a receipt printer, its installation process is similar to the printer driver installation process described in our guide. Read the guide below before installing your printer driver.

    -

    2) Finally, Microsoft announced Windows 11 in June 2021. The drivers on this website are being updated with Windows 11 drivers; this will be finished shortly. You can start downloading the driver from its download page. July 14, 2021

    -

    The Wipro WeP TH 400 / 400+ is a retail billing printer from WeP Solutions Limited. This printer delivers its best performance when you install it using the Wipro WeP TH-400 driver CD. If you are unable to install the CD drivers, you can install the latest Wipro WeP TH 400 driver pack instead.

    -

    Here, we are sharing Wipro WeP TH400 / TH400+ / 400II (GP-80250IVN) driver download links for Windows XP, Vista, 7, 8, 8.1, 10, 11, Server 2008, Server 2012 and Server 2003 (32-bit and 64-bit versions), as well as Linux and various Mac operating systems.

    -

    All the drivers for this printer available on the official Wipro website are fully compatible with it. For this reason, we are sharing the official Wipro full-feature WeP TH 400 driver download links on this page.

    -

    To keep this printer working properly, you must update its driver regularly. Each new driver update released by the manufacturer has the fewest known bugs, which makes it the most suitable support software for this printer.

    -

    The built-in Microsoft Windows Update service may not update your drivers properly. Instead, use The Printer Driver Update Utility for WeP (Wipro). It is intelligent software that automatically recognizes your computer's operating system and printer manufacturer and model to find the most up-to-date drivers for it. There is no risk of installing the wrong driver. The Printer Driver Update Utility downloads and installs your drivers quickly and easily.

    -

    -

    Click the Update button next to your driver. The correct version will be downloaded and installed automatically. Or, you can click the Update All button at the bottom to automatically download and install the correct version of all the drivers that are missing or out-of-date on your system.

    -

    To find the latest driver, including Windows 11 drivers, choose from our list of most popular WeP (Wipro) Printer downloads or search our driver archive for the driver that fits your specific WeP (Wipro) printer model and your PC's operating system.

    -

    After downloading your driver update, you will need to install it. Driver updates come in a variety of file formats with different file extensions. For example, you may have downloaded an EXE, INF, ZIP, or SYS file. Each file type has a slightly different installation procedure to follow. Visit our Driver Support Page to watch helpful step-by-step videos on how to install drivers based on their file extension.

    -

    To get the latest driver, including Windows 11 drivers, you can choose from the above list of most popular WeP (Wipro) downloads. Click the "Download driver" button next to the matching model name. After you complete your download, move on to Step 2.

    -

    Once you have downloaded your new driver, you need to install it. To install a driver in Windows, you will need to use a built-in utility called Device Manager. It allows you to see all of the devices recognized by your system, and the drivers associated with them.
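    As a rough illustration only (this is not part of the original article or the Wipro driver package), the sketch below shows how the same INF installation step could be scripted instead of done through Device Manager, using the pnputil tool that ships with Windows 10/11. The file name wep_dr400.inf is a placeholder for whatever .inf file your downloaded driver archive actually contains.

```python
# Hypothetical helper, not taken from the article or the driver CD: stages and
# installs an .inf driver package with Windows' built-in pnputil tool.
# Assumes an elevated (administrator) prompt; "wep_dr400.inf" is a placeholder name.
import subprocess
import sys
from pathlib import Path


def install_inf_driver(inf_path: str) -> int:
    inf = Path(inf_path)
    if not inf.is_file() or inf.suffix.lower() != ".inf":
        print(f"Not an .inf driver package: {inf}", file=sys.stderr)
        return 1
    # "pnputil /add-driver <inf> /install" stages the package in the driver
    # store and installs it on any matching devices.
    result = subprocess.run(
        ["pnputil", "/add-driver", str(inf), "/install"],
        capture_output=True,
        text=True,
    )
    print(result.stdout)
    if result.returncode != 0:
        print(result.stderr, file=sys.stderr)
    return result.returncode


if __name__ == "__main__":
    sys.exit(install_inf_driver(sys.argv[1] if len(sys.argv) > 1 else "wep_dr400.inf"))
```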

    -

    If you are having trouble installing your driver, you should use the Driver Update Utility for WeP (Wipro). It is a software utility that automatically finds, downloads, and installs the right driver for your system. You can even back up your drivers before making any changes, and revert if there are any problems. You can safely update all of your drivers in just a few clicks. Once you download and run the utility, it will scan for out-of-date or missing drivers:

    -

    (a) On-line troubleshooting via a searchable Knowledgebase, answers to frequently asked questions, the latest driver and firmware downloads, and email support at www.cusa.canon.com/support. (b) Toll free telephone support at 1-800-OK-CANON (652-2666), Monday through Friday from 8:00 a.m. to 8:00 p.m. Eastern Time (excluding holidays).

    -
    -
    \ No newline at end of file diff --git a/spaces/cleanmaster/so-vits-svc-akagi/inference/__init__.py b/spaces/cleanmaster/so-vits-svc-akagi/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageStat.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageStat.py deleted file mode 100644 index b7ebddf066ab6eb115a79d6bc34e31ab0c1569bd..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageStat.py +++ /dev/null @@ -1,148 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# global image statistics -# -# History: -# 1996-04-05 fl Created -# 1997-05-21 fl Added mask; added rms, var, stddev attributes -# 1997-08-05 fl Added median -# 1998-07-05 hk Fixed integer overflow error -# -# Notes: -# This class shows how to implement delayed evaluation of attributes. -# To get a certain value, simply access the corresponding attribute. -# The __getattr__ dispatcher takes care of the rest. -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1996-97. -# -# See the README file for information on usage and redistribution. -# - -import functools -import math -import operator - - -class Stat: - def __init__(self, image_or_list, mask=None): - try: - if mask: - self.h = image_or_list.histogram(mask) - else: - self.h = image_or_list.histogram() - except AttributeError: - self.h = image_or_list # assume it to be a histogram list - if not isinstance(self.h, list): - msg = "first argument must be image or list" - raise TypeError(msg) - self.bands = list(range(len(self.h) // 256)) - - def __getattr__(self, id): - """Calculate missing attribute""" - if id[:4] == "_get": - raise AttributeError(id) - # calculate missing attribute - v = getattr(self, "_get" + id)() - setattr(self, id, v) - return v - - def _getextrema(self): - """Get min/max values for each band in the image""" - - def minmax(histogram): - n = 255 - x = 0 - for i in range(256): - if histogram[i]: - n = min(n, i) - x = max(x, i) - return n, x # returns (255, 0) if there's no data in the histogram - - v = [] - for i in range(0, len(self.h), 256): - v.append(minmax(self.h[i:])) - return v - - def _getcount(self): - """Get total number of pixels in each layer""" - - v = [] - for i in range(0, len(self.h), 256): - v.append(functools.reduce(operator.add, self.h[i : i + 256])) - return v - - def _getsum(self): - """Get sum of all pixels in each layer""" - - v = [] - for i in range(0, len(self.h), 256): - layer_sum = 0.0 - for j in range(256): - layer_sum += j * self.h[i + j] - v.append(layer_sum) - return v - - def _getsum2(self): - """Get squared sum of all pixels in each layer""" - - v = [] - for i in range(0, len(self.h), 256): - sum2 = 0.0 - for j in range(256): - sum2 += (j**2) * float(self.h[i + j]) - v.append(sum2) - return v - - def _getmean(self): - """Get average pixel level for each layer""" - - v = [] - for i in self.bands: - v.append(self.sum[i] / self.count[i]) - return v - - def _getmedian(self): - """Get median pixel level for each layer""" - - v = [] - for i in self.bands: - s = 0 - half = self.count[i] // 2 - b = i * 256 - for j in range(256): - s = s + self.h[b + j] - if s > half: - break - v.append(j) - return v - - def _getrms(self): - """Get RMS for each layer""" - - v = [] - for i in self.bands: - v.append(math.sqrt(self.sum2[i] / self.count[i])) - 
return v - - def _getvar(self): - """Get variance for each layer""" - - v = [] - for i in self.bands: - n = self.count[i] - v.append((self.sum2[i] - (self.sum[i] ** 2.0) / n) / n) - return v - - def _getstddev(self): - """Get standard deviation for each layer""" - - v = [] - for i in self.bands: - v.append(math.sqrt(self.var[i])) - return v - - -Global = Stat # compatibility diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/misc/cython.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/misc/cython.py deleted file mode 100644 index 2a42d94a3591e0e8e47f184b303e4aec0a6337ef..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/misc/cython.py +++ /dev/null @@ -1,27 +0,0 @@ -""" Exports a no-op 'cython' namespace similar to -https://github.com/cython/cython/blob/master/Cython/Shadow.py - -This allows to optionally compile @cython decorated functions -(when cython is available at built time), or run the same code -as pure-python, without runtime dependency on cython module. - -We only define the symbols that we use. E.g. see fontTools.cu2qu -""" - -from types import SimpleNamespace - - -def _empty_decorator(x): - return x - - -compiled = False - -for name in ("double", "complex", "int"): - globals()[name] = None - -for name in ("cfunc", "inline"): - globals()[name] = _empty_decorator - -locals = lambda **_: _empty_decorator -returns = lambda _: _empty_decorator diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c deleted file mode 100644 index dae223240398fd2c03501d7a29748335dfc33796..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2017 Google Inc. - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#define BPP 12 -#define INIT_FUNC ff_vp9dsp_init_12bpp_aarch64 -#include "vp9dsp_init_16bpp_aarch64_template.c" diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/decode.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/decode.h deleted file mode 100644 index 8430ffbd66484de6ebfd0cdf36e821d40e74ee4b..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/decode.h +++ /dev/null @@ -1,153 +0,0 @@ -/* - * generic decoding-related code - * - * This file is part of FFmpeg. 
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_DECODE_H -#define AVCODEC_DECODE_H - -#include "libavutil/buffer.h" -#include "libavutil/frame.h" -#include "libavutil/hwcontext.h" - -#include "avcodec.h" - -/** - * This struct stores per-frame lavc-internal data and is attached to it via - * private_ref. - */ -typedef struct FrameDecodeData { - /** - * The callback to perform some delayed processing on the frame right - * before it is returned to the caller. - * - * @note This code is called at some unspecified point after the frame is - * returned from the decoder's decode/receive_frame call. Therefore it cannot rely - * on AVCodecContext being in any specific state, so it does not get to - * access AVCodecContext directly at all. All the state it needs must be - * stored in the post_process_opaque object. - */ - int (*post_process)(void *logctx, AVFrame *frame); - void *post_process_opaque; - void (*post_process_opaque_free)(void *opaque); - - /** - * Per-frame private data for hwaccels. - */ - void *hwaccel_priv; - void (*hwaccel_priv_free)(void *priv); -} FrameDecodeData; - -/** - * avcodec_receive_frame() implementation for decoders. - */ -int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame); - -/** - * Called by decoders to get the next packet for decoding. - * - * @param pkt An empty packet to be filled with data. - * @return 0 if a new reference has been successfully written to pkt - * AVERROR(EAGAIN) if no data is currently available - * AVERROR_EOF if and end of stream has been reached, so no more data - * will be available - */ -int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt); - -/** - * Set various frame properties from the provided packet. - */ -int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx, - AVFrame *frame, const AVPacket *pkt); - -/** - * Set various frame properties from the codec context / packet data. - */ -int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame); - -/** - * Make sure avctx.hw_frames_ctx is set. If it's not set, the function will - * try to allocate it from hw_device_ctx. If that is not possible, an error - * message is printed, and an error code is returned. - */ -int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, - enum AVHWDeviceType dev_type); - -int ff_attach_decode_data(AVFrame *frame); - -/** - * Check whether the side-data of src contains a palette of - * size AVPALETTE_SIZE; if so, copy it to dst and return 1; - * else return 0. - * Also emit an error message upon encountering a palette - * with invalid size. - */ -int ff_copy_palette(void *dst, const AVPacket *src, void *logctx); - -/** - * Perform decoder initialization and validation. - * Called when opening the decoder, before the FFCodec.init() call. 
- */ -int ff_decode_preinit(AVCodecContext *avctx); - -/** - * Check that the provided frame dimensions are valid and set them on the codec - * context. - */ -int ff_set_dimensions(AVCodecContext *s, int width, int height); - -/** - * Check that the provided sample aspect ratio is valid and set it on the codec - * context. - */ -int ff_set_sar(AVCodecContext *avctx, AVRational sar); - -/** - * Select the (possibly hardware accelerated) pixel format. - * This is a wrapper around AVCodecContext.get_format() and should be used - * instead of calling get_format() directly. - * - * The list of pixel formats must contain at least one valid entry, and is - * terminated with AV_PIX_FMT_NONE. If it is possible to decode to software, - * the last entry in the list must be the most accurate software format. - * If it is not possible to decode to software, AVCodecContext.sw_pix_fmt - * must be set before calling this function. - */ -int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt); - -/** - * Get a buffer for a frame. This is a wrapper around - * AVCodecContext.get_buffer() and should be used instead calling get_buffer() - * directly. - */ -int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags); - -#define FF_REGET_BUFFER_FLAG_READONLY 1 ///< the returned buffer does not need to be writable -/** - * Identical in function to ff_get_buffer(), except it reuses the existing buffer - * if available. - */ -int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags); - -/** - * Add or update AV_FRAME_DATA_MATRIXENCODING side data. - */ -int ff_side_data_update_matrix_encoding(AVFrame *frame, - enum AVMatrixEncoding matrix_encoding); - -#endif /* AVCODEC_DECODE_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libtwolame.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libtwolame.c deleted file mode 100644 index 9c0156aa2577df13b81b5866b170b87485b8e043..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libtwolame.c +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Interface to libtwolame for mp2 encoding - * Copyright (c) 2012 Paul B Mahol - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Interface to libtwolame for mp2 encoding. 
- */ - -#include - -#include "libavutil/channel_layout.h" -#include "libavutil/common.h" -#include "libavutil/opt.h" - -#include "avcodec.h" -#include "codec_internal.h" -#include "encode.h" -#include "mpegaudio.h" - -typedef struct TWOLAMEContext { - AVClass *class; - int mode; - int psymodel; - int energy; - int error_protection; - int copyright; - int original; - int verbosity; - - twolame_options *glopts; - int64_t next_pts; -} TWOLAMEContext; - -static av_cold int twolame_encode_close(AVCodecContext *avctx) -{ - TWOLAMEContext *s = avctx->priv_data; - twolame_close(&s->glopts); - return 0; -} - -static av_cold int twolame_encode_init(AVCodecContext *avctx) -{ - TWOLAMEContext *s = avctx->priv_data; - int ret; - - avctx->frame_size = TWOLAME_SAMPLES_PER_FRAME; - avctx->initial_padding = 512 - 32 + 1; - - s->glopts = twolame_init(); - if (!s->glopts) - return AVERROR(ENOMEM); - - twolame_set_verbosity(s->glopts, s->verbosity); - twolame_set_mode(s->glopts, s->mode); - twolame_set_psymodel(s->glopts, s->psymodel); - twolame_set_energy_levels(s->glopts, s->energy); - twolame_set_error_protection(s->glopts, s->error_protection); - twolame_set_copyright(s->glopts, s->copyright); - twolame_set_original(s->glopts, s->original); - - twolame_set_num_channels(s->glopts, avctx->ch_layout.nb_channels); - twolame_set_in_samplerate(s->glopts, avctx->sample_rate); - twolame_set_out_samplerate(s->glopts, avctx->sample_rate); - - if (!avctx->bit_rate) { - if ((s->mode == TWOLAME_AUTO_MODE && avctx->ch_layout.nb_channels == 1) || s->mode == TWOLAME_MONO) - avctx->bit_rate = avctx->sample_rate < 28000 ? 80000 : 192000; - else - avctx->bit_rate = avctx->sample_rate < 28000 ? 160000 : 384000; - } - - if (avctx->flags & AV_CODEC_FLAG_QSCALE || !avctx->bit_rate) { - twolame_set_VBR(s->glopts, TRUE); - twolame_set_VBR_level(s->glopts, - avctx->global_quality / (float) FF_QP2LAMBDA); - av_log(avctx, AV_LOG_WARNING, - "VBR in MP2 is a hack, use another codec that supports it.\n"); - } else { - twolame_set_bitrate(s->glopts, avctx->bit_rate / 1000); - } - - ret = twolame_init_params(s->glopts); - if (ret) { - twolame_encode_close(avctx); - return AVERROR_UNKNOWN; - } - - return 0; -} - -static int twolame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, - const AVFrame *frame, int *got_packet_ptr) -{ - TWOLAMEContext *s = avctx->priv_data; - int ret; - - if ((ret = ff_alloc_packet(avctx, avpkt, MPA_MAX_CODED_FRAME_SIZE)) < 0) - return ret; - - if (frame) { - switch (avctx->sample_fmt) { - case AV_SAMPLE_FMT_FLT: - ret = twolame_encode_buffer_float32_interleaved(s->glopts, - (const float *)frame->data[0], - frame->nb_samples, - avpkt->data, - avpkt->size); - break; - case AV_SAMPLE_FMT_FLTP: - ret = twolame_encode_buffer_float32(s->glopts, - (const float *)frame->data[0], - (const float *)frame->data[1], - frame->nb_samples, - avpkt->data, avpkt->size); - break; - case AV_SAMPLE_FMT_S16: - ret = twolame_encode_buffer_interleaved(s->glopts, - (const short int *)frame->data[0], - frame->nb_samples, - avpkt->data, avpkt->size); - break; - case AV_SAMPLE_FMT_S16P: - ret = twolame_encode_buffer(s->glopts, - (const short int *)frame->data[0], - (const short int *)frame->data[1], - frame->nb_samples, - avpkt->data, avpkt->size); - break; - default: - av_log(avctx, AV_LOG_ERROR, - "Unsupported sample format %d.\n", avctx->sample_fmt); - return AVERROR_BUG; - } - } else { - ret = twolame_encode_flush(s->glopts, avpkt->data, avpkt->size); - } - - if (!ret) // no bytes written - return 0; - if (ret < 0) // twolame error 
- return AVERROR_UNKNOWN; - - if (frame) { - avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples); - if (frame->pts != AV_NOPTS_VALUE) - avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->initial_padding); - } else { - avpkt->pts = s->next_pts; - } - // this is for setting pts for flushed packet(s). - if (avpkt->pts != AV_NOPTS_VALUE) - s->next_pts = avpkt->pts + avpkt->duration; - - av_shrink_packet(avpkt, ret); - *got_packet_ptr = 1; - return 0; -} - -#define OFFSET(x) offsetof(TWOLAMEContext, x) -#define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM -static const AVOption options[] = { - { "mode", "Mpeg Mode", OFFSET(mode), AV_OPT_TYPE_INT, { .i64 = TWOLAME_AUTO_MODE }, TWOLAME_AUTO_MODE, TWOLAME_MONO, AE, "mode"}, - { "auto", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = TWOLAME_AUTO_MODE }, 0, 0, AE, "mode" }, - { "stereo", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = TWOLAME_STEREO }, 0, 0, AE, "mode" }, - { "joint_stereo", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = TWOLAME_JOINT_STEREO }, 0, 0, AE, "mode" }, - { "dual_channel", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = TWOLAME_DUAL_CHANNEL }, 0, 0, AE, "mode" }, - { "mono", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = TWOLAME_MONO }, 0, 0, AE, "mode" }, - { "psymodel", "Psychoacoustic Model", OFFSET(psymodel), AV_OPT_TYPE_INT, { .i64 = 3 }, -1, 4, AE}, - { "energy_levels","enable energy levels", OFFSET(energy), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, AE}, - { "error_protection","enable CRC error protection", OFFSET(error_protection), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, AE}, - { "copyright", "set MPEG Audio Copyright flag", OFFSET(copyright), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, AE}, - { "original", "set MPEG Audio Original flag", OFFSET(original), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, AE}, - { "verbosity", "set library optput level (0-10)", OFFSET(verbosity), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 10, AE}, - { NULL }, -}; - -static const AVClass twolame_class = { - .class_name = "libtwolame encoder", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; - -static const FFCodecDefault twolame_defaults[] = { - { "b", "0" }, - { NULL }, -}; - -static const int twolame_samplerates[] = { - 16000, 22050, 24000, 32000, 44100, 48000, 0 -}; - -const FFCodec ff_libtwolame_encoder = { - .p.name = "libtwolame", - CODEC_LONG_NAME("libtwolame MP2 (MPEG audio layer 2)"), - .p.type = AVMEDIA_TYPE_AUDIO, - .p.id = AV_CODEC_ID_MP2, - .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY, - .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE, - .priv_data_size = sizeof(TWOLAMEContext), - .init = twolame_encode_init, - FF_CODEC_ENCODE_CB(twolame_encode_frame), - .close = twolame_encode_close, - .defaults = twolame_defaults, - .p.priv_class = &twolame_class, - .p.sample_fmts = (const enum AVSampleFormat[]) { - AV_SAMPLE_FMT_FLT, - AV_SAMPLE_FMT_FLTP, - AV_SAMPLE_FMT_S16, - AV_SAMPLE_FMT_S16P, - AV_SAMPLE_FMT_NONE - }, - CODEC_OLD_CHANNEL_LAYOUTS(AV_CH_LAYOUT_MONO, AV_CH_LAYOUT_STEREO) - .p.ch_layouts = (const AVChannelLayout[]) { - AV_CHANNEL_LAYOUT_MONO, - AV_CHANNEL_LAYOUT_STEREO, - { 0 }, - }, - .p.supported_samplerates = twolame_samplerates, - .p.wrapper_name = "libtwolame", -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/h264_deblock_lasx.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/h264_deblock_lasx.c deleted file mode 100644 index c89bea9a8464ca3556208c09cca63e683d47554e..0000000000000000000000000000000000000000 --- 
a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/h264_deblock_lasx.c +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright (c) 2021 Loongson Technology Corporation Limited - * Contributed by Xiwei Gu - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavcodec/bit_depth_template.c" -#include "h264dsp_lasx.h" -#include "libavutil/loongarch/loongson_intrinsics.h" - -#define H264_LOOP_FILTER_STRENGTH_ITERATION_LASX(edges, step, mask_mv, dir, \ - d_idx, mask_dir) \ -do { \ - int b_idx = 0; \ - int step_x4 = step << 2; \ - int d_idx_12 = d_idx + 12; \ - int d_idx_52 = d_idx + 52; \ - int d_idx_x4 = d_idx << 2; \ - int d_idx_x4_48 = d_idx_x4 + 48; \ - int dir_x32 = dir * 32; \ - uint8_t *ref_t = (uint8_t*)ref; \ - uint8_t *mv_t = (uint8_t*)mv; \ - uint8_t *nnz_t = (uint8_t*)nnz; \ - uint8_t *bS_t = (uint8_t*)bS; \ - mask_mv <<= 3; \ - for (; b_idx < edges; b_idx += step) { \ - out &= mask_dir; \ - if (!(mask_mv & b_idx)) { \ - if (bidir) { \ - ref2 = __lasx_xvldx(ref_t, d_idx_12); \ - ref3 = __lasx_xvldx(ref_t, d_idx_52); \ - ref0 = __lasx_xvld(ref_t, 12); \ - ref1 = __lasx_xvld(ref_t, 52); \ - ref2 = __lasx_xvilvl_w(ref3, ref2); \ - ref0 = __lasx_xvilvl_w(ref0, ref0); \ - ref1 = __lasx_xvilvl_w(ref1, ref1); \ - ref3 = __lasx_xvshuf4i_w(ref2, 0xB1); \ - ref0 = __lasx_xvsub_b(ref0, ref2); \ - ref1 = __lasx_xvsub_b(ref1, ref3); \ - ref0 = __lasx_xvor_v(ref0, ref1); \ -\ - tmp2 = __lasx_xvldx(mv_t, d_idx_x4_48); \ - tmp3 = __lasx_xvld(mv_t, 48); \ - tmp4 = __lasx_xvld(mv_t, 208); \ - tmp5 = __lasx_xvld(mv_t + d_idx_x4, 208); \ - DUP2_ARG3(__lasx_xvpermi_q, tmp2, tmp2, 0x20, tmp5, tmp5, \ - 0x20, tmp2, tmp5); \ - tmp3 = __lasx_xvpermi_q(tmp4, tmp3, 0x20); \ - tmp2 = __lasx_xvsub_h(tmp2, tmp3); \ - tmp5 = __lasx_xvsub_h(tmp5, tmp3); \ - DUP2_ARG2(__lasx_xvsat_h, tmp2, 7, tmp5, 7, tmp2, tmp5); \ - tmp0 = __lasx_xvpickev_b(tmp5, tmp2); \ - tmp0 = __lasx_xvpermi_d(tmp0, 0xd8); \ - tmp0 = __lasx_xvadd_b(tmp0, cnst_1); \ - tmp0 = __lasx_xvssub_bu(tmp0, cnst_0); \ - tmp0 = __lasx_xvsat_h(tmp0, 7); \ - tmp0 = __lasx_xvpickev_b(tmp0, tmp0); \ - tmp0 = __lasx_xvpermi_d(tmp0, 0xd8); \ - tmp1 = __lasx_xvpickod_d(tmp0, tmp0); \ - out = __lasx_xvor_v(ref0, tmp0); \ - tmp1 = __lasx_xvshuf4i_w(tmp1, 0xB1); \ - out = __lasx_xvor_v(out, tmp1); \ - tmp0 = __lasx_xvshuf4i_w(out, 0xB1); \ - out = __lasx_xvmin_bu(out, tmp0); \ - } else { \ - ref0 = __lasx_xvldx(ref_t, d_idx_12); \ - ref3 = __lasx_xvld(ref_t, 12); \ - tmp2 = __lasx_xvldx(mv_t, d_idx_x4_48); \ - tmp3 = __lasx_xvld(mv_t, 48); \ - tmp4 = __lasx_xvsub_h(tmp3, tmp2); \ - tmp1 = __lasx_xvsat_h(tmp4, 7); \ - tmp1 = __lasx_xvpickev_b(tmp1, tmp1); \ - tmp1 = __lasx_xvadd_b(tmp1, cnst_1); \ - out = __lasx_xvssub_bu(tmp1, cnst_0); \ - out = __lasx_xvsat_h(out, 7); \ - out = __lasx_xvpickev_b(out, out); \ - ref0 = 
__lasx_xvsub_b(ref3, ref0); \ - out = __lasx_xvor_v(out, ref0); \ - } \ - } \ - tmp0 = __lasx_xvld(nnz_t, 12); \ - tmp1 = __lasx_xvldx(nnz_t, d_idx_12); \ - tmp0 = __lasx_xvor_v(tmp0, tmp1); \ - tmp0 = __lasx_xvmin_bu(tmp0, cnst_2); \ - out = __lasx_xvmin_bu(out, cnst_2); \ - tmp0 = __lasx_xvslli_h(tmp0, 1); \ - tmp0 = __lasx_xvmax_bu(out, tmp0); \ - tmp0 = __lasx_vext2xv_hu_bu(tmp0); \ - __lasx_xvstelm_d(tmp0, bS_t + dir_x32, 0, 0); \ - ref_t += step; \ - mv_t += step_x4; \ - nnz_t += step; \ - bS_t += step; \ - } \ -} while(0) - -void ff_h264_loop_filter_strength_lasx(int16_t bS[2][4][4], uint8_t nnz[40], - int8_t ref[2][40], int16_t mv[2][40][2], - int bidir, int edges, int step, - int mask_mv0, int mask_mv1, int field) -{ - __m256i out; - __m256i ref0, ref1, ref2, ref3; - __m256i tmp0, tmp1; - __m256i tmp2, tmp3, tmp4, tmp5; - __m256i cnst_0, cnst_1, cnst_2; - __m256i zero = __lasx_xvldi(0); - __m256i one = __lasx_xvnor_v(zero, zero); - int64_t cnst3 = 0x0206020602060206, cnst4 = 0x0103010301030103; - if (field) { - cnst_0 = __lasx_xvreplgr2vr_d(cnst3); - cnst_1 = __lasx_xvreplgr2vr_d(cnst4); - cnst_2 = __lasx_xvldi(0x01); - } else { - DUP2_ARG1(__lasx_xvldi, 0x06, 0x03, cnst_0, cnst_1); - cnst_2 = __lasx_xvldi(0x01); - } - step <<= 3; - edges <<= 3; - - H264_LOOP_FILTER_STRENGTH_ITERATION_LASX(edges, step, mask_mv1, - 1, -8, zero); - H264_LOOP_FILTER_STRENGTH_ITERATION_LASX(32, 8, mask_mv0, 0, -1, one); - - DUP2_ARG2(__lasx_xvld, (int8_t*)bS, 0, (int8_t*)bS, 16, tmp0, tmp1); - DUP2_ARG2(__lasx_xvilvh_d, tmp0, tmp0, tmp1, tmp1, tmp2, tmp3); - LASX_TRANSPOSE4x4_H(tmp0, tmp2, tmp1, tmp3, tmp2, tmp3, tmp4, tmp5); - __lasx_xvstelm_d(tmp2, (int8_t*)bS, 0, 0); - __lasx_xvstelm_d(tmp3, (int8_t*)bS + 8, 0, 0); - __lasx_xvstelm_d(tmp4, (int8_t*)bS + 16, 0, 0); - __lasx_xvstelm_d(tmp5, (int8_t*)bS + 24, 0, 0); -} diff --git a/spaces/congsaPfin/Manga-OCR/logs/CarX Drift Racing Mod Unlock Everything with One Click.md b/spaces/congsaPfin/Manga-OCR/logs/CarX Drift Racing Mod Unlock Everything with One Click.md deleted file mode 100644 index 01ede7be9770e2a23ccb9b3e5f7f2e6cb5a18fd3..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/CarX Drift Racing Mod Unlock Everything with One Click.md +++ /dev/null @@ -1,96 +0,0 @@ -
    -

    CarX Drift Racing Mod APK: The Ultimate Drifting Experience on Android

    -

    If you are a fan of drifting and racing games, you must have heard of CarX Drift Racing. It is one of the most popular and realistic drifting games on Android, with over 50 million downloads on Google Play. In this game, you can experience the thrill of drifting on various tracks, with different cars and settings. You can also compete with other players online, or challenge yourself in the career mode. But what if you want to enjoy the game without any limitations or restrictions? That's where CarX Drift Racing Mod APK comes in handy. In this article, we will tell you everything you need to know about CarX Drift Racing Mod APK, including its features, benefits, and how to download and install it on your device.

    -

    What is CarX Drift Racing?

    -

    CarX Drift Racing is a racing game developed by CarX Technologies, a company that specializes in creating realistic car physics and graphics. The game was released in 2014, and since then, it has received many updates and improvements. The game features over 100 cars, from sports cars to muscle cars, that you can customize and upgrade according to your preferences. You can also choose from over 40 tracks, each with its own unique layout and scenery. The game has a realistic physics engine that simulates the behavior of the cars on different surfaces, such as asphalt, grass, sand, or snow. You can control the cars using various options, such as tilt, buttons, or steering wheel. The game also has a dynamic sound system that makes you feel like you are in a real car.

    -

    -

    Features of CarX Drift Racing

    -

    CarX Drift Racing has many features that make it stand out from other racing games. Here are some of them:

    -

    Realistic physics and graphics

    -

    The game uses a sophisticated physics engine that accurately reproduces the dynamics of drifting. You can feel the weight of the car, the traction of the tires, the inertia of the body, and the effect of the wind. You can also adjust the parameters of the car, such as suspension, engine power, brake force, steering angle, and more. The game also has stunning graphics that create a realistic atmosphere. You can see the smoke from the tires, the sparks from the collisions, the reflections from the sun, and the shadows from the objects. You can also change the time of day and weather conditions to suit your mood.

    -

    Customizable cars and tracks

    -

    The game offers a wide range of cars that you can customize and upgrade. You can change the color, paint job, wheels, spoilers, exhausts, and more. You can also tune the performance of the car by modifying the engine, transmission, turbo, nitro, brakes, and more. You can also create your own tracks using the track editor. You can design the layout, add obstacles, ramps, bridges, tunnels, and more. You can also share your tracks with other players online.

    -

    Online and offline modes

    -

    The game has both online and offline modes that you can enjoy. In the online mode, you can compete with other players from around the world in various events and tournaments. You can also join or create your own club and chat with other members. In the offline mode, you can play in the career mode or in single races. In the career mode, you have to complete various missions and challenges to earn money and reputation. In single races, you can practice your skills and have fun. You can also watch replays of your races and share them with your friends.

    -

    Why download CarX Drift Racing Mod APK?

    -

    CarX Drift Racing is a great game, but it also has some drawbacks. For example, you need to spend a lot of money and gold to unlock new cars and tracks, or to upgrade and customize your existing ones. You also have to watch ads to get some extra rewards or to skip some waiting time. Moreover, you need to have a rooted device to install some mods or cheats. These things can ruin your gaming experience and make you frustrated. That's why you should download CarX Drift Racing Mod APK instead. CarX Drift Racing Mod APK is a modified version of the original game that gives you unlimited access to all the features and content of the game, without any limitations or restrictions. You can enjoy the game to the fullest, without spending any money or watching any ads. You also don't need to root your device to install it.

    -

    Benefits of CarX Drift Racing Mod APK

    -

    CarX Drift Racing Mod APK has many benefits that make it better than the original game. Here are some of them:

    -

    Unlimited money and gold

    -

    With CarX Drift Racing Mod APK, you don't have to worry about running out of money or gold. You can get unlimited amounts of both currencies, which you can use to buy new cars and tracks, or to upgrade and customize your existing ones. You can also use them to unlock new features and modes in the game. You can have the best cars and tracks in the game, without spending a dime.

    -

    Unlocked all cars and tracks

    -

    With CarX Drift Racing Mod APK, you don't have to wait for hours or days to unlock new cars and tracks. You can get access to all the cars and tracks in the game, right from the start. You can choose from over 100 cars, from sports cars to muscle cars, and from over 40 tracks, each with its own unique layout and scenery. You can also create your own tracks using the track editor. You can enjoy the variety and diversity of the game, without any limitations.

    -

    No ads and root required

    -

    With CarX Drift Racing Mod APK, you don't have to watch annoying ads that interrupt your gameplay or waste your time. You can play the game smoothly and seamlessly, without any distractions or interruptions. You also don't need to root your device to install the mod apk. You can install it easily and safely, without risking any damage or loss of data on your device.

    -

    How to download and install CarX Drift Racing Mod APK?

    -

    Downloading and installing CarX Drift Racing Mod APK is very simple and easy. Just follow these steps:

    -

    Step 1: Download the APK file from a trusted source

    -

    The first thing you need to do is to download the APK file of CarX Drift Racing Mod APK from a reliable and secure source. You can use the link below to download it directly from our website. The file is 100% safe and virus-free, so you don't have to worry about anything.
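
    As a rough illustration that is not part of the original guide, one way to confirm a downloaded APK was not corrupted or swapped in transit is to compute its SHA-256 checksum and compare it with the checksum published by the site you trust. In the sketch below, the file name and the expected hash are hypothetical placeholders you would replace with your own values.

    ```python
    # Minimal sketch: compare a downloaded APK against a published SHA-256 checksum.
    # The file name and the expected hash below are hypothetical placeholders.
    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    expected = "paste-the-publisher-sha256-value-here"   # placeholder
    actual = sha256_of("carx-drift-racing-mod.apk")       # placeholder file name
    print("Checksum OK" if actual == expected else "Checksum mismatch - do not install")
    ```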

    -

    Step 2: Enable unknown sources on your device

    -

    The next thing you need to do is to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on.

    -

    Step 3: Install the APK file and enjoy the game

    -

    The final thing you need to do is to install the APK file on your device. To do this, locate the file in your downloads folder, tap on it, and follow the instructions on the screen. Once the installation is complete, you can launch the game and enjoy it.

    -

    Conclusion

    -

    CarX Drift Racing is one of the best drifting games on Android, with realistic physics and graphics, customizable cars and tracks, online and offline modes, and more. However, if you want to enjoy the game without any limitations or restrictions, you should download CarX Drift Racing Mod APK instead. CarX Drift Racing Mod APK gives you unlimited money and gold, unlocked all cars and tracks, no ads and root required, and more. You can download it from our website for free, and install it on your device easily and safely. So what are you waiting for? Download CarX Drift Racing Mod APK now and experience the ultimate drifting experience on Android.

    -

    Frequently Asked Questions

    -

    Here are some of the most common questions that people ask about CarX Drift Racing Mod APK and their answers:

    -

    Q: Is CarX Drift Racing Mod APK safe to use?

    -

    A: Yes, CarX Drift Racing Mod APK is safe to use, as long as you download it from a trusted source like our website. The file is 100% virus-free and does not contain any malware or spyware. You can also scan it with any antivirus app before installing it.

    -

    Q: Does CarX Drift Racing Mod APK work on all Android devices?

    -

    A: Yes, CarX Drift Racing Mod APK works on all Android devices that support the original game. The minimum requirements are Android 4.1 and above, and 1 GB of RAM. However, some devices may experience lag or crashes due to low performance or compatibility issues.

    -

    Q: How can I update CarX Drift Racing Mod APK?

    -

    A: To update CarX Drift Racing Mod APK, you need to download the latest version of the mod apk from our website and install it over the existing one. You don't need to uninstall the previous version or lose your progress. However, you should always backup your data before updating, just in case something goes wrong.

    -

    Q: Can I play CarX Drift Racing Mod APK online with other players?

    -

    A: Yes, you can play CarX Drift Racing Mod APK online with other players, but only with those who have the same mod apk as you. You cannot play with players who have the original game or a different mod apk, as they will have different features and settings. You can also join or create your own club and chat with other members.

    -

    Q: Can I get banned for using CarX Drift Racing Mod APK?

    -

    A: No, you cannot get banned for using CarX Drift Racing Mod APK, as the game does not have any anti-cheat system or detection mechanism. You can use the mod apk without any risk or fear of getting banned. However, you should not abuse the mod apk or use it to cheat or harass other players, as that would be unfair and unethical.

    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Online Word and Enjoy the Familiar Microsoft 365 Experience.md b/spaces/congsaPfin/Manga-OCR/logs/Download Online Word and Enjoy the Familiar Microsoft 365 Experience.md deleted file mode 100644 index f83fc1a0dff9e9ecc243270b9f38be8c0c463442..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Online Word and Enjoy the Familiar Microsoft 365 Experience.md +++ /dev/null @@ -1,146 +0,0 @@ -
    -

    How to Download Online Word for Free

    -

    Online Word is a web-based version of Microsoft Word that allows you to create, edit, and share documents online. It is part of the Microsoft 365 suite of online productivity tools that also includes Excel, PowerPoint, OneNote, and more. In this article, you will learn what Online Word is, how to access it for free, how to download Online Word documents, and how to edit them offline.

    -

    -

    What is Online Word?

    -

    Online Word is a cloud-based word processor that lets you work on your documents from any device with an internet connection. You can use Online Word to create resumes, newsletters, reports, essays, and other types of documents. You can also collaborate with others in real time and co-author documents with your team members or classmates.

    -

    The benefits of using Online Word

    -

    Some of the benefits of using Online Word are:

    -
      -
    • It is free to use and does not require installation or subscription.
    • -
    • It automatically saves your changes and syncs them across your devices.
    • -
    • It offers familiar Microsoft Word features and functions, such as formatting tools, templates, spell check, grammar check, and more.
    • -
    • It supports various file formats, such as DOCX, PDF, ODT, HTML, and more.
    • -
    • It integrates with other Microsoft 365 apps and services, such as OneDrive, Outlook, Teams, SharePoint, and more.
    • -
    -

    The features of Online Word

    -

    Some of the features of Online Word are:

    -
      -
    • It allows you to create and edit documents online using a web browser.
    • -
    • It lets you share your documents with anyone and control their access permissions.
    • -
    • It enables you to co-author documents with others and see their changes as they happen.
    • -
    • It provides you with free Word templates for various purposes and occasions.
    • -
    • It gives you access to free add-ins that enhance your productivity and creativity.
    • -
    -

    How to access Online Word for free

    -

    There are two main ways to access Online Word for free:

    -

    Using Microsoft 365 Online

    -

    You can use Microsoft 365 Online to access Online Word and other Microsoft 365 apps for free on the web. To do this, follow these steps:

    -
      -
    1. Go to Microsoft365.com.
    2. -
    3. Sign in with your Microsoft account or create one for free.
    4. -
    5. Select the app launcher icon in the upper left corner and choose Word.
    6. -
    7. Create a new document or open an existing one from OneDrive or your computer.
    8. -
    -

    Using Office.com

    -

    You can also use Office.com to access Online Word and other Office apps for free on the web. To do this, follow these steps:

    -

      -
    1. Go to Office.com.
    2. -
    3. Sign in with your Microsoft account or create one for free.
    4. -
    5. Select the app launcher icon in the upper left corner and choose Word.
    6. -
    7. Create a new document or open an existing one from OneDrive or your computer.
    8. -
    -

    How to download Online Word documents

    -

    If you want to download your Online Word documents to your device, you need to save them to OneDrive first. OneDrive is a cloud storage service that lets you store and access your files online. You can also use OneDrive to share your files with others and sync them across your devices. Here is how to save and download your Online Word documents:

    -

    Saving documents to OneDrive

    -

    To save your Online Word documents to OneDrive, follow these steps:

    -
      -
    1. While working on your document, click the File tab in the upper left corner.
    2. -
    3. Select Save As and choose OneDrive - Personal.
    4. -
    5. Enter a name for your document and click Save.
    6. -
    7. Your document will be saved to your OneDrive account and you can access it from any device.
    8. -
    -

    Downloading documents from OneDrive

    -

    To download your Online Word documents from OneDrive, follow these steps:

    -
      -
    1. Go to OneDrive.com and sign in with your Microsoft account.
    2. -
    3. Find the document you want to download and select it.
    4. -
    5. Click the Download button in the top menu bar.
    6. -
    7. Your document will be downloaded to your device and you can open it with any compatible program.
    8. -
    -

    How to edit Online Word documents offline

    -

    If you want to edit your Online Word documents offline, you need to use either the Word app or the Word desktop program. The Word app is a mobile version of Microsoft Word that lets you view and edit documents on your phone or tablet. The Word desktop program is the full-featured version of Microsoft Word that lets you create and edit documents on your computer. Here is how to edit your Online Word documents offline:

    -

    Using the Word app

    -

    To use the Word app to edit your Online Word documents offline, follow these steps:

    -
      -
    1. Download and install the Word app from the Google Play Store or the Apple App Store.
    2. -
    3. Open the Word app and sign in with your Microsoft account.
    4. -
    5. Tap the Open tab and select OneDrive.
    6. -
    7. Find the document you want to edit and tap it.
    8. -
    9. The document will be downloaded to your device and you can edit it offline.
    10. -
    11. To save your changes, tap the File tab and select Save.
    12. -
    13. Your changes will be synced to OneDrive when you go online again.
    14. -
    -

    Using the Word desktop program

    -

    To use the Word desktop program to edit your Online Word documents offline, follow these steps:

    -
      -
    1. Download and install the Word desktop program from Microsoft.com.
    2. -
    3. Open the Word desktop program and sign in with your Microsoft account.
    4. -
    5. Click the File tab and select Open.
    6. -
    7. Select OneDrive - Personal and find the document you want to edit.
    8. -
    9. The document will be downloaded to your computer and you can edit it offline.
    10. -
    11. To save your changes, click the Save button or press Ctrl+S.
    12. -
    13. Your changes will be synced to OneDrive when you go online again.
    14. -
    -

    Conclusion

    -

    In this article, you learned how to download Online Word for free and how to use it to create, edit, and share documents online. You also learned how to save and download your Online Word documents to OneDrive and how to edit them offline using the Word app or the Word desktop program. Online Word is a convenient and powerful tool that can help you with your personal or professional projects. Try it out today and see what you can do with it!

    -

    FAQs

    -

    Here are some frequently asked questions about Online Word:

    -

    Q: Do I need a Microsoft account to use Online Word?

    -

    A: Yes, you need a Microsoft account to use Online Word. You can create one for free at Microsoft.com.

    -

    Q: Can I use Online Word without an internet connection?

    -

    A: No, you need an internet connection to use Online Word on the web. However, you can edit your Online Word documents offline using the Word app or the Word desktop program as explained above.

    -

    Q: How much storage space do I get with OneDrive?

    -

    A: You get 5 GB of free storage space with OneDrive. You can upgrade to more storage space by subscribing to Microsoft 365 or buying additional storage plans.

    -

    Q: Can I use Online Word with other cloud storage services?

    -

    A: Yes, you can use Online Word with other cloud storage services, such as Google Drive, Dropbox, Box, and more. You can add these services as places in Online Word and access your documents from there.

    -

    Q: How can I get more features and functions with Online Word?

    -

    A: You can get more features and functions with Online Word by subscribing to Microsoft 365. Microsoft 365 is a subscription service that gives you access to the premium versions of Online Word and other Microsoft 365 apps, as well as extra benefits such as more storage space, advanced security, and more. You can choose from different plans and prices depending on your needs and preferences.

    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Jan Aadhaar 2.0 How to Update Your Details and Download Your Card.md b/spaces/congsaPfin/Manga-OCR/logs/Jan Aadhaar 2.0 How to Update Your Details and Download Your Card.md deleted file mode 100644 index df20bc9a5c14b11aa98a0d66126d427bc80449b3..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Jan Aadhaar 2.0 How to Update Your Details and Download Your Card.md +++ /dev/null @@ -1,175 +0,0 @@ -
    -

    Jan Aadhaar Download 2.0: How to Download and Use the New Version of Rajasthan's Unique Identity Card

    -

    If you are a resident of Rajasthan, you might have heard of the Jan Aadhaar Yojana, a scheme launched by the state government in 2019 to provide a unique identity card to every citizen. The Jan Aadhaar card is a smart card that contains your biometric and demographic details, and can be used for various purposes such as availing subsidies, benefits, and services from the government.

    -

    In this article, we will tell you everything you need to know about Jan Aadhaar Download 2.0, the latest version of the Jan Aadhaar card that has been released in 2021. We will explain what are the features and benefits of this new version, how to download and install it on your device, and how to use it for various services. We will also provide some tips on how to troubleshoot common issues with Jan Aadhaar Download 2.0.

    -

    -

    What is Jan Aadhaar Yojana?

    -

    Jan Aadhaar Yojana is a scheme launched by the Rajasthan government in December 2019, with the objective of "One Number, One Card, One Identity" for the residents of the state. The scheme aims to provide a unique identity card to every citizen, which can be used as a single document for availing various government schemes and services.

    -

    The objective and benefits of the scheme

    -

    The main objective of Jan Aadhaar Yojana is to simplify and streamline the delivery of government services to the citizens, and to ensure transparency and accountability in governance. Some of the benefits of the scheme are:

    -
      -
    • It provides a unique identity number (Jan Aadhaar number) to every citizen, which can be used as a single identifier for various purposes.
    • -
    • It provides a smart card (Jan Aadhaar card) that contains biometric and demographic details of the citizen, such as name, date of birth, gender, address, photo, fingerprint, iris scan, etc.
    • -
    • It enables online verification and authentication of the citizen's identity using biometric or OTP methods.
    • -
    • It enables access to various government schemes and services using the Jan Aadhaar card or number, such as ration card, pension, health insurance, education, etc.
    • -
    • It reduces duplication and fraud in government service delivery, and ensures that only eligible beneficiaries receive the benefits.
    • -
    • It improves efficiency and convenience for both citizens and government officials in availing and providing services.
    • -
    -

    The eligibility and documents required for the scheme

    -

    The eligibility criteria for Jan Aadhaar Yojana are:

    -
      -
    • The applicant must be a resident of Rajasthan.
    • -
    • The applicant must not have enrolled for any other similar identity card scheme in Rajasthan or any other state.
    • -
    -

    The documents required for Jan Aadhaar Yojana are:

    -
      -
    • Aadhaar card or number
    • -
    • Bhamashah card or number (if available)
    • -
    • Mobile number
    • -
    • Email ID (optional)
    • -
    -

    The process of enrollment and registration for the scheme

    -

    The process of enrollment and registration for Jan Aadhaar Yojana is as follows:

    -

      -
    1. Visit the official website of Jan Aadhaar Yojana at https://jan-aadhaar.rajasthan.gov.in/ or download the Jan Aadhaar mobile app from Google Play Store or App Store.
    2. -
    3. Click on the "Enroll" option and enter your Aadhaar number or Bhamashah number (if available).
    4. -
    5. Verify your identity using biometric or OTP method.
    6. -
    7. Fill in your personal details, such as name, date of birth, gender, address, etc.
    8. -
    9. Upload your photo and scan your fingerprint and iris.
    10. -
    11. Review and submit your application form.
    12. -
    13. You will receive an acknowledgment slip with your Jan Aadhaar number and QR code.
    14. -
    15. You can download your Jan Aadhaar card from the website or app, or collect it from the nearest e-Mitra kiosk.
    16. -
    -

    What is Jan Aadhaar Download 2.0?

    -

    Jan Aadhaar Download 2.0 is the latest version of the Jan Aadhaar card that has been released by the Rajasthan government in 2021. It is a digital version of the Jan Aadhaar card that can be downloaded and stored on your smartphone, tablet, or computer. It can also be printed on a paper or plastic card if you prefer a physical copy.

    -

    The features and improvements of the new version

    -

    Some of the features and improvements of Jan Aadhaar Download 2.0 are:

    -
      -
    • It has a new design and layout that is more user-friendly and attractive.
    • -
    • It has a QR code that can be scanned to verify the authenticity and validity of the card.
    • -
    • It has a digital signature that ensures the security and integrity of the card.
    • -
    • It has a hologram that makes it more durable and resistant to tampering.
    • -
    • It has a chip that stores the biometric and demographic data of the cardholder.
    • -
    • It has a NFC feature that enables contactless verification and authentication of the card.
    • -
    -

    The steps to download and install the new version

    -

    The steps to download and install Jan Aadhaar Download 2.0 are:

    -
      -
    1. Visit the official website of Jan Aadhaar Yojana at https://jan-aadhaar.rajasthan.gov.in/ or download the Jan Aadhaar mobile app from Google Play Store or App Store.
    2. -
    3. Login with your Jan Aadhaar number and password, or register if you are a new user.
    4. -
    5. Click on the "Download" option and select the format of your choice (PDF, JPG, PNG, etc.).
    6. -
    7. Enter your OTP or biometric to confirm your identity.
    8. -
    9. Your Jan Aadhaar card will be downloaded to your device.
    10. -
    11. You can open it with any compatible software or app, or print it if you want a hard copy.
    12. -
    -

    The steps to use the new version for various services

    -

    The steps to use Jan Aadhaar Download 2.0 for various services are:

    -
      -
    1. Visit the website or app of the service provider that accepts Jan Aadhaar card as a valid document.
    2. -
    3. Select the option to use Jan Aadhaar card as your identity proof.
    4. -
    5. Scan the QR code or enter the Jan Aadhaar number on your card.
    6. -
    7. Verify your identity using OTP or biometric method.
    8. -
    9. You will be able to access the service or benefit that you are eligible for.
    10. -
    -

    How to troubleshoot common issues with Jan Aadhaar Download 2.0?

    -

    If you face any issues with Jan Aadhaar Download 2.0, such as downloading, installing, using, updating, or linking your card, you can try some of these solutions:

    -

    How to update your details on the Jan Aadhaar card

    -

    If you want to update any details on your Jan Aadhaar card, such as name, address, photo, etc., you can do so by following these steps:

    -
      -
    1. Visit the official website of Jan Aadhaar Yojana at https://jan-aadhaar.rajasthan.gov.in/ or download the Jan Aadhaar mobile app from Google Play Store or App Store.
    2. -
    3. Login with your Jan Aadhaar number and password, or register if you are a new user.
    4. -
    5. Click on the "Update" option and select the details that you want to update.
    6. -
    7. Enter the new or corrected details and upload the supporting documents if required.
    8. -
    9. Submit your request and pay the fee if applicable.
    10. -
    11. You will receive an acknowledgment slip with your update request number.
    12. -
    13. You can track the status of your request on the website or app.
    14. -
    15. Once your request is approved, you can download your updated Jan Aadhaar card from the website or app, or collect it from the nearest e-Mitra kiosk.
    16. -
    -

    How to link your Jan Aadhaar card with other documents

    -

    If you want to link your Jan Aadhaar card with other documents, such as ration card, bank account, PAN card, voter ID card, etc., you can do so by following these steps:

    -
      -
    1. Visit the website or app of the document issuer that allows linking with Jan Aadhaar card.
    2. -
    3. Select the option to link your document with Jan Aadhaar card.
    4. -
    5. Enter your document number and Jan Aadhaar number.
    6. -
    7. Verify your identity using OTP or biometric method.
    8. -
    9. You will receive a confirmation message that your document is linked with your Jan Aadhaar card.
    10. -
    -

    How to report any errors or complaints with the Jan Aadhaar card

    -

    If you find any errors or have any complaints with your Jan Aadhaar card, such as incorrect details, missing information, damaged card, etc., you can report them by following these steps:

    -
      -
    1. Visit the official website of Jan Aadhaar Yojana at https://jan-aadhaar.rajasthan.gov.in/ or download the Jan Aadhaar mobile app from Google Play Store or App Store.
    2. -
    3. Login with your Jan Aadhaar number and password, or register if you are a new user.
    4. -
    5. Click on the "Complaint" option and select the type of complaint that you have.
    6. -
    7. Enter the details of your complaint and upload any supporting documents or images if required.
    8. -
    9. Submit your complaint and note down the complaint number.
    10. -
    11. You can track the status of your complaint on the website or app.
    12. -
    13. You will receive a resolution or response to your complaint within a specified time frame.
    14. -
    -

    Conclusion

    -

    In this article, we have explained what is Jan Aadhaar Yojana, what is Jan Aadhaar Download 2.0, how to download and use the new version of Rajasthan's unique identity card, and how to troubleshoot common issues with it. We hope that this article has helped you to understand and appreciate this scheme that aims to provide a single identity card to every citizen of Rajasthan. If you have not enrolled for Jan Aadhaar Yojana yet, we urge you to do so as soon as possible and enjoy the benefits and convenience of this scheme. If you have any questions or feedback, please feel free to contact us through the website or app. Thank you for reading!

    -

    Frequently Asked Questions

    -

    Here are some of the frequently asked questions about Jan Aadhaar Download 2.0:

    -
      -
    1. What is the difference between Jan Aadhaar Download 2.0 and e-Jan Aadhaar?
    2. -

      e-Jan Aadhaar is an electronic version of the Jan Aadhaar card that can be downloaded from the UIDAI website using your Aadhaar number. Jan Aadhaar Download 2.0 is a digital version of the Jan Aadhaar card that can be downloaded from the Jan Aadhaar Yojana website or app using your Jan Aadhaar number. Both versions are valid and can be used for various purposes, but Jan Aadhaar Download 2.0 has some additional features and improvements that make it more user-friendly and secure.

      -
    3. Is Jan Aadhaar Download 2.0 free of cost?
    4. -

      Yes, Jan Aadhaar Download 2.0 is free of cost. You do not have to pay any fee to download or use it. However, if you want to print it on a paper or plastic card, you may have to pay a nominal fee at the e-Mitra kiosk or any other authorized center.

      -
    5. Can I use Jan Aadhaar Download 2.0 offline?
    6. -

      Yes, you can use Jan Aadhaar Download 2.0 offline. You do not need an internet connection to access or verify your card. However, you may need an internet connection to update your details or link your card with other documents.

      -
    7. How can I get a physical copy of my Jan Aadhaar card?
    8. -

      If you want a physical copy of your Jan Aadhaar card, you can print it on a paper or plastic card by visiting the nearest e-Mitra kiosk or any other authorized center. You will have to pay a nominal fee for the printing service. You can also order a physical copy of your Jan Aadhaar card online from the Jan Aadhaar Yojana website or app, and get it delivered to your address.

      -
    9. How can I contact the Jan Aadhaar Yojana helpline?
    10. -

      If you have any queries or issues related to Jan Aadhaar Yojana, you can contact the Jan Aadhaar Yojana helpline by calling the toll-free number 1800-180-6127 or emailing at jan-aadhaar@rajasthan.gov.in. You can also visit the nearest e-Mitra kiosk or any other authorized center for assistance.

      -

    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Stream and Download Music with Less Data Using Boomplay Lite APK.md b/spaces/congsaPfin/Manga-OCR/logs/Stream and Download Music with Less Data Using Boomplay Lite APK.md deleted file mode 100644 index 1cd5126039a64255e5f3d270aaefbdcdd32d72ee..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Stream and Download Music with Less Data Using Boomplay Lite APK.md +++ /dev/null @@ -1,108 +0,0 @@ -
    -

    Download Boomplay Lite APK: A Music Player for Everyone

    -

    Do you love listening to music on your Android device? Do you want to stream and download millions of songs across different genres and artists? Do you want to save data and storage space while enjoying your favorite tunes? If you answered yes to any of these questions, then you should try Boomplay Lite, a music player app that offers all these features and more. In this article, we will tell you what Boomplay Lite is, how to download and install it, and how to use it.

    -

    -

    What is Boomplay Lite?

    -

    Boomplay Lite is a music player app that lets you stream and download over 100 million songs from various genres like Pop, Rock, Afrobeats, Afropop, and Reggae. It is a lite version of the popular Boomplay app, which has over 200 million users worldwide. Boomplay Lite is designed to use fewer resources and data, making it suitable for low-end devices and slow internet connections. It also has a simple and user-friendly interface that makes it easy to navigate and enjoy music.

    -

    Features of Boomplay Lite

    -

    Boomplay Lite has many features that make it a great music player app for everyone. Here are some of them:

    -

    Stream and download music

    -

    With Boomplay Lite, you can stream and download music for free. You can choose from different quality options, ranging from 64 kbps to 320 kbps, depending on your preference and network condition. You can also download songs to your device for offline listening. You can access your downloaded songs from the "My Music" section of the app.

    -

    Discover new music and trending songs

    -

    Boomplay Lite helps you discover new music and trending songs from different regions and genres. You can browse through various categories, such as "New Releases", "Top Charts", "Editor's Picks", "Genres", "Moods", and "Artists". You can also follow your favorite artists and get notified when they release new songs or albums. You can also check out the "Boom Radio" feature, which lets you listen to curated playlists based on your mood or preference.

    -

    Save data and storage space

    -

    Boomplay Lite is designed to save data and storage space on your device. It is only 15 MB to download and install, compared to the 40 MB of the regular Boomplay app. It also uses less data when streaming or downloading music, thanks to its smart compression technology. You can also adjust the data usage settings in the app to suit your needs. You can choose to stream or download only on Wi-Fi, or limit the number of songs you can download per day.

    -

    How to download and install Boomplay Lite?

    -

    If you want to download and install Boomplay Lite on your Android device, you have three options:

    -

    Download from Google Play Store

    -

    The easiest way to get Boomplay Lite is to download it from the Google Play Store. Just search for "Boomplay Lite" in the store, or click on this link from your browser. Then, tap on the "Install" button and wait for the app to download and install on your device. You can then open the app and start enjoying music.

    -

    Download from Boomplay website

    -

    You can also download Boomplay Lite from the official Boomplay website. Just visit this link from your browser, or scan the QR code on the page with your device's camera. Then, tap on the "Download" button and follow the instructions to download and install the APK file on your device. You may need to enable the "Unknown sources" option in your device's settings to allow the installation of apps from outside the Google Play Store.

    -

    Download from third-party sources

    -

    Another option to download Boomplay Lite is to use third-party sources, such as APKPure or APKMirror. These are websites that host APK files of various apps, including Boomplay Lite. You can visit these websites from your browser, or use their dedicated apps to download and install Boomplay Lite on your device. However, you should be careful when using third-party sources, as they may contain malware or viruses that can harm your device. You should also check the authenticity and security of the APK file before installing it.
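
    The article does not show how to run such a check, so here is a small, hedged sketch of a pre-install sanity check: it only confirms that the downloaded file is a well-formed APK-style ZIP archive that contains an Android manifest. It is not a substitute for verifying the developer's signature (for example with the apksigner tool that ships with the Android SDK build-tools), and the file name used is a placeholder.

    ```python
    # Rough sanity check before sideloading: is the download a complete APK archive?
    # This does not verify the signing certificate; use Android's apksigner for that.
    import zipfile

    def looks_like_apk(path):
        if not zipfile.is_zipfile(path):            # truncated or not a ZIP at all
            return False
        with zipfile.ZipFile(path) as apk:
            names = set(apk.namelist())
            return "AndroidManifest.xml" in names   # every APK carries this entry

    print(looks_like_apk("boomplay-lite.apk"))      # hypothetical file name
    ```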

    -

    How to use Boomplay Lite?

    -

    Once you have downloaded and installed Boomplay Lite on your device, you can start using it to enjoy music. Here are some steps to help you use the app:

    -

    Sign up or log in

    -

    When you open Boomplay Lite for the first time, you will be asked to sign up or log in. You can sign up with your phone number, email address, or Facebook account. You can also log in with your existing Boomplay account if you have one. Signing up or logging in will allow you to access more features and benefits, such as personalized recommendations, rewards, and social interactions.

    -

    Browse and search music

    -

    Boomplay Lite has a lot of music for you to explore and discover. You can browse through different categories, such as "New Releases", "Top Charts", "Editor's Picks", "Genres", "Moods", and "Artists". You can also search for specific songs, albums, artists, or playlists by using the search bar at the top of the app. You can also use voice search by tapping on the microphone icon next to the search bar.

    -

    Play and download music

    -

    To play a song, just tap on it and it will start playing. You can also tap on the three-dot icon next to a song to see more options, such as adding it to your playlist, sharing it with others, or downloading it for offline listening. You can also swipe left or right on the screen to skip or go back to the previous or next song. You can also control the playback from the notification bar or the lock screen of your device.

    -

    Manage your library and playlist

    -

    You can access your library and playlist from the "My Music" section of the app. Here, you can see all the songs that you have downloaded, liked, or added to your playlist. You can also create your own playlist by tapping on the "+" icon at the bottom right corner of the screen. You can name your playlist, add songs to it, and edit it as you wish. You can also share your playlist with others by tapping on the share icon next to it.

    -

    Conclusion

    -

    Boomplay Lite is a music player app that lets you stream and download over 100 million songs from various genres and artists. It is a lite version of the popular Boomplay app, which has over 200 million users worldwide. Boomplay Lite is designed to use fewer resources and data, making it suitable for low-end devices and slow internet connections. It also has a simple and user-friendly interface that makes it easy to navigate and enjoy music.

    -

    If you want to download Boomplay Lite on your Android device, you have three options: download it from the Google Play Store, download it from the official Boomplay website, or download it from third-party sources. However, you should be careful when using third-party sources, as they may contain malware or viruses that can harm your device.

    -

    To use Boomplay Lite, you need to sign up or log in with your phone number, email address, or Facebook account. You can then browse and search music from different categories, such as "New Releases", "Top Charts", "Editor's Picks", "Genres", "Moods", and "Artists". You can also play and download music for free, and manage your library and playlist. You can also follow your favorite artists and get notified when they release new songs or albums.

    -

    Boomplay Lite is a music player app that offers a lot of features and benefits for music lovers. It is a great way to enjoy music on your Android device without worrying about data and storage space. Download Boomplay Lite today and experience music like never before!

    -

    Frequently Asked Questions

    -

    Here are some frequently asked questions about Boomplay Lite:

    -
      -
    1. What is the difference between Boomplay and Boomplay Lite? -

      Boomplay Lite is a lite version of the regular Boomplay app, which has more features and functions, such as podcasts, videos, live shows, and games. Boomplay Lite is designed to use less data and storage space, making it suitable for low-end devices and slow internet connections. It also has a simpler and more user-friendly interface than the regular Boomplay app.

    2. -
    3. Is Boomplay Lite free to use? -

      Yes, Boomplay Lite is free to use. You can stream and download music for free, without any subscription or payment. However, you may see some ads in the app, which help support the app and the artists. You can also earn coins by completing tasks or watching ads, which you can use to redeem rewards or gifts.

    4. -
    5. How can I update Boomplay Lite? -

      You can update Boomplay Lite by following the same steps as downloading it. You can check for updates in the Google Play Store, the official Boomplay website, or third-party sources. You can also enable the "Auto-update" option in your device's settings to update the app automatically when a new version is available.

    6. -
    7. How can I contact Boomplay Lite support? -

      If you have any questions, feedback, or issues with Boomplay Lite, you can contact the Boomplay Lite support team by using the "Feedback" option in the app's settings. You can also email them at feedback.boomplay@transsnet.com or visit their Facebook page. They will try to respond to you as soon as possible.

    8. -
    9. How can I delete Boomplay Lite? -

      If you want to delete Boomplay Lite from your device, you can follow these steps:

      -
        -
      • Go to your device's settings and tap on "Apps" or "Applications".
      • -
      • Find and tap on "Boomplay Lite" in the list of apps.
      • -
      • Tap on "Uninstall" and confirm your action.
      • -
      -

      You can also delete the app by long-pressing its icon on your home screen and dragging it to the "Uninstall" option.

    10. -

    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/The Role of H-NS-Like Proteins in Bacterial Gene Regulation and Adaptation.md b/spaces/congsaPfin/Manga-OCR/logs/The Role of H-NS-Like Proteins in Bacterial Gene Regulation and Adaptation.md deleted file mode 100644 index 7cccb12a9fd64f1617759fbf55d7afe03dcb37a2..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/The Role of H-NS-Like Proteins in Bacterial Gene Regulation and Adaptation.md +++ /dev/null @@ -1,63 +0,0 @@ -
    -

    What are ns likes and how to get them?

    -

    If you are a content creator, you might have heard of ns likes. But what are they and why do they matter? In this article, we will explain what ns likes are, why they are important, and how you can get them for your content. We will also introduce you to two tools that can help you get more ns likes: the Get Real Likes and Views: 100k app and the NS Like This plugin for WordPress.

    -

    -

    Introduction

    -

    What are ns likes?

    -

    Ns likes are a type of social media engagement that shows how much people like your content. Ns likes are similar to other types of likes, such as Facebook likes, Instagram likes, or YouTube likes. However, ns likes are not limited to any specific platform. You can get ns likes for any type of content, such as videos, photos, blogs, podcasts, or ebooks.

    -

    Why are ns likes important?

    -

    Ns likes are important because they can help you grow your audience, boost your credibility, and increase your revenue. Here are some of the benefits of getting more ns likes:

    -
      -
    • Ns likes can help you attract more viewers or readers to your content. People are more likely to check out your content if they see that it has a lot of ns likes. Ns likes can also help your content rank higher on search engines and social media algorithms.
    • -
    • Ns likes can help you establish your authority and reputation in your niche. People are more likely to trust your content if they see that it has a lot of ns likes. Ns likes can also help you build relationships with other influencers and potential collaborators in your field.
    • -
    • Ns likes can help you monetize your content. People are more likely to buy your products or services if they see that your content has a lot of ns likes. Ns likes can also help you attract sponsors and advertisers who want to reach your audience.
    • -
    -

    How to get ns likes for your content?

    -

    There are many ways to get more ns likes for your content, such as creating high-quality content, promoting your content on social media, engaging with your audience, and asking for feedback. However, if you want to get ns likes faster and easier, you might want to use some tools that can help you with that. Here are two tools that we recommend:

    -

    Use the Get Real Likes and Views: 100k app

    -

    What is the app and how does it work?

    -

    The Get Real Likes and Views: 100k app is an Android app that helps you get more ns likes for your videos and photos. The app works by allowing you to exchange ns likes with other users who have similar interests as you. You can earn coins by liking other users' content, and then use those coins to get ns likes for your own content. You can also buy coins with real money if you want to get ns likes faster.

    -

    What are the benefits of using the app?

    -

    Some of the benefits of using the Get Real Likes and Views: 100k app are:

    -
      -
    • You can get real and organic ns likes from real users who are interested in your content.
    • -
    • You can get ns likes for any type of video or photo, regardless of the platform or format.
    • -
    • You can get ns likes instantly or schedule them for later.
    • -
    • You can customize the number and frequency of ns likes that you want to receive.
    • -
    • You can track your progress and performance with analytics and reports.
    • -
    -

Use the NS Like This plugin for WordPress

-

What is the plugin and how does it work?

    -

    The NS Like This plugin is a WordPress plugin that helps you get more ns likes for your blog posts. The plugin works by adding a ns like button to your blog posts, which allows your visitors to like your content with one click. The plugin also displays the number of ns likes that your posts have received, and allows you to customize the appearance and position of the ns like button.
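
The real plugin is a WordPress (PHP) plugin, so the sketch below is not its actual code; it is only an illustration, written in Python, of the one-click flow described above: record at most one like per visitor per post and return the updated count to display next to the button.

```python
# Purely illustrative: how a one-click "like" button can record and display
# per-post like counts. Not the NS Like This plugin's actual implementation.

from collections import defaultdict

like_counts = defaultdict(int)   # post_id -> number of ns likes to display
liked_by = defaultdict(set)      # post_id -> visitor ids, so each visitor counts once

def handle_like_click(post_id: str, visitor_id: str) -> int:
    """Register a like (once per visitor) and return the new count to show."""
    if visitor_id not in liked_by[post_id]:
        liked_by[post_id].add(visitor_id)
        like_counts[post_id] += 1
    return like_counts[post_id]

print(handle_like_click("my-first-post", "visitor-42"))  # 1
print(handle_like_click("my-first-post", "visitor-42"))  # still 1 - no double counting
print(handle_like_click("my-first-post", "visitor-99"))  # 2
```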

    -

    What are the benefits of using the plugin?

    -

    Some of the benefits of using the NS Like This plugin are:

    -
      -
    • You can get more ns likes for your blog posts from your existing and potential readers.
    • -
    • You can get ns likes for any type of blog post, regardless of the topic or genre.
    • -
    • You can increase the engagement and interaction on your blog, as ns likes can encourage comments and shares.
    • -
    • You can improve the SEO and visibility of your blog, as ns likes can signal to search engines and social media platforms that your content is valuable and relevant.
    • -
    • You can easily install and configure the plugin, as it is compatible with most WordPress themes and plugins.
    • -
    -

    Conclusion

    -

    Summary of the main points

    -

    Ns likes are a type of social media engagement that shows how much people like your content. Ns likes are important because they can help you grow your audience, boost your credibility, and increase your revenue. You can get more ns likes for your content by using some tools that can help you with that, such as the Get Real Likes and Views: 100k app and the NS Like This plugin for WordPress.

    -

    Call to action

    -

    If you want to get more ns likes for your content, don't hesitate to try out these tools. You can download the Get Real Likes and Views: 100k app from the Google Play Store, or install the NS Like This plugin from the WordPress Plugin Directory. You will be amazed by how much ns likes can improve your content creation and marketing. Start getting more ns likes today!

    -

    FAQs

    -

    Here are some of the frequently asked questions about ns likes:

    -
      -
    1. What is the difference between ns likes and other types of likes?
    2. -

      Ns likes are different from other types of likes because they are not limited to any specific platform or format. You can get ns likes for any type of content, such as videos, photos, blogs, podcasts, or ebooks. Ns likes are also more flexible and customizable than other types of likes, as you can choose how many and how often you want to receive them.

      -
    3. How do I know if my content has received ns likes?
    4. -

      You can check if your content has received ns likes by using the tools that we have mentioned in this article. The Get Real Likes and Views: 100k app will show you how many ns likes your videos and photos have received, and the NS Like This plugin will show you how many ns likes your blog posts have received. You can also use analytics and reports to track your progress and performance.

      -
    5. Are ns likes safe and legal?
    6. -

Yes, ns likes are safe and legal. Ns likes are generated by real users who are interested in your content, not by bots or fake accounts. They do not violate the terms of service or policies of any platform, and they do not harm or spam your content or audience. Ns likes are an ethical and legitimate way to enhance your content creation and marketing.

      -
    7. How much do ns likes cost?
    8. -

      Ns likes are very affordable and cost-effective. You can get ns likes for free by exchanging them with other users who have similar interests as you. You can also buy coins with real money if you want to get ns likes faster. The Get Real Likes and Views: 100k app offers various packages of coins that you can choose from, starting from $0.99 for 100 coins. The NS Like This plugin is free to download and use, but you can also upgrade to a premium version for more features and support, starting from $19 per year.

      -
9. Can I get more than just ns likes for my content?
    10. -

Yes, you can get more than just ns likes for your content. Ns likes are only one type of social media engagement. You can also get other types of engagement, such as comments, shares, views, followers, subscribers, ratings, reviews, bookmarks, and downloads. You can use different tools and strategies to get more engagement for your content, depending on your goals and preferences.

      -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Whats New in Plague Inc. APK Latest Version? Find Out Here!.md b/spaces/congsaPfin/Manga-OCR/logs/Whats New in Plague Inc. APK Latest Version? Find Out Here!.md deleted file mode 100644 index 4516129ecd43b9f4216afac1ecd79e30d8f914a1..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Whats New in Plague Inc. APK Latest Version? Find Out Here!.md +++ /dev/null @@ -1,228 +0,0 @@ - -

    Plague Inc. APK Latest Version: A Guide for Beginners

    -

    Have you ever wondered what it would be like to create and unleash a deadly plague upon the world? Or maybe you want to save humanity from a global pandemic? If so, you might want to check out Plague Inc., a popular strategy-simulation game that lets you do just that.

    -

    Plague Inc. is a game developed by Ndemic Creations, a UK-based independent studio. It was released in 2012 for iOS and Android devices, and later for Windows Phone, PC, and consoles. The game has been downloaded over 160 million times as of May 2021, and has received positive reviews from critics and players alike.

    -

    plague inc apk latest version


    DOWNLOAD 🆓 https://urlca.com/2uO7TH



    -

    In Plague Inc., you can choose from different game modes and disease types, each with their own unique challenges and strategies. You can design your own pathogen, from bacteria to bio-weapons, and evolve it to infect and kill as many people as possible. You can also play as the cure team, trying to stop the spread of a deadly outbreak.

    -

    But how can you get the latest version of Plague Inc. on your device? And how can you play it effectively? In this article, we will answer these questions and more. We will show you how to download and install Plague Inc. APK latest version, how to play it, and some tips and tricks to help you master it.

    -

    What is Plague Inc. APK Latest Version?

    -

    Before we get into the details, let's first explain what an APK file is and why you might need it.

    -

    An APK file is an Android Package file, which is a format used to distribute and install applications on Android devices. It contains all the necessary files and data for an app to run properly.

    -

    Normally, you can download apps from the Google Play Store, which automatically installs them on your device. However, sometimes you might want to download apps from other sources, such as third-party websites or developers. This is where an APK file comes in handy.

    -

    plague inc android game free download apk
    -plague inc apk mod unlocked all 2023
    -plague inc apk full version no ads
    -plague inc apk latest update 1.19.10
    -plague inc apk for pc windows 10
    -plague inc apk old version 1.16.3
    -plague inc apk expansion pack download
    -plague inc apk the cure mode
    -plague inc apk xapk file
    -plague inc apk ndemic creations
    -plague inc apk simulation game
    -plague inc apk offline play
    -plague inc apk hack unlimited dna
    -plague inc apk scenarios guide
    -plague inc apk cheats codes
    -plague inc apk premium features
    -plague inc apk virus types
    -plague inc apk achievements list
    -plague inc apk strategy tips
    -plague inc apk review ratings
    -plague inc apk android tv support
    -plague inc apk tablet compatible
    -plague inc apk multiplayer online
    -plague inc apk custom scenarios
    -plague inc apk editor tool
    -plague inc apk speed run mode
    -plague inc apk neurax worm unlock
    -plague inc apk necroa virus origin
    -plague inc apk shadow plague release
    -plague inc apk simian flu outbreak
    -plague inc apk zombie apocalypse
    -plague inc apk bacteria mega brutal
    -plague inc apk fungus spore burst
    -plague inc apk parasite symbiosis
    -plague inc apk prion neural atrophy
    -plague inc apk nano-virus kill switch
    -plague inc apk bio-weapon lethality
    -plague inc apk fake news scenario
    -plague inc apk board game adaptation
    -plague inc apk rebel vs dictator
    -plague inc apk climate change impact
    -plague miniclip.com download link
    -play store install for free
    -google play pass subscription
    -app store download for ios
    -steam download for pc
    -xbox game pass ultimate
    -nintendo switch eshop
    -ps4 playstation store
    -amazon appstore for android

    -

    By downloading an APK file, you can get access to apps that are not available on the Play Store, or that have been modified or updated by someone else. For example, you might want to download an APK file of Plague Inc. if:

    -
      -
    • You want to get the latest version of the game before it is officially released on the Play Store.
    • -
    • You want to get access to features or content that are not available on the Play Store version.
    • -
    • You want to play the game on a device that does not support the Play Store or Google services.
    • -
    -

However, downloading an APK file also comes with some risks. You need to be careful about where you get your APK file from, as some sources might contain malware or viruses that can harm your device or steal your data. You also need to enable unknown sources in your device settings, which might expose your device to security risks. Therefore, you should always download APK files from trusted and reputable sources, and scan them for malware before installing them.
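
One simple precaution, if the source publishes a checksum for its APK, is to compute the checksum of the file you downloaded and compare the two before installing. The sketch below uses only Python's standard library; the file name is an assumption for the example.

```python
# Compute the SHA-256 checksum of a downloaded APK so it can be compared
# against the checksum published by the source (when one is provided).

import hashlib

def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical file name - adjust to whatever your browser saved the APK as.
print(sha256_of_file("plague-inc-latest.apk"))
```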

    -

    How to Download and Install Plague Inc. APK Latest Version

    -

    Now that you know what an APK file is and why you might need it, let's see how you can download and install Plague Inc. APK latest version on your device. Here are the steps you need to follow:

    -

    Step 1: Find a Reliable Source for the APK File

    -

    The first step is to find a website or a developer that offers the Plague Inc. APK file that you want. You can search online for keywords like "Plague Inc. APK latest version" or "Plague Inc. APK mod" and see what results come up. However, as we mentioned before, not all sources are safe and reliable, so you need to be careful.

    -

    Some of the factors that you should consider when choosing a source are:

    -
      -
    • The reputation and credibility of the website or the developer.
    • -
    • The ratings and reviews from other users who have downloaded the APK file.
    • -
    • The date and version of the APK file, and whether it matches the latest version of the game.
    • -
    • The size and content of the APK file, and whether it contains any unwanted or malicious files.
    • -
    -

    One of the sources that we recommend for downloading Plague Inc. APK latest version is [APKPure], a website that provides safe and verified APK files for various apps and games. You can visit their website and search for Plague Inc., or use this link: [https://apkpure.com/plague-inc/com.miniclip.plagueinc].

    -

    Step 2: Enable Unknown Sources on Your Device

    -

The next step is to enable unknown sources in your device settings, which will allow you to install apps from sources other than the Play Store. To do this, follow these steps:

    -
      -
    • Go to your device settings and look for the security or privacy option.
    • -
    • Find the option that says "Unknown sources" or "Install unknown apps" and toggle it on.
    • -
    • A warning message will pop up, telling you about the risks of installing apps from unknown sources. Tap on "OK" or "Allow" to proceed.
    • -
    -

    Note that the exact steps and names of the options might vary depending on your device model and Android version. You can also disable unknown sources after installing the APK file if you want to.

    -

    Step 3: Download the APK File and Tap on It

    -

    The third step is to download the APK file from the source that you have chosen, and tap on it to start the installation process. To do this, follow these steps:

    -
      -
    • Open your browser and go to the website or the link that provides the Plague Inc. APK file.
    • -
    • Tap on the download button or link, and wait for the download to finish.
    • -
    • Once the download is complete, go to your downloads folder or notification bar, and tap on the APK file.
    • -
    -

    Step 4: Follow the Installation Instructions and Launch the Game

    -

    The final step is to follow the installation instructions that will appear on your screen, and launch the game once it is installed. To do this, follow these steps:

    -
      -
    • A prompt will ask you if you want to install the app. Tap on "Install" or "Yes" to continue.
    • -
    • The installation process will begin, and it might take a few seconds or minutes depending on your device speed and APK file size.
    • -
    • Once the installation is done, a message will say "App installed" or "Done". Tap on "Open" or "Launch" to start the game.
    • -
    • You can also find the game icon on your home screen or app drawer, and tap on it to play it anytime.
    • -
    -

    Congratulations! You have successfully downloaded and installed Plague Inc. APK latest version on your device. Now you can enjoy playing this amazing game with all its features and content.

    How to Play Plague Inc. APK Latest Version

    -

    Now that you have installed Plague Inc. APK latest version on your device, you might be wondering how to play it. Plague Inc. is a game that requires strategy, creativity, and patience, as you try to create and spread a deadly disease across the world. Here are some basic steps and tips on how to play Plague Inc. APK latest version:

    -

    Choose a Game Mode and a Disease Type

    -

    The first thing you need to do is to choose a game mode and a disease type. Plague Inc. offers different game modes, such as:

    -
      -
    • Main Game: This is the standard mode, where you can choose from 12 different disease types, each with their own strengths and weaknesses. You can also unlock 7 special scenarios, such as the zombie apocalypse, the vampire plague, or the planet of the apes.
    • -
    • Custom Scenarios: This is where you can play user-created scenarios, or create your own using the scenario creator. You can find thousands of scenarios on various topics and themes, such as historical events, fictional stories, or real-life outbreaks.
    • -
    • Speed Run: This is where you can compete with other players to see who can infect and kill the world faster. You can choose from 4 different speed run modes, such as Casual, Normal, Brutal, or Mega Brutal.
    • -
    • The Cure: This is where you can play as the cure team, trying to stop a deadly pandemic from spreading and killing everyone. You can use various tools and strategies, such as contact tracing, lockdowns, vaccines, or quarantines.
    • -
    -

    After choosing a game mode, you need to choose a disease type. Plague Inc. offers 12 different disease types, such as:

    -
      -
    • Bacteria: This is the most common and easy-to-play disease type. It has no special abilities, but it can survive in all climates.
    • -
    • Virus: This is a fast and aggressive disease type. It mutates rapidly, making it hard to cure, but also hard to control.
    • -
    • Fungus: This is a slow and stealthy disease type. It has a hard time spreading across countries, but it can use spores to infect new areas.
    • -
    • Parasite: This is a complex and intelligent disease type. It has a low severity, making it hard to detect, but it also has a low infectivity.
    • -
    • Prion: This is a subtle and incurable disease type. It has a long incubation period, making it hard to notice, but it also has a high lethality.
    • -
    • Nano-Virus: This is a synthetic and unstable disease type. It is detected at the start of the game, making it hard to evade, but it also has a high potential for modification.
    • -
    • Bio-Weapon: This is a lethal and unstoppable disease type. It has a high mortality rate, making it easy to kill, but it also has a high cost of evolution.
    • -
    • Neurax Worm: This is a manipulative and mind-controlling disease type. It can enter the brain of its hosts, making them do its bidding, but it also has a high visibility.
    • -
    • Necroa Virus: This is a reanimating and undead disease type. It can turn its victims into zombies, making them attack other humans, but it also has a high cure rate.
    • -
    • Simian Flu: This is an evolving and cross-species disease type. It can infect both humans and apes, making them smarter and more organized, but it also has a high complexity.
    • -
    • Shadow Plague: This is a powerful and ancient disease type. It can create vampires, making them hunt and feed on humans, but it also has a high weakness to sunlight.
    • -
    • Frozen Virus: This is an experimental and frozen disease type. It can survive in extreme cold temperatures, making it hard to eradicate, but it also has a low adaptability.
    • -
    -

    You can unlock new disease types by completing the main game on normal or higher difficulty levels. You can also customize your disease name and modify its genetic code using genes that you can unlock by playing the game.

    -

    Select a Starting Country and Evolve Your Pathogen

    -

    The next thing you need to do is to select a starting country for your pathogen. This will determine how your infection will spread across the world. You can choose from 195 countries in 6 continents, each with their own population size, climate, wealth, and health. You can also see the infection and death rates of each country on the world map.

    -

    After selecting a starting country, you need to evolve your pathogen using DNA points. DNA points are earned by infecting and killing people, popping DNA bubbles, or completing certain objectives. You can use DNA points to buy new traits for your pathogen, such as:

    -
      -
    • Transmission: These traits increase the infectivity of your pathogen, making it spread faster and easier. You can choose from different transmission modes, such as air, water, blood, animals, insects, or rodents.
    • -
    • Symptoms: These traits increase the severity and lethality of your pathogen, making it more noticeable and deadly. You can choose from different symptoms, such as coughing, fever, vomiting, insomnia, paranoia, necrosis, or coma.
    • -
    • Abilities: These traits increase the resistance and adaptability of your pathogen, making it harder to cure and eradicate. You can choose from different abilities, such as drug resistance, cold resistance, heat resistance, or genetic hardening.
    • -
    -

    You can also unlock special traits for each disease type, such as bacterial resilience, viral instability, fungal spores, parasitic DNA theft, prion neural atrophy, nano-virus code fragment, bio-weapon release switch, neurax worm trojan planes, necroa virus zombie reanimation, simian flu ape colonies, shadow plague blood rage, or frozen virus thawing.

    -

    You can evolve your pathogen at any time during the game, but you should also consider the cost and the consequences of each trait. Some traits might make your pathogen more effective in certain regions or situations, but they might also make it more visible or vulnerable in others. You should also balance your infectivity and lethality, as killing too fast might limit your spread or trigger a global response.
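
As a rough illustration of how DNA points constrain your choices, here is a tiny Python model of buying traits. The trait names come from the descriptions above, but the costs and stat values are invented for the example; this is not Plague Inc.'s actual data or code.

```python
# Toy model of spending DNA points on traits. Costs and stat values are
# invented; this is not the game's real balancing data.

from dataclasses import dataclass, field

@dataclass
class Trait:
    name: str
    cost: int                 # DNA points required to evolve this trait
    infectivity: float = 0.0
    severity: float = 0.0
    lethality: float = 0.0

@dataclass
class Pathogen:
    dna_points: int = 0
    traits: list = field(default_factory=list)

    def evolve(self, trait: Trait) -> bool:
        """Buy a trait if you can afford it; otherwise wait for more DNA points."""
        if trait.cost > self.dna_points:
            return False
        self.dna_points -= trait.cost
        self.traits.append(trait)
        return True

    def total(self, stat: str) -> float:
        return sum(getattr(t, stat) for t in self.traits)

plague = Pathogen(dna_points=12)
plague.evolve(Trait("Air 1", cost=7, infectivity=0.30, severity=0.05))
plague.evolve(Trait("Coughing", cost=4, infectivity=0.10, severity=0.10))
print(round(plague.total("infectivity"), 2),   # 0.4  - spreads well
      round(plague.total("severity"), 2),      # 0.15 - still fairly quiet
      plague.dna_points)                       # 1 DNA point left over
```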

    -

    Spread Your Infection and Overcome Human Defenses

    -

    The next thing you need to do is to spread your infection across the world and overcome human defenses. Your goal is to infect and kill every single human on the planet before they find a cure or eradicate your pathogen. You can monitor the progress of your infection and the cure on the bottom right corner of the screen.

    -

    To spread your infection, you need to consider various factors that affect the transmission and the severity of your pathogen. Some of these factors are:

    -
      -
    • Population: The more people there are in a country or a region, the easier it is to infect them. However, densely populated areas might also have better health care and sanitation systems that can slow down your infection.
    • -
    • Climate: The temperature and humidity of a country or a region can affect the survival and the spread of your pathogen. Some pathogens might thrive in hot and wet climates, while others might prefer cold and dry climates. You can evolve traits that can help your pathogen adapt to different climates.
    • -
    • Wealth: The wealth and development of a country or a region can affect the awareness and the response of the people to your pathogen. Richer countries might have better health care and research facilities that can detect and cure your infection faster. You can evolve traits that can help your pathogen resist drugs and genetic analysis.
    • -
    • Borders: The borders and connections of a country or a region can affect the movement and the contact of the people with your pathogen. Some countries might have land borders or sea ports that can facilitate your infection. Others might have airports or railways that can spread your infection globally. You can evolve traits that can help your pathogen travel by air or water.
    • -
    -

    To overcome human defenses, you need to be aware of various events and actions that can affect the cure and the eradication of your pathogen. Some of these events and actions are:

    -
      -
    • News: The news headlines on the top left corner of the screen can give you clues about what is happening in the world and how people are reacting to your infection. You can see things like outbreaks, riots, wars, natural disasters, or political changes that can affect your infection or the cure.
    • -
    • Research: The research bar on the bottom right corner of the screen shows you the progress of the cure research and the countries that are leading it. You can see things like funding, cooperation, breakthroughs, or setbacks that can speed up or slow down the cure. You can also see the cure percentage and the time left until completion.
    • -
    • Actions: The actions menu on the bottom left corner of the screen shows you the actions that you can take to influence the game. You can see things like abilities, symptoms, transmissions, data, world, or genes that you can use to evolve your pathogen, view information, or modify your genetic code.
    • -
    • Events: The events pop-ups on the screen show you the events that are triggered by your infection or the cure. You can see things like infected countries, infected continents, infected islands, infected airports, infected ports, infected animals, infected zombies, infected apes, infected vampires, or infected humans that you can tap to get DNA points or spread your infection.
    • -
    -

    You need to be strategic and adaptive when dealing with human defenses, as they can vary depending on the game mode, the disease type, and the difficulty level. You can use various tactics to sabotage the cure research, such as evolving lethal symptoms, spreading misinformation, creating diversions, or destroying labs. You can also use various tactics to evade human eradication, such as hiding your presence, resisting drugs, adapting to climates, or infecting new hosts.

    -

    Wipe Out Humanity or Save It (Depending on the Mode)

    -

    The final thing you need to do is to wipe out humanity or save it, depending on the game mode that you have chosen. Your goal is to achieve one of these outcomes before the other side does. You can monitor the status of humanity on the top right corner of the screen.

    -

    If you are playing as the disease team, your goal is to wipe out humanity by infecting and killing everyone on the planet. You can achieve this by evolving your pathogen to be more infectious and lethal, and by spreading it to every country and region. You can also use special traits or events to create zombies, vampires, apes, or other creatures that can help you destroy humanity.

    -

    If you are playing as the cure team, your goal is to save humanity by curing and eradicating the disease. You can achieve this by researching and deploying a cure for the disease, and by implementing measures to slow down its spread and reduce its impact. You can also use special tools or events to quarantine zones, send teams, launch drones, or nuke cities that are infected.

    -

    Whether you are playing as the disease team or the cure team, you need to be aware of your opponents' moves and counter them accordingly. You also need to be prepared for unexpected events and challenges that might arise during the game. You can win or lose in various ways depending on the game mode, the disease type, and the difficulty level.

    -

    Tips and Tricks for Plague Inc. APK Latest Version

    -

    Plague Inc. is a game that requires skill and strategy to master. It is not easy to wipe out humanity or save it from a deadly plague. However, with some practice and some tips and tricks, you can improve your chances of winning and have more fun playing the game. Here are some tips and tricks for Plague Inc. APK latest version that you can use:

    -

    Infect Before Killing

    -

    One of the most important tips for Plague Inc. is to infect as many people as possible before killing them. This will ensure that your pathogen spreads widely and globally, and that it does not alert the humans too early. If you kill too fast, you might limit your spread or trigger a global response that can stop you.

    -

    To infect before killing, you should focus on evolving transmission traits that can increase your infectivity, especially in different climates and regions. You should also avoid evolving lethal symptoms that can increase your severity and lethality, or devolve them if they mutate randomly. You can also use abilities that can help you hide your presence, such as bacterial shell, viral latency, or frozen virus.

    -

    However, you should also be careful not to infect too slowly, as this might give the humans enough time to research a cure or eradicate your pathogen. You should also be ready to evolve lethal symptoms when you have infected most or all of the world, or when the cure is close to completion. You can also use abilities that can help you kill faster, such as bio-weapon activation, necroa virus reanimation, or shadow plague blood rage.
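
The trade-off is easy to see in a toy simulation. The model below is not Plague Inc.'s actual simulation; the equations and parameters are made up purely to show why a pathogen that kills much faster than it spreads burns itself out.

```python
# Made-up outbreak model (not the game's real one) illustrating why
# "infect before killing" matters.

def run_outbreak(infectivity, lethality, population=1_000_000, days=365):
    healthy, infected, dead = population - 1.0, 1.0, 0.0
    for _ in range(days):
        # New infections scale with contact between infected and healthy people.
        new_infections = min(healthy, infectivity * infected * healthy / population)
        new_deaths = lethality * infected
        healthy -= new_infections
        infected += new_infections - new_deaths
        dead += new_deaths
        if infected < 1:   # the outbreak has burned out
            break
    return round(dead), round(healthy)

# Low lethality: the infection quietly reaches virtually everyone (healthy ~ 0).
print(run_outbreak(infectivity=0.4, lethality=0.001))
# Lethality higher than infectivity from day one: hosts die faster than they
# spread it, and almost the entire population is never infected at all.
print(run_outbreak(infectivity=0.4, lethality=0.5))
```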

    -

    Start in an Isolated Country

    -

    Another tip for Plague Inc. is to start in an isolated country that has few connections or borders with other countries. This will ensure that your pathogen spreads slowly and stealthily, and that it does not attract too much attention from the humans. If you start in a well-connected or populous country, you might spread too fast or be detected too early.

    -

    To start in an isolated country, you should choose a country that has one or more of these characteristics:

    -
      -
    • It is an island or a landlocked country.
    • -
    • It has a low population or a low population density.
    • -
    • It has a poor or a rural economy.
    • -
    • It has a cold or a hot climate.
    • -
    -

    Some examples of isolated countries that you can start in are Greenland, Iceland, Madagascar, New Zealand, Bolivia, Central Africa, Saudi Arabia, or Kazakhstan. However, you should also consider the advantages and disadvantages of each country, such as their climate, wealth, health, and research capabilities.

    -

    However, you should also be careful not to start in a country that is too isolated or too hard to infect, as this might prevent you from spreading to other countries or regions. You should also be ready to evolve traits that can help you travel by air or water, such as air transmission, water transmission, bird transmission, rodent transmission, or trojan planes.

    -

    Watch the News and Research Countries

    -

    Another tip for Plague Inc. is to watch the news headlines and research the countries that are affected by your infection. This will help you understand what is happening in the world and how the humans are reacting to your pathogen. You can also use this information to plan your strategy and adapt to different situations.

    -

    To watch the news headlines, you should pay attention to the top left corner of the screen, where you can see various news stories and events that are related to your infection or the cure. You can see things like:

    -
      -
    • The first infection of a country or a continent.
    • -
    • The first death of a country or a continent.
    • -
    • The first detection of your pathogen by the humans.
    • -
    • The first announcement of the cure research by the humans.
    • -
    • The first outbreak of riots or wars caused by your infection.
    • -
    • The first occurrence of natural disasters or political changes influenced by your infection.
    • -
    -

To research the countries, you should tap on any country on the world map, and see various information and data about that country, such as:

• The population size and the percentage of infected, healthy, dead, or cured people.
• The climate and the wealth of the country, and how they affect your infection or the cure.
• The research and the health care capabilities of the country, and how they contribute to the cure research or the eradication efforts.
• The borders and the connections of the country, and how they facilitate your infection or the cure deployment.

You can also see a table that shows the infection and death rates of each country, and sort them by different criteria, such as:

• The number of infected people or the percentage of infected population.
• The number of dead people or the percentage of dead population.
• The number of healthy people or the percentage of healthy population.
• The number of cured people or the percentage of cured population.

You can use this table to see which countries are most or least affected by your infection or the cure, and to prioritize your targets or your defenses.
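
If it helps to picture that table, here is a small Python sketch that sorts a few country records by infected percentage. The numbers are invented for illustration; they are not data from the game.

```python
# Invented example data - only to illustrate sorting the country table
# by percentage of infected population.

countries = [
    {"name": "Greenland", "population": 56_000,        "infected": 12_000,      "dead": 300},
    {"name": "India",     "population": 1_380_000_000, "infected": 450_000_000, "dead": 2_000_000},
    {"name": "Iceland",   "population": 360_000,       "infected": 0,           "dead": 0},
]

def infected_pct(country):
    return country["infected"] / country["population"]

# Most affected countries first - useful for prioritising targets or defenses.
for c in sorted(countries, key=infected_pct, reverse=True):
    print(f'{c["name"]}: {infected_pct(c):.1%} infected, {c["dead"]:,} dead')
```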

    Plan Your Evolution and Balance Your Traits

-

Another tip for Plague Inc. is to plan your evolution and balance your traits. This means that you should have a clear idea of what kind of pathogen you want to create and what kind of traits you want to evolve. You should also consider the cost and the benefit of each trait, and how they interact with each other.

To plan your evolution, you should think about these questions:

• What is your overall strategy? Do you want to be stealthy or aggressive, fast or slow, simple or complex?
• What is your disease type? How does it affect your evolution options and your gameplay style?
• What is your starting country? How does it affect your initial spread and your infection potential?
• What are your main challenges? How can you overcome them with your traits?

To balance your traits, you should think about these factors:

• Infectivity: how easily your pathogen spreads from person to person. You want high infectivity, but not so high that it alerts the humans too early.
• Severity: how noticeable and harmful your pathogen is to the humans. You want low severity, but not so low that it reduces your DNA points or your lethality.
• Lethality: how deadly your pathogen is to the humans. You want high lethality, but not so high that it kills too fast or limits your spread.
• Resistance: how well your pathogen survives in different environments and situations. You want high resistance, but not so high that it increases your cost or reduces your infectivity.

You should also be aware of the trade-offs and synergies between traits. Some traits have positive or negative effects on other traits, such as:

• Air transmission increases infectivity but also severity.
• Necrosis increases lethality but also infectivity.
• Drug resistance increases resistance but also cost.

Finally, be aware of the special abilities and events that are unique to each disease type. Some of them can have powerful effects on your infection or the cure, such as:

• Bacterial resilience reduces all climate penalties.
• Viral instability increases the mutation rate.
• Fungal spores infect new countries randomly.
• Parasitic DNA theft steals DNA points from the cure.
• Prion neural atrophy slows down cure research.
• Nano-virus code fragment reveals its location.
• Bio-weapon release switch increases lethality dramatically.
• Neurax worm trojan planes infect new countries directly.
• Necroa virus zombie reanimation creates zombies from dead people.
• Simian flu ape colonies create intelligent apes that can help you spread the infection.
• Shadow plague blood rage allows you to control a vampire that can hunt and feed on humans.
• Frozen virus thawing increases infectivity in cold countries.

    Conclusion

    -

    Plague Inc. is a game that challenges you to create and spread a deadly disease across the world, or to stop one from doing so. It is a game that requires skill and strategy, as well as creativity and imagination. It is also a game that is fun and addictive, as well as educational and informative.

    -

    In this article, we have shown you how to download and install Plague Inc. APK latest version on your device, how to play it, and some tips and tricks to help you master it. We hope that you have enjoyed reading this article, and that you have learned something new.

    -

    Now that you have everything you need to play Plague Inc. APK latest version, why not give it a try? You can download it from [APKPure] using this link: [https://apkpure.com/plague-inc/com.miniclip.plagueinc]. You can also visit their website for more APK files for various apps and games.

    -

    Thank you for reading this article, and happy gaming!

FAQs

    Here are some frequently asked questions about Plague Inc. APK latest version that you might find useful:

    -

    What are the differences between Plague Inc. and Plague Inc: Evolved?

    -

    Plague Inc. is the original mobile version of the game, while Plague Inc: Evolved is the PC and console version of the game. Plague Inc: Evolved has some features and content that are not available on Plague Inc., such as:

    -
      -
    • Improved graphics and animations.
    • -
    • Multiplayer and co-op modes.
    • -
    • Custom scenario creator and editor.
    • -
    • Achievements and leaderboards.
    • -
    • More disease types and scenarios.
    • -
    -

    However, Plague Inc. APK latest version also has some features and content that are not available on Plague Inc: Evolved, such as:

    -
      -
    • The Cure mode.
    • -
    • The Frozen Virus disease type.
    • -
    • The Fake News scenario.
    • -
    -

    How can I unlock all disease types and scenarios in Plague Inc. APK?

    -

    You can unlock all disease types and scenarios in Plague Inc. APK by completing the main game on normal or higher difficulty levels, or by purchasing them with real money. You can also use mods or cheats to unlock them, but this might affect your gameplay experience or cause errors.

    -

    Is Plague Inc. APK safe and legal to use?

    -

    Plague Inc. APK is safe and legal to use, as long as you download it from a trusted and reputable source, such as [APKPure]. However, you should also be aware of the risks and responsibilities of using an APK file, such as:

    -
      -
    • You might violate the terms and conditions of the game developer or the Play Store.
    • -
    • You might not receive official updates or support from the game developer or the Play Store.
    • -
    • You might encounter bugs or glitches that are not present on the Play Store version.
    • -
    • You might expose your device or data to security threats or malware.
    • -
    -

    How realistic is Plague Inc. as a pandemic simulator?

    -

    Plague Inc. is a game that is based on scientific research and data, but it is not a realistic or accurate pandemic simulator. It is a game that simplifies and exaggerates some aspects of disease transmission and human behavior, for the sake of fun and entertainment. It is not a game that predicts or reflects real-life scenarios or outcomes.

    -

    Can I play Plague Inc. APK offline or with friends?

    -

    You can play Plague Inc. APK offline, as it does not require an internet connection to run. However, you might need an internet connection to download the APK file, to access some features or content, or to update the game.

    -

Playing with friends is more limited: full multiplayer and co-op modes are features of Plague Inc: Evolved on PC and consoles rather than of the mobile APK. On mobile you can still compare results and speed-run times with other players, but for direct multiplayer you will need Plague Inc: Evolved, an internet connection, and a compatible device.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Chrome For 64 Bit Windows 7 Download [NEW].md b/spaces/contluForse/HuggingGPT/assets/Chrome For 64 Bit Windows 7 Download [NEW].md deleted file mode 100644 index c8080200affd9e1a36e07557a8d34a8294ea1ed4..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Chrome For 64 Bit Windows 7 Download [NEW].md +++ /dev/null @@ -1,6 +0,0 @@ -

    chrome for 64 bit windows 7 download


    Download Zip ✔✔✔ https://ssurll.com/2uzvZw



    - -This is a temporary test as the 64-bit package will soon be combined into the main ... This is an online installer that will download Google Chrome Dev during setup ... System Requirements: Windows 7, 8, 10; App License: Freeware (Partially ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/layers/bottleneck_attn.py b/spaces/cooelf/Multimodal-CoT/timm/models/layers/bottleneck_attn.py deleted file mode 100644 index 9604e8a6cfb992c50bc1fc15c54979f30b1d2c94..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/models/layers/bottleneck_attn.py +++ /dev/null @@ -1,126 +0,0 @@ -""" Bottleneck Self Attention (Bottleneck Transformers) - -Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 - -@misc{2101.11605, -Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, -Title = {Bottleneck Transformers for Visual Recognition}, -Year = {2021}, -} - -Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 - -This impl is a WIP but given that it is based on the ref gist likely not too far off. - -Hacked together by / Copyright 2021 Ross Wightman -""" -from typing import List - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .helpers import to_2tuple -from .weight_init import trunc_normal_ - - -def rel_logits_1d(q, rel_k, permute_mask: List[int]): - """ Compute relative logits along one dimension - - As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 - Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 - - Args: - q: (batch, heads, height, width, dim) - rel_k: (2 * width - 1, dim) - permute_mask: permute output dim according to this - """ - B, H, W, dim = q.shape - x = (q @ rel_k.transpose(-1, -2)) - x = x.reshape(-1, W, 2 * W -1) - - # pad to shift from relative to absolute indexing - x_pad = F.pad(x, [0, 1]).flatten(1) - x_pad = F.pad(x_pad, [0, W - 1]) - - # reshape and slice out the padded elements - x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) - x = x_pad[:, :W, W - 1:] - - # reshape and tile - x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) - return x.permute(permute_mask) - - -class PosEmbedRel(nn.Module): - """ Relative Position Embedding - As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 - Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 - """ - def __init__(self, feat_size, dim_head, scale): - super().__init__() - self.height, self.width = to_2tuple(feat_size) - self.dim_head = dim_head - self.scale = scale - self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * self.scale) - self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * self.scale) - - def forward(self, q): - B, num_heads, HW, _ = q.shape - - # relative logits in width dimension. - q = q.reshape(B * num_heads, self.height, self.width, -1) - rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) - - # relative logits in height dimension. 
- q = q.transpose(1, 2) - rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) - - rel_logits = rel_logits_h + rel_logits_w - rel_logits = rel_logits.reshape(B, num_heads, HW, HW) - return rel_logits - - -class BottleneckAttn(nn.Module): - """ Bottleneck Attention - Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 - """ - def __init__(self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, qkv_bias=False): - super().__init__() - assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' - dim_out = dim_out or dim - assert dim_out % num_heads == 0 - self.num_heads = num_heads - self.dim_out = dim_out - self.dim_head = dim_out // num_heads - self.scale = self.dim_head ** -0.5 - - self.qkv = nn.Conv2d(dim, self.dim_out * 3, 1, bias=qkv_bias) - - # NOTE I'm only supporting relative pos embedding for now - self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head, scale=self.scale) - - self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() - - def reset_parameters(self): - trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) - trunc_normal_(self.pos_embed.height_rel, std=self.scale) - trunc_normal_(self.pos_embed.width_rel, std=self.scale) - - def forward(self, x): - B, C, H, W = x.shape - assert H == self.pos_embed.height and W == self.pos_embed.width - - x = self.qkv(x) # B, 3 * num_heads * dim_head, H, W - x = x.reshape(B, -1, self.dim_head, H * W).transpose(-1, -2) - q, k, v = torch.split(x, self.num_heads, dim=1) - - attn_logits = (q @ k.transpose(-1, -2)) * self.scale - attn_logits = attn_logits + self.pos_embed(q) # B, num_heads, H * W, H * W - - attn_out = attn_logits.softmax(dim = -1) - attn_out = (attn_out @ v).transpose(1, 2).reshape(B, self.dim_out, H, W) # B, dim_out, H, W - attn_out = self.pool(attn_out) - return attn_out - - diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/senet.py b/spaces/cooelf/Multimodal-CoT/timm/models/senet.py deleted file mode 100644 index 3d0ba7b3ee573523523c3af574c835ccdf502a32..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/models/senet.py +++ /dev/null @@ -1,467 +0,0 @@ -""" -SEResNet implementation from Cadene's pretrained models -https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py -Additional credit to https://github.com/creafz - -Original model: https://github.com/hujie-frank/SENet - -ResNet code gently borrowed from -https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py - -FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate -support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. 
-""" -import math -from collections import OrderedDict - -import torch.nn as nn -import torch.nn.functional as F - -from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD -from .helpers import build_model_with_cfg -from .layers import create_classifier -from .registry import register_model - -__all__ = ['SENet'] - - -def _cfg(url='', **kwargs): - return { - 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), - 'crop_pct': 0.875, 'interpolation': 'bilinear', - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, - 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', - **kwargs - } - - -default_cfgs = { - 'legacy_senet154': - _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth'), - 'legacy_seresnet18': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', - interpolation='bicubic'), - 'legacy_seresnet34': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), - 'legacy_seresnet50': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), - 'legacy_seresnet101': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), - 'legacy_seresnet152': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), - 'legacy_seresnext26_32x4d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', - interpolation='bicubic'), - 'legacy_seresnext50_32x4d': - _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth'), - 'legacy_seresnext101_32x4d': - _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth'), -} - - -def _weight_init(m): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1.) - nn.init.constant_(m.bias, 0.) - - -class SEModule(nn.Module): - - def __init__(self, channels, reduction): - super(SEModule, self).__init__() - self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) - self.relu = nn.ReLU(inplace=True) - self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) - self.sigmoid = nn.Sigmoid() - - def forward(self, x): - module_input = x - x = x.mean((2, 3), keepdim=True) - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - x = self.sigmoid(x) - return module_input * x - - -class Bottleneck(nn.Module): - """ - Base class for bottlenecks that implements `forward()` method. - """ - - def forward(self, x): - shortcut = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - shortcut = self.downsample(x) - - out = self.se_module(out) + shortcut - out = self.relu(out) - - return out - - -class SEBottleneck(Bottleneck): - """ - Bottleneck for SENet154. 
- """ - expansion = 4 - - def __init__(self, inplanes, planes, groups, reduction, stride=1, - downsample=None): - super(SEBottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes * 2) - self.conv2 = nn.Conv2d( - planes * 2, planes * 4, kernel_size=3, stride=stride, - padding=1, groups=groups, bias=False) - self.bn2 = nn.BatchNorm2d(planes * 4) - self.conv3 = nn.Conv2d( - planes * 4, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.se_module = SEModule(planes * 4, reduction=reduction) - self.downsample = downsample - self.stride = stride - - -class SEResNetBottleneck(Bottleneck): - """ - ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe - implementation and uses `stride=stride` in `conv1` and not in `conv2` - (the latter is used in the torchvision implementation of ResNet). - """ - expansion = 4 - - def __init__(self, inplanes, planes, groups, reduction, stride=1, - downsample=None): - super(SEResNetBottleneck, self).__init__() - self.conv1 = nn.Conv2d( - inplanes, planes, kernel_size=1, bias=False, stride=stride) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d( - planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.se_module = SEModule(planes * 4, reduction=reduction) - self.downsample = downsample - self.stride = stride - - -class SEResNeXtBottleneck(Bottleneck): - """ - ResNeXt bottleneck type C with a Squeeze-and-Excitation module. - """ - expansion = 4 - - def __init__(self, inplanes, planes, groups, reduction, stride=1, - downsample=None, base_width=4): - super(SEResNeXtBottleneck, self).__init__() - width = math.floor(planes * (base_width / 64)) * groups - self.conv1 = nn.Conv2d( - inplanes, width, kernel_size=1, bias=False, stride=1) - self.bn1 = nn.BatchNorm2d(width) - self.conv2 = nn.Conv2d( - width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) - self.bn2 = nn.BatchNorm2d(width) - self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.se_module = SEModule(planes * 4, reduction=reduction) - self.downsample = downsample - self.stride = stride - - -class SEResNetBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): - super(SEResNetBlock, self).__init__() - self.conv1 = nn.Conv2d( - inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d( - planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.se_module = SEModule(planes, reduction=reduction) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - shortcut = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - if self.downsample is not None: - shortcut = self.downsample(x) - - out = self.se_module(out) + shortcut - out = self.relu(out) - - return out - - -class SENet(nn.Module): - - def __init__(self, block, layers, groups, reduction, drop_rate=0.2, - in_chans=3, 
inplanes=64, input_3x3=False, downsample_kernel_size=1, - downsample_padding=0, num_classes=1000, global_pool='avg'): - """ - Parameters - ---------- - block (nn.Module): Bottleneck class. - - For SENet154: SEBottleneck - - For SE-ResNet models: SEResNetBottleneck - - For SE-ResNeXt models: SEResNeXtBottleneck - layers (list of ints): Number of residual blocks for 4 layers of the - network (layer1...layer4). - groups (int): Number of groups for the 3x3 convolution in each - bottleneck block. - - For SENet154: 64 - - For SE-ResNet models: 1 - - For SE-ResNeXt models: 32 - reduction (int): Reduction ratio for Squeeze-and-Excitation modules. - - For all models: 16 - dropout_p (float or None): Drop probability for the Dropout layer. - If `None` the Dropout layer is not used. - - For SENet154: 0.2 - - For SE-ResNet models: None - - For SE-ResNeXt models: None - inplanes (int): Number of input channels for layer1. - - For SENet154: 128 - - For SE-ResNet models: 64 - - For SE-ResNeXt models: 64 - input_3x3 (bool): If `True`, use three 3x3 convolutions instead of - a single 7x7 convolution in layer0. - - For SENet154: True - - For SE-ResNet models: False - - For SE-ResNeXt models: False - downsample_kernel_size (int): Kernel size for downsampling convolutions - in layer2, layer3 and layer4. - - For SENet154: 3 - - For SE-ResNet models: 1 - - For SE-ResNeXt models: 1 - downsample_padding (int): Padding for downsampling convolutions in - layer2, layer3 and layer4. - - For SENet154: 1 - - For SE-ResNet models: 0 - - For SE-ResNeXt models: 0 - num_classes (int): Number of outputs in `last_linear` layer. - - For all models: 1000 - """ - super(SENet, self).__init__() - self.inplanes = inplanes - self.num_classes = num_classes - self.drop_rate = drop_rate - if input_3x3: - layer0_modules = [ - ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), - ('bn1', nn.BatchNorm2d(64)), - ('relu1', nn.ReLU(inplace=True)), - ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), - ('bn2', nn.BatchNorm2d(64)), - ('relu2', nn.ReLU(inplace=True)), - ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), - ('bn3', nn.BatchNorm2d(inplanes)), - ('relu3', nn.ReLU(inplace=True)), - ] - else: - layer0_modules = [ - ('conv1', nn.Conv2d( - in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), - ('bn1', nn.BatchNorm2d(inplanes)), - ('relu1', nn.ReLU(inplace=True)), - ] - self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) - # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`. 
- self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) - self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] - self.layer1 = self._make_layer( - block, - planes=64, - blocks=layers[0], - groups=groups, - reduction=reduction, - downsample_kernel_size=1, - downsample_padding=0 - ) - self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] - self.layer2 = self._make_layer( - block, - planes=128, - blocks=layers[1], - stride=2, - groups=groups, - reduction=reduction, - downsample_kernel_size=downsample_kernel_size, - downsample_padding=downsample_padding - ) - self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] - self.layer3 = self._make_layer( - block, - planes=256, - blocks=layers[2], - stride=2, - groups=groups, - reduction=reduction, - downsample_kernel_size=downsample_kernel_size, - downsample_padding=downsample_padding - ) - self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] - self.layer4 = self._make_layer( - block, - planes=512, - blocks=layers[3], - stride=2, - groups=groups, - reduction=reduction, - downsample_kernel_size=downsample_kernel_size, - downsample_padding=downsample_padding - ) - self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] - self.num_features = 512 * block.expansion - self.global_pool, self.last_linear = create_classifier( - self.num_features, self.num_classes, pool_type=global_pool) - - for m in self.modules(): - _weight_init(m) - - def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, - downsample_kernel_size=1, downsample_padding=0): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d( - self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, - stride=stride, padding=downsample_padding, bias=False), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes, groups, reduction)) - - return nn.Sequential(*layers) - - def get_classifier(self): - return self.last_linear - - def reset_classifier(self, num_classes, global_pool='avg'): - self.num_classes = num_classes - self.global_pool, self.last_linear = create_classifier( - self.num_features, self.num_classes, pool_type=global_pool) - - def forward_features(self, x): - x = self.layer0(x) - x = self.pool0(x) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - return x - - def logits(self, x): - x = self.global_pool(x) - if self.drop_rate > 0.: - x = F.dropout(x, p=self.drop_rate, training=self.training) - x = self.last_linear(x) - return x - - def forward(self, x): - x = self.forward_features(x) - x = self.logits(x) - return x - - -def _create_senet(variant, pretrained=False, **kwargs): - return build_model_with_cfg( - SENet, variant, pretrained, - default_cfg=default_cfgs[variant], - **kwargs) - - -@register_model -def legacy_seresnet18(pretrained=False, **kwargs): - model_args = dict( - block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) - return _create_senet('legacy_seresnet18', pretrained, **model_args) - - -@register_model -def legacy_seresnet34(pretrained=False, **kwargs): - model_args = dict( - block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) - return 
_create_senet('legacy_seresnet34', pretrained, **model_args) - - -@register_model -def legacy_seresnet50(pretrained=False, **kwargs): - model_args = dict( - block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) - return _create_senet('legacy_seresnet50', pretrained, **model_args) - - -@register_model -def legacy_seresnet101(pretrained=False, **kwargs): - model_args = dict( - block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) - return _create_senet('legacy_seresnet101', pretrained, **model_args) - - -@register_model -def legacy_seresnet152(pretrained=False, **kwargs): - model_args = dict( - block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) - return _create_senet('legacy_seresnet152', pretrained, **model_args) - - -@register_model -def legacy_senet154(pretrained=False, **kwargs): - model_args = dict( - block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, - downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) - return _create_senet('legacy_senet154', pretrained, **model_args) - - -@register_model -def legacy_seresnext26_32x4d(pretrained=False, **kwargs): - model_args = dict( - block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs) - return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) - - -@register_model -def legacy_seresnext50_32x4d(pretrained=False, **kwargs): - model_args = dict( - block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) - return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) - - -@register_model -def legacy_seresnext101_32x4d(pretrained=False, **kwargs): - model_args = dict( - block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) - return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/visualizers/base.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/visualizers/base.py deleted file mode 100644 index 675f01682ddf5e31b6cc341735378c6f3b242e49..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/visualizers/base.py +++ /dev/null @@ -1,73 +0,0 @@ -import abc -from typing import Dict, List - -import numpy as np -import torch -from skimage import color -from skimage.segmentation import mark_boundaries - -from . 
import colors - -COLORS, _ = colors.generate_colors(151) # 151 - max classes for semantic segmentation - - -class BaseVisualizer: - @abc.abstractmethod - def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None): - """ - Take a batch, make an image from it and visualize - """ - raise NotImplementedError() - - -def visualize_mask_and_images(images_dict: Dict[str, np.ndarray], keys: List[str], - last_without_mask=True, rescale_keys=None, mask_only_first=None, - black_mask=False) -> np.ndarray: - mask = images_dict['mask'] > 0.5 - result = [] - for i, k in enumerate(keys): - img = images_dict[k] - img = np.transpose(img, (1, 2, 0)) - - if rescale_keys is not None and k in rescale_keys: - img = img - img.min() - img /= img.max() + 1e-5 - if len(img.shape) == 2: - img = np.expand_dims(img, 2) - - if img.shape[2] == 1: - img = np.repeat(img, 3, axis=2) - elif (img.shape[2] > 3): - img_classes = img.argmax(2) - img = color.label2rgb(img_classes, colors=COLORS) - - if mask_only_first: - need_mark_boundaries = i == 0 - else: - need_mark_boundaries = i < len(keys) - 1 or not last_without_mask - - if need_mark_boundaries: - if black_mask: - img = img * (1 - mask[0][..., None]) - img = mark_boundaries(img, - mask[0], - color=(1., 0., 0.), - outline_color=(1., 1., 1.), - mode='thick') - result.append(img) - return np.concatenate(result, axis=1) - - -def visualize_mask_and_images_batch(batch: Dict[str, torch.Tensor], keys: List[str], max_items=10, - last_without_mask=True, rescale_keys=None) -> np.ndarray: - batch = {k: tens.detach().cpu().numpy() for k, tens in batch.items() - if k in keys or k == 'mask'} - - batch_size = next(iter(batch.values())).shape[0] - items_to_vis = min(batch_size, max_items) - result = [] - for i in range(items_to_vis): - cur_dct = {k: tens[i] for k, tens in batch.items()} - result.append(visualize_mask_and_images(cur_dct, keys, last_without_mask=last_without_mask, - rescale_keys=rescale_keys)) - return np.concatenate(result, axis=0) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/leres/pix2pix/models/base_model.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/leres/pix2pix/models/base_model.py deleted file mode 100644 index a90c5f832404bc44ef247b42a72988a37fc834cb..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/leres/pix2pix/models/base_model.py +++ /dev/null @@ -1,241 +0,0 @@ -import os -import torch, gc -from modules import devices -from collections import OrderedDict -from abc import ABC, abstractmethod -from . import networks - - -class BaseModel(ABC): - """This class is an abstract base class (ABC) for models. - To create a subclass, you need to implement the following five functions: - -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). - -- : unpack data from dataset and apply preprocessing. - -- : produce intermediate results. - -- : calculate losses, gradients, and update network weights. - -- : (optionally) add model-specific options and set default options. - """ - - def __init__(self, opt): - """Initialize the BaseModel class. - - Parameters: - opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions - - When creating your custom class, you need to implement your own initialization. - In this function, you should first call - Then, you need to define four lists: - -- self.loss_names (str list): specify the training losses that you want to plot and save. 
- -- self.model_names (str list): define networks used in our training. - -- self.visual_names (str list): specify the images that you want to display and save. - -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. - """ - self.opt = opt - self.gpu_ids = opt.gpu_ids - self.isTrain = opt.isTrain - self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU - self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir - if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark. - torch.backends.cudnn.benchmark = True - self.loss_names = [] - self.model_names = [] - self.visual_names = [] - self.optimizers = [] - self.image_paths = [] - self.metric = 0 # used for learning rate policy 'plateau' - - @staticmethod - def modify_commandline_options(parser, is_train): - """Add new model-specific options, and rewrite default values for existing options. - - Parameters: - parser -- original option parser - is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. - - Returns: - the modified parser. - """ - return parser - - @abstractmethod - def set_input(self, input): - """Unpack input data from the dataloader and perform necessary pre-processing steps. - - Parameters: - input (dict): includes the data itself and its metadata information. - """ - pass - - @abstractmethod - def forward(self): - """Run forward pass; called by both functions and .""" - pass - - @abstractmethod - def optimize_parameters(self): - """Calculate losses, gradients, and update network weights; called in every training iteration""" - pass - - def setup(self, opt): - """Load and print networks; create schedulers - - Parameters: - opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions - """ - if self.isTrain: - self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] - if not self.isTrain or opt.continue_train: - load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch - self.load_networks(load_suffix) - self.print_networks(opt.verbose) - - def eval(self): - """Make models eval mode during test time""" - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, 'net' + name) - net.eval() - - def test(self): - """Forward function used in test time. 
- - This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop - It also calls <compute_visuals> to produce additional visualization results - """ - with torch.no_grad(): - self.forward() - self.compute_visuals() - - def compute_visuals(self): - """Calculate additional output images for visdom and HTML visualization""" - pass - - def get_image_paths(self): - """ Return image paths that are used to load current data""" - return self.image_paths - - def update_learning_rate(self): - """Update learning rates for all the networks; called at the end of every epoch""" - old_lr = self.optimizers[0].param_groups[0]['lr'] - for scheduler in self.schedulers: - if self.opt.lr_policy == 'plateau': - scheduler.step(self.metric) - else: - scheduler.step() - - lr = self.optimizers[0].param_groups[0]['lr'] - print('learning rate %.7f -> %.7f' % (old_lr, lr)) - - def get_current_visuals(self): - """Return visualization images. train.py will display these images with visdom, and save the images to an HTML file""" - visual_ret = OrderedDict() - for name in self.visual_names: - if isinstance(name, str): - visual_ret[name] = getattr(self, name) - return visual_ret - - def get_current_losses(self): - """Return training losses / errors. train.py will print out these errors on console, and save them to a file""" - errors_ret = OrderedDict() - for name in self.loss_names: - if isinstance(name, str): - errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number - return errors_ret - - def save_networks(self, epoch): - """Save all the networks to the disk. - - Parameters: - epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) - """ - for name in self.model_names: - if isinstance(name, str): - save_filename = '%s_net_%s.pth' % (epoch, name) - save_path = os.path.join(self.save_dir, save_filename) - net = getattr(self, 'net' + name) - - if len(self.gpu_ids) > 0 and torch.cuda.is_available(): - torch.save(net.module.cpu().state_dict(), save_path) - net.cuda(self.gpu_ids[0]) - else: - torch.save(net.cpu().state_dict(), save_path) - - def unload_network(self, name): - """Unload network and gc. - """ - if isinstance(name, str): - net = getattr(self, 'net' + name) - del net - gc.collect() - devices.torch_gc() - return None - - def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): - """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" - key = keys[i] - if i + 1 == len(keys): # at the end, pointing to a parameter/buffer - if module.__class__.__name__.startswith('InstanceNorm') and \ - (key == 'running_mean' or key == 'running_var'): - if getattr(module, key) is None: - state_dict.pop('.'.join(keys)) - if module.__class__.__name__.startswith('InstanceNorm') and \ - (key == 'num_batches_tracked'): - state_dict.pop('.'.join(keys)) - else: - self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) - - def load_networks(self, epoch): - """Load all the networks from the disk. 
- - Parameters: - epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) - """ - for name in self.model_names: - if isinstance(name, str): - load_filename = '%s_net_%s.pth' % (epoch, name) - load_path = os.path.join(self.save_dir, load_filename) - net = getattr(self, 'net' + name) - if isinstance(net, torch.nn.DataParallel): - net = net.module - # print('Loading depth boost model from %s' % load_path) - # if you are using PyTorch newer than 0.4 (e.g., built from - # GitHub source), you can remove str() on self.device - state_dict = torch.load(load_path, map_location=str(self.device)) - if hasattr(state_dict, '_metadata'): - del state_dict._metadata - - # patch InstanceNorm checkpoints prior to 0.4 - for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop - self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) - net.load_state_dict(state_dict) - - def print_networks(self, verbose): - """Print the total number of parameters in the network and (if verbose) network architecture - - Parameters: - verbose (bool) -- if verbose: print the network architecture - """ - print('---------- Networks initialized -------------') - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, 'net' + name) - num_params = 0 - for param in net.parameters(): - num_params += param.numel() - if verbose: - print(net) - print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) - print('-----------------------------------------------') - - def set_requires_grad(self, nets, requires_grad=False): - """Set requires_grad=False for all the networks to avoid unnecessary computations - Parameters: - nets (network list) -- a list of networks - requires_grad (bool) -- whether the networks require gradients or not - """ - if not isinstance(nets, list): - nets = [nets] - for net in nets: - if net is not None: - for param in net.parameters(): - param.requires_grad = requires_grad diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py deleted file mode 100644 index 689513fa9d2a40f14bf0ae4ae61f38f0dcc1b3da..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py +++ /dev/null @@ -1,49 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='PSAHead', - in_channels=2048, - in_index=3, - channels=512, - mask_size=(97, 97), - psa_type='bi-direction', - compact=False, - shrink_factor=2, - normalization_factor=1.0, - psa_softmax=True, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, 
loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/ddad.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/ddad.py deleted file mode 100644 index 4bd0492bdec767685d3a21992b4a26e62d002d97..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/ddad.py +++ /dev/null @@ -1,117 +0,0 @@ -# MIT License - -# Copyright (c) 2022 Intelligent Systems Lab Org - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -# File author: Shariq Farooq Bhat - -import os - -import numpy as np -import torch -from PIL import Image -from torch.utils.data import DataLoader, Dataset -from torchvision import transforms - - -class ToTensor(object): - def __init__(self, resize_shape): - # self.normalize = transforms.Normalize( - # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - self.normalize = lambda x : x - self.resize = transforms.Resize(resize_shape) - - def __call__(self, sample): - image, depth = sample['image'], sample['depth'] - image = self.to_tensor(image) - image = self.normalize(image) - depth = self.to_tensor(depth) - - image = self.resize(image) - - return {'image': image, 'depth': depth, 'dataset': "ddad"} - - def to_tensor(self, pic): - - if isinstance(pic, np.ndarray): - img = torch.from_numpy(pic.transpose((2, 0, 1))) - return img - - # # handle PIL Image - if pic.mode == 'I': - img = torch.from_numpy(np.array(pic, np.int32, copy=False)) - elif pic.mode == 'I;16': - img = torch.from_numpy(np.array(pic, np.int16, copy=False)) - else: - img = torch.ByteTensor( - torch.ByteStorage.from_buffer(pic.tobytes())) - # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK - if pic.mode == 'YCbCr': - nchannel = 3 - elif pic.mode == 'I;16': - nchannel = 1 - else: - nchannel = len(pic.mode) - img = img.view(pic.size[1], pic.size[0], nchannel) - - img = img.transpose(0, 1).transpose(0, 2).contiguous() - - if isinstance(img, torch.ByteTensor): - return img.float() - else: - return img - - -class DDAD(Dataset): - def __init__(self, data_dir_root, resize_shape): - import glob - - # image paths are of the form /{outleft, depthmap}/*.png - self.image_files = glob.glob(os.path.join(data_dir_root, '*.png')) - self.depth_files = [r.replace("_rgb.png", "_depth.npy") - for r in self.image_files] - self.transform = ToTensor(resize_shape) - - def 
__getitem__(self, idx): - - image_path = self.image_files[idx] - depth_path = self.depth_files[idx] - - image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0 - depth = np.load(depth_path) # meters - - # depth[depth > 8] = -1 - depth = depth[..., None] - - sample = dict(image=image, depth=depth) - sample = self.transform(sample) - - if idx == 0: - print(sample["image"].shape) - - return sample - - def __len__(self): - return len(self.image_files) - - -def get_ddad_loader(data_dir_root, resize_shape, batch_size=1, **kwargs): - dataset = DDAD(data_dir_root, resize_shape) - return DataLoader(dataset, batch_size, **kwargs) diff --git a/spaces/cownclown/Image-and-3D-Model-Creator/PIFu/apps/__init__.py b/spaces/cownclown/Image-and-3D-Model-Creator/PIFu/apps/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/cownclown/Image-and-3D-Model-Creator/README.md b/spaces/cownclown/Image-and-3D-Model-Creator/README.md deleted file mode 100644 index 90f87bc413a5dd0d4039bcfd087c37e851304f98..0000000000000000000000000000000000000000 --- a/spaces/cownclown/Image-and-3D-Model-Creator/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Image and 3D Model Creator -emoji: "⭐\_" -colorFrom: yellow -colorTo: blue -sdk: gradio -sdk_version: 3.0.2 -app_file: ./PIFu/spaces.py -pinned: false -python_version: 3.7.13 -duplicated_from: F4RF4R4/Image-and-3D-Model-Creator ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/crytion/DeepNude/run.py b/spaces/crytion/DeepNude/run.py deleted file mode 100644 index aeb7b2bdd8ea654de1d3c017e1ad83c0568a1e17..0000000000000000000000000000000000000000 --- a/spaces/crytion/DeepNude/run.py +++ /dev/null @@ -1,150 +0,0 @@ -import cv2 - -#Import Neural Network Model -from gan import DataLoader, DeepModel, tensor2im - -#OpenCv Transform: -from opencv_transform.mask_to_maskref import create_maskref -from opencv_transform.maskdet_to_maskfin import create_maskfin -from opencv_transform.dress_to_correct import create_correct -from opencv_transform.nude_to_watermark import create_watermark - -""" -run.py - -This script manage the entire transormation. - -Transformation happens in 6 phases: - 0: dress -> correct [opencv] dress_to_correct - 1: correct -> mask: [GAN] correct_to_mask - 2: mask -> maskref [opencv] mask_to_maskref - 3: maskref -> maskdet [GAN] maskref_to_maskdet - 4: maskdet -> maskfin [opencv] maskdet_to_maskfin - 5: maskfin -> nude [GAN] maskfin_to_nude - 6: nude -> watermark [opencv] nude_to_watermark - -""" - -phases = ["dress_to_correct", "correct_to_mask", "mask_to_maskref", "maskref_to_maskdet", "maskdet_to_maskfin", "maskfin_to_nude", "nude_to_watermark"] - -class Options(): - - #Init options with default values - def __init__(self): - - # experiment specifics - self.norm = 'batch' #instance normalization or batch normalization - self.use_dropout = False #use dropout for the generator - self.data_type = 32 #Supported data type i.e. 8, 16, 32 bit - - # input/output sizes - self.batchSize = 1 #input batch size - self.input_nc = 3 # of input image channels - self.output_nc = 3 # of output image channels - - # for setting inputs - self.serial_batches = True #if true, takes images in order to make batches, otherwise takes them randomly - self.nThreads = 1 ## threads for loading data (???) - self.max_dataset_size = 1 #Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded. - - # for generator - self.netG = 'global' #selects model to use for netG - self.ngf = 64 ## of gen filters in first conv layer - self.n_downsample_global = 4 #number of downsampling layers in netG - self.n_blocks_global = 9 #number of residual blocks in the global generator network - self.n_blocks_local = 0 #number of residual blocks in the local enhancer network - self.n_local_enhancers = 0 #number of local enhancers to use - self.niter_fix_global = 0 #number of epochs that we only train the outmost local enhancer - - #Phase specific options - self.checkpoints_dir = "" - self.dataroot = "" - - #Changes options accordlying to actual phase - def updateOptions(self, phase): - - if phase == "correct_to_mask": - self.checkpoints_dir = "checkpoints/cm.lib" - - elif phase == "maskref_to_maskdet": - self.checkpoints_dir = "checkpoints/mm.lib" - - elif phase == "maskfin_to_nude": - self.checkpoints_dir = "checkpoints/mn.lib" - -# process(cv_img, mode) -# return: -# watermark image -def process(cv_img): - - #InMemory cv2 images: - dress = cv_img - correct = None - mask = None - maskref = None - maskfin = None - maskdet = None - nude = None - watermark = None - - for index, phase in enumerate(phases): - - print("Executing phase: " + phase) - - #GAN phases: - if (phase == "correct_to_mask") or (phase == "maskref_to_maskdet") or (phase == "maskfin_to_nude"): - - #Load global option - opt = Options() - - #Load custom phase options: - opt.updateOptions(phase) - - #Load Data - if (phase == "correct_to_mask"): - data_loader = DataLoader(opt, correct) - elif (phase == "maskref_to_maskdet"): - data_loader = DataLoader(opt, maskref) - elif (phase == "maskfin_to_nude"): - data_loader = DataLoader(opt, maskfin) - - dataset = data_loader.load_data() - - #Create Model - model = DeepModel() - model.initialize(opt) - - #Run for every image: - for i, data in enumerate(dataset): - - generated = model.inference(data['label'], data['inst']) - - im = tensor2im(generated.data[0]) - - #Save Data - if (phase == "correct_to_mask"): - mask = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) - - elif (phase == "maskref_to_maskdet"): - maskdet = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) - - elif (phase == "maskfin_to_nude"): - nude = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) - - #Correcting: - elif (phase == 'dress_to_correct'): - correct = create_correct(dress) - - #mask_ref phase (opencv) - elif (phase == "mask_to_maskref"): - maskref = create_maskref(mask, correct) - - #mask_fin phase (opencv) - elif (phase == "maskdet_to_maskfin"): - maskfin = create_maskfin(maskref, maskdet) - - #nude_to_watermark phase (opencv) - elif (phase == "nude_to_watermark"): - watermark = create_watermark(nude) - - return watermark \ No newline at end of file diff --git a/spaces/cscan/CodeFormer/CodeFormer/basicsr/archs/__init__.py b/spaces/cscan/CodeFormer/CodeFormer/basicsr/archs/__init__.py deleted file mode 100644 index cfb1e4d7bb221c429082bd389d9140e5b1cc07b0..0000000000000000000000000000000000000000 --- a/spaces/cscan/CodeFormer/CodeFormer/basicsr/archs/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -import importlib -from copy import deepcopy -from os import path as osp - -from basicsr.utils import get_root_logger, scandir -from basicsr.utils.registry import ARCH_REGISTRY - -__all__ = ['build_network'] - -# automatically scan and import arch modules for registry -# scan all the files under the 'archs' folder and collect files ending with -# '_arch.py' -arch_folder = 
osp.dirname(osp.abspath(__file__)) -arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] -# import all the arch modules -_arch_modules = [importlib.import_module(f'basicsr.archs.{file_name}') for file_name in arch_filenames] - - -def build_network(opt): - opt = deepcopy(opt) - network_type = opt.pop('type') - net = ARCH_REGISTRY.get(network_type)(**opt) - logger = get_root_logger() - logger.info(f'Network [{net.__class__.__name__}] is created.') - return net diff --git a/spaces/cscan/vocal_remover/inference.py b/spaces/cscan/vocal_remover/inference.py deleted file mode 100644 index 156630618307c60983b7128f7828ce1643277430..0000000000000000000000000000000000000000 --- a/spaces/cscan/vocal_remover/inference.py +++ /dev/null @@ -1,181 +0,0 @@ -import argparse -import os - -import librosa -import numpy as np -import soundfile as sf -import torch -from tqdm import tqdm - -from lib import dataset -from lib import nets -from lib import spec_utils -from lib import utils - - -class Separator(object): - - def __init__(self, model, device, batchsize, cropsize, postprocess=False): - self.model = model - self.offset = model.offset - self.device = device - self.batchsize = batchsize - self.cropsize = cropsize - self.postprocess = postprocess - - def _separate(self, X_mag_pad, roi_size): - X_dataset = [] - patches = (X_mag_pad.shape[2] - 2 * self.offset) // roi_size - for i in range(patches): - start = i * roi_size - X_mag_crop = X_mag_pad[:, :, start:start + self.cropsize] - X_dataset.append(X_mag_crop) - - X_dataset = np.asarray(X_dataset) - - self.model.eval() - with torch.no_grad(): - mask = [] - # To reduce the overhead, dataloader is not used. - for i in tqdm(range(0, patches, self.batchsize)): - X_batch = X_dataset[i: i + self.batchsize] - X_batch = torch.from_numpy(X_batch).to(self.device) - - pred = self.model.predict_mask(X_batch) - - pred = pred.detach().cpu().numpy() - pred = np.concatenate(pred, axis=2) - mask.append(pred) - - mask = np.concatenate(mask, axis=2) - - return mask - - def _preprocess(self, X_spec): - X_mag = np.abs(X_spec) - X_phase = np.angle(X_spec) - - return X_mag, X_phase - - def _postprocess(self, mask, X_mag, X_phase): - if self.postprocess: - mask = spec_utils.merge_artifacts(mask) - - y_spec = mask * X_mag * np.exp(1.j * X_phase) - v_spec = (1 - mask) * X_mag * np.exp(1.j * X_phase) - - return y_spec, v_spec - - def separate(self, X_spec): - X_mag, X_phase = self._preprocess(X_spec) - - n_frame = X_mag.shape[2] - pad_l, pad_r, roi_size = dataset.make_padding(n_frame, self.cropsize, self.offset) - X_mag_pad = np.pad(X_mag, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant') - X_mag_pad /= X_mag_pad.max() - - mask = self._separate(X_mag_pad, roi_size) - mask = mask[:, :, :n_frame] - - y_spec, v_spec = self._postprocess(mask, X_mag, X_phase) - - return y_spec, v_spec - - def separate_tta(self, X_spec): - X_mag, X_phase = self._preprocess(X_spec) - - n_frame = X_mag.shape[2] - pad_l, pad_r, roi_size = dataset.make_padding(n_frame, self.cropsize, self.offset) - X_mag_pad = np.pad(X_mag, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant') - X_mag_pad /= X_mag_pad.max() - - mask = self._separate(X_mag_pad, roi_size) - - pad_l += roi_size // 2 - pad_r += roi_size // 2 - X_mag_pad = np.pad(X_mag, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant') - X_mag_pad /= X_mag_pad.max() - - mask_tta = self._separate(X_mag_pad, roi_size) - mask_tta = mask_tta[:, :, roi_size // 2:] - mask = (mask[:, :, :n_frame] + mask_tta[:, :, 
:n_frame]) * 0.5 - - y_spec, v_spec = self._postprocess(mask, X_mag, X_phase) - - return y_spec, v_spec - - -def main(): - p = argparse.ArgumentParser() - p.add_argument('--gpu', '-g', type=int, default=-1) - p.add_argument('--pretrained_model', '-P', type=str, default='models/baseline.pth') - p.add_argument('--input', '-i', required=True) - p.add_argument('--sr', '-r', type=int, default=44100) - p.add_argument('--n_fft', '-f', type=int, default=2048) - p.add_argument('--hop_length', '-H', type=int, default=1024) - p.add_argument('--batchsize', '-B', type=int, default=4) - p.add_argument('--cropsize', '-c', type=int, default=256) - p.add_argument('--output_image', '-I', action='store_true') - p.add_argument('--postprocess', '-p', action='store_true') - p.add_argument('--tta', '-t', action='store_true') - p.add_argument('--output_dir', '-o', type=str, default="") - args = p.parse_args() - - print('loading model...', end=' ') - device = torch.device('cpu') - model = nets.CascadedNet(args.n_fft, 32, 128) - model.load_state_dict(torch.load(args.pretrained_model, map_location=device)) - if torch.cuda.is_available() and args.gpu >= 0: - device = torch.device('cuda:{}'.format(args.gpu)) - model.to(device) - print('done') - - print('loading wave source...', end=' ') - X, sr = librosa.load( - args.input, args.sr, False, dtype=np.float32, res_type='kaiser_fast') - basename = os.path.splitext(os.path.basename(args.input))[0] - print('done') - - if X.ndim == 1: - # mono to stereo - X = np.asarray([X, X]) - - print('stft of wave source...', end=' ') - X_spec = spec_utils.wave_to_spectrogram(X, args.hop_length, args.n_fft) - print('done') - - sp = Separator(model, device, args.batchsize, args.cropsize, args.postprocess) - - if args.tta: - y_spec, v_spec = sp.separate_tta(X_spec) - else: - y_spec, v_spec = sp.separate(X_spec) - - print('validating output directory...', end=' ') - output_dir = args.output_dir - if output_dir != "": # modifies output_dir if theres an arg specified - output_dir = output_dir.rstrip('/') + '/' - os.makedirs(output_dir, exist_ok=True) - print('done') - - print('inverse stft of instruments...', end=' ') - wave = spec_utils.spectrogram_to_wave(y_spec, hop_length=args.hop_length) - print('done') - # sf.write('{}{}_Instruments.wav'.format(output_dir, basename), wave.T, sr) - sf.write('{}Instruments.wav'.format(output_dir), wave.T, sr) - - print('inverse stft of vocals...', end=' ') - wave = spec_utils.spectrogram_to_wave(v_spec, hop_length=args.hop_length) - print('done') - sf.write('{}{}_Vocals.wav'.format(output_dir, basename), wave.T, sr) - - if args.output_image: - image = spec_utils.spectrogram_to_image(y_spec) - utils.imwrite('{}{}_Instruments.jpg'.format(output_dir, basename), image) - - image = spec_utils.spectrogram_to_image(v_spec) - utils.imwrite('{}{}_Vocals.jpg'.format(output_dir, basename), image) - - -if __name__ == '__main__': - main() diff --git a/spaces/dachenchen/HiWantJoin/modules/__init__.py b/spaces/dachenchen/HiWantJoin/modules/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/utils/hparams.py b/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/utils/hparams.py deleted file mode 100644 index 743c5c7d5a5a9e686f1ccd6fb3c2fb5cb382d62b..0000000000000000000000000000000000000000 --- a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/utils/hparams.py +++ /dev/null @@ -1,160 +0,0 @@ -from glob 
import glob -import os - -class HParams: - def __init__(self, **kwargs): - self.data = {} - - for key, value in kwargs.items(): - self.data[key] = value - - def __getattr__(self, key): - if key not in self.data: - raise AttributeError("'HParams' object has no attribute %s" % key) - return self.data[key] - - def set_hparam(self, key, value): - self.data[key] = value - - -# Default hyperparameters -hparams = HParams( - num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality - # network - rescale=True, # Whether to rescale audio prior to preprocessing - rescaling_max=0.9, # Rescaling value - - # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction - # It"s preferred to set True to use with https://github.com/r9y9/wavenet_vocoder - # Does not work if n_ffit is not multiple of hop_size!! - use_lws=False, - - n_fft=800, # Extra window size is filled with 0 paddings to match this parameter - hop_size=200, # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate) - win_size=800, # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate) - sample_rate=16000, # 16000Hz (corresponding to librispeech) (sox --i ) - - frame_shift_ms=None, # Can replace hop_size parameter. (Recommended: 12.5) - - # Mel and Linear spectrograms normalization/scaling and clipping - signal_normalization=True, - # Whether to normalize mel spectrograms to some predefined range (following below parameters) - allow_clipping_in_normalization=True, # Only relevant if mel_normalization = True - symmetric_mels=True, - # Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2, - # faster and cleaner convergence) - max_abs_value=4., - # max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not - # be too big to avoid gradient explosion, - # not too small for fast convergence) - # Contribution by @begeekmyfriend - # Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude - # levels. Also allows for better G&L phase reconstruction) - preemphasize=True, # whether to apply filter - preemphasis=0.97, # filter coefficient. - - # Limits - min_level_db=-100, - ref_level_db=20, - fmin=55, - # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To - # test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) - fmax=7600, # To be increased/reduced depending on data. - - ###################### Our training parameters ################################# - img_size=96, - fps=25, - - batch_size=16, - initial_learning_rate=1e-4, - nepochs=300000, ### ctrl + c, stop whenever eval loss is consistently greater than train loss for ~10 epochs - num_workers=20, - checkpoint_interval=3000, - eval_interval=3000, - writer_interval=300, - save_optimizer_state=True, - - syncnet_wt=0.0, # is initially zero, will be set automatically to 0.03 later. Leads to faster convergence. 
- syncnet_batch_size=64, - syncnet_lr=1e-4, - syncnet_eval_interval=1000, - syncnet_checkpoint_interval=10000, - - disc_wt=0.07, - disc_initial_learning_rate=1e-4, -) - - - -# Default hyperparameters -hparamsdebug = HParams( - num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality - # network - rescale=True, # Whether to rescale audio prior to preprocessing - rescaling_max=0.9, # Rescaling value - - # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction - # It"s preferred to set True to use with https://github.com/r9y9/wavenet_vocoder - # Does not work if n_ffit is not multiple of hop_size!! - use_lws=False, - - n_fft=800, # Extra window size is filled with 0 paddings to match this parameter - hop_size=200, # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate) - win_size=800, # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate) - sample_rate=16000, # 16000Hz (corresponding to librispeech) (sox --i ) - - frame_shift_ms=None, # Can replace hop_size parameter. (Recommended: 12.5) - - # Mel and Linear spectrograms normalization/scaling and clipping - signal_normalization=True, - # Whether to normalize mel spectrograms to some predefined range (following below parameters) - allow_clipping_in_normalization=True, # Only relevant if mel_normalization = True - symmetric_mels=True, - # Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2, - # faster and cleaner convergence) - max_abs_value=4., - # max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not - # be too big to avoid gradient explosion, - # not too small for fast convergence) - # Contribution by @begeekmyfriend - # Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude - # levels. Also allows for better G&L phase reconstruction) - preemphasize=True, # whether to apply filter - preemphasis=0.97, # filter coefficient. - - # Limits - min_level_db=-100, - ref_level_db=20, - fmin=55, - # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To - # test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) - fmax=7600, # To be increased/reduced depending on data. - - ###################### Our training parameters ################################# - img_size=96, - fps=25, - - batch_size=2, - initial_learning_rate=1e-3, - nepochs=100000, ### ctrl + c, stop whenever eval loss is consistently greater than train loss for ~10 epochs - num_workers=0, - checkpoint_interval=10000, - eval_interval=10, - writer_interval=5, - save_optimizer_state=True, - - syncnet_wt=0.0, # is initially zero, will be set automatically to 0.03 later. Leads to faster convergence. 
- syncnet_batch_size=64, - syncnet_lr=1e-4, - syncnet_eval_interval=10000, - syncnet_checkpoint_interval=10000, - - disc_wt=0.07, - disc_initial_learning_rate=1e-4, -) - - -def hparams_debug_string(): - values = hparams.values() - hp = [" %s: %s" % (name, values[name]) for name in sorted(values) if name != "sentences"] - return "Hyperparameters:\n" + "\n".join(hp) diff --git a/spaces/dariusstone7/PFE/README.md b/spaces/dariusstone7/PFE/README.md deleted file mode 100644 index 26308b32c07aa6c8e4d5747c997d8c8f683b0441..0000000000000000000000000000000000000000 --- a/spaces/dariusstone7/PFE/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: PFE -emoji: 🐢 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_P_O_S_.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_P_O_S_.py deleted file mode 100644 index ca8290bab440e31196dd009c5125e022a079d7af..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_P_O_S_.py +++ /dev/null @@ -1,5 +0,0 @@ -from .otBase import BaseTTXConverter - - -class table_G_P_O_S_(BaseTTXConverter): - pass diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_V_G_.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_V_G_.py deleted file mode 100644 index ebc2befdfe8540b3fdd6fa19002d708992787f5f..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_V_G_.py +++ /dev/null @@ -1,215 +0,0 @@ -"""Compiles/decompiles SVG table. - -https://docs.microsoft.com/en-us/typography/opentype/spec/svg - -The XML format is: - -.. code-block:: xml - - - - <complete SVG doc> ]] - </svgDoc> - ... - <svgDoc endGlyphID="n" startGlyphID="m"> - <![CDATA[ <complete SVG doc> ]] - </svgDoc> - </SVG> -""" - -from fontTools.misc.textTools import bytesjoin, safeEval, strjoin, tobytes, tostr -from fontTools.misc import sstruct -from . import DefaultTable -from collections.abc import Sequence -from dataclasses import dataclass, astuple -from io import BytesIO -import struct -import logging - - -log = logging.getLogger(__name__) - - -SVG_format_0 = """ - > # big endian - version: H - offsetToSVGDocIndex: L - reserved: L -""" - -SVG_format_0Size = sstruct.calcsize(SVG_format_0) - -doc_index_entry_format_0 = """ - > # big endian - startGlyphID: H - endGlyphID: H - svgDocOffset: L - svgDocLength: L -""" - -doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0) - - -class table_S_V_G_(DefaultTable.DefaultTable): - def decompile(self, data, ttFont): - self.docList = [] - # Version 0 is the standardized version of the table; and current. - # https://www.microsoft.com/typography/otspec/svg.htm - sstruct.unpack(SVG_format_0, data[:SVG_format_0Size], self) - if self.version != 0: - log.warning( - "Unknown SVG table version '%s'. Decompiling as version 0.", - self.version, - ) - # read in SVG Documents Index - # data starts with the first entry of the entry list. 
- pos = subTableStart = self.offsetToSVGDocIndex - self.numEntries = struct.unpack(">H", data[pos : pos + 2])[0] - pos += 2 - if self.numEntries > 0: - data2 = data[pos:] - entries = [] - for i in range(self.numEntries): - record_data = data2[ - i - * doc_index_entry_format_0Size : (i + 1) - * doc_index_entry_format_0Size - ] - docIndexEntry = sstruct.unpack( - doc_index_entry_format_0, record_data, DocumentIndexEntry() - ) - entries.append(docIndexEntry) - - for entry in entries: - start = entry.svgDocOffset + subTableStart - end = start + entry.svgDocLength - doc = data[start:end] - compressed = False - if doc.startswith(b"\x1f\x8b"): - import gzip - - bytesIO = BytesIO(doc) - with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper: - doc = gunzipper.read() - del bytesIO - compressed = True - doc = tostr(doc, "utf_8") - self.docList.append( - SVGDocument(doc, entry.startGlyphID, entry.endGlyphID, compressed) - ) - - def compile(self, ttFont): - version = 0 - offsetToSVGDocIndex = ( - SVG_format_0Size # I start the SVGDocIndex right after the header. - ) - # get SGVDoc info. - docList = [] - entryList = [] - numEntries = len(self.docList) - datum = struct.pack(">H", numEntries) - entryList.append(datum) - curOffset = len(datum) + doc_index_entry_format_0Size * numEntries - seenDocs = {} - allCompressed = getattr(self, "compressed", False) - for i, doc in enumerate(self.docList): - if isinstance(doc, (list, tuple)): - doc = SVGDocument(*doc) - self.docList[i] = doc - docBytes = tobytes(doc.data, encoding="utf_8") - if (allCompressed or doc.compressed) and not docBytes.startswith( - b"\x1f\x8b" - ): - import gzip - - bytesIO = BytesIO() - # mtime=0 strips the useless timestamp and makes gzip output reproducible; - # equivalent to `gzip -n` - with gzip.GzipFile(None, "w", fileobj=bytesIO, mtime=0) as gzipper: - gzipper.write(docBytes) - gzipped = bytesIO.getvalue() - if len(gzipped) < len(docBytes): - docBytes = gzipped - del gzipped, bytesIO - docLength = len(docBytes) - if docBytes in seenDocs: - docOffset = seenDocs[docBytes] - else: - docOffset = curOffset - curOffset += docLength - seenDocs[docBytes] = docOffset - docList.append(docBytes) - entry = struct.pack( - ">HHLL", doc.startGlyphID, doc.endGlyphID, docOffset, docLength - ) - entryList.append(entry) - entryList.extend(docList) - svgDocData = bytesjoin(entryList) - - reserved = 0 - header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved) - data = [header, svgDocData] - data = bytesjoin(data) - return data - - def toXML(self, writer, ttFont): - for i, doc in enumerate(self.docList): - if isinstance(doc, (list, tuple)): - doc = SVGDocument(*doc) - self.docList[i] = doc - attrs = {"startGlyphID": doc.startGlyphID, "endGlyphID": doc.endGlyphID} - if doc.compressed: - attrs["compressed"] = 1 - writer.begintag("svgDoc", **attrs) - writer.newline() - writer.writecdata(doc.data) - writer.newline() - writer.endtag("svgDoc") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "svgDoc": - if not hasattr(self, "docList"): - self.docList = [] - doc = strjoin(content) - doc = doc.strip() - startGID = int(attrs["startGlyphID"]) - endGID = int(attrs["endGlyphID"]) - compressed = bool(safeEval(attrs.get("compressed", "0"))) - self.docList.append(SVGDocument(doc, startGID, endGID, compressed)) - else: - log.warning("Unknown %s %s", name, content) - - -class DocumentIndexEntry(object): - def __init__(self): - self.startGlyphID = None # USHORT - self.endGlyphID = None # USHORT - self.svgDocOffset = None 
# ULONG - self.svgDocLength = None # ULONG - - def __repr__(self): - return ( - "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s" - % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength) - ) - - -@dataclass -class SVGDocument(Sequence): - data: str - startGlyphID: int - endGlyphID: int - compressed: bool = False - - # Previously, the SVG table's docList attribute contained a lists of 3 items: - # [doc, startGlyphID, endGlyphID]; later, we added a `compressed` attribute. - # For backward compatibility with code that depends of them being sequences of - # fixed length=3, we subclass the Sequence abstract base class and pretend only - # the first three items are present. 'compressed' is only accessible via named - # attribute lookup like regular dataclasses: i.e. `doc.compressed`, not `doc[3]` - def __getitem__(self, index): - return astuple(self)[:3][index] - - def __len__(self): - return 3 diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/httpcore/_sync/http2.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/httpcore/_sync/http2.py deleted file mode 100644 index d141d459a59d134beac3b2dffb17d17f29abcea4..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/httpcore/_sync/http2.py +++ /dev/null @@ -1,589 +0,0 @@ -import enum -import logging -import time -import types -import typing - -import h2.config -import h2.connection -import h2.events -import h2.exceptions -import h2.settings - -from .._backends.base import NetworkStream -from .._exceptions import ( - ConnectionNotAvailable, - LocalProtocolError, - RemoteProtocolError, -) -from .._models import Origin, Request, Response -from .._synchronization import Lock, Semaphore, ShieldCancellation -from .._trace import Trace -from .interfaces import ConnectionInterface - -logger = logging.getLogger("httpcore.http2") - - -def has_body_headers(request: Request) -> bool: - return any( - k.lower() == b"content-length" or k.lower() == b"transfer-encoding" - for k, v in request.headers - ) - - -class HTTPConnectionState(enum.IntEnum): - ACTIVE = 1 - IDLE = 2 - CLOSED = 3 - - -class HTTP2Connection(ConnectionInterface): - READ_NUM_BYTES = 64 * 1024 - CONFIG = h2.config.H2Configuration(validate_inbound_headers=False) - - def __init__( - self, - origin: Origin, - stream: NetworkStream, - keepalive_expiry: typing.Optional[float] = None, - ): - self._origin = origin - self._network_stream = stream - self._keepalive_expiry: typing.Optional[float] = keepalive_expiry - self._h2_state = h2.connection.H2Connection(config=self.CONFIG) - self._state = HTTPConnectionState.IDLE - self._expire_at: typing.Optional[float] = None - self._request_count = 0 - self._init_lock = Lock() - self._state_lock = Lock() - self._read_lock = Lock() - self._write_lock = Lock() - self._sent_connection_init = False - self._used_all_stream_ids = False - self._connection_error = False - - # Mapping from stream ID to response stream events. - self._events: typing.Dict[ - int, - typing.Union[ - h2.events.ResponseReceived, - h2.events.DataReceived, - h2.events.StreamEnded, - h2.events.StreamReset, - ], - ] = {} - - # Connection terminated events are stored as state since - # we need to handle them for all streams. 
- self._connection_terminated: typing.Optional[ - h2.events.ConnectionTerminated - ] = None - - self._read_exception: typing.Optional[Exception] = None - self._write_exception: typing.Optional[Exception] = None - - def handle_request(self, request: Request) -> Response: - if not self.can_handle_request(request.url.origin): - # This cannot occur in normal operation, since the connection pool - # will only send requests on connections that handle them. - # It's in place simply for resilience as a guard against incorrect - # usage, for anyone working directly with httpcore connections. - raise RuntimeError( - f"Attempted to send request to {request.url.origin} on connection " - f"to {self._origin}" - ) - - with self._state_lock: - if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE): - self._request_count += 1 - self._expire_at = None - self._state = HTTPConnectionState.ACTIVE - else: - raise ConnectionNotAvailable() - - with self._init_lock: - if not self._sent_connection_init: - try: - kwargs = {"request": request} - with Trace("send_connection_init", logger, request, kwargs): - self._send_connection_init(**kwargs) - except BaseException as exc: - with ShieldCancellation(): - self.close() - raise exc - - self._sent_connection_init = True - - # Initially start with just 1 until the remote server provides - # its max_concurrent_streams value - self._max_streams = 1 - - local_settings_max_streams = ( - self._h2_state.local_settings.max_concurrent_streams - ) - self._max_streams_semaphore = Semaphore(local_settings_max_streams) - - for _ in range(local_settings_max_streams - self._max_streams): - self._max_streams_semaphore.acquire() - - self._max_streams_semaphore.acquire() - - try: - stream_id = self._h2_state.get_next_available_stream_id() - self._events[stream_id] = [] - except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover - self._used_all_stream_ids = True - self._request_count -= 1 - raise ConnectionNotAvailable() - - try: - kwargs = {"request": request, "stream_id": stream_id} - with Trace("send_request_headers", logger, request, kwargs): - self._send_request_headers(request=request, stream_id=stream_id) - with Trace("send_request_body", logger, request, kwargs): - self._send_request_body(request=request, stream_id=stream_id) - with Trace( - "receive_response_headers", logger, request, kwargs - ) as trace: - status, headers = self._receive_response( - request=request, stream_id=stream_id - ) - trace.return_value = (status, headers) - - return Response( - status=status, - headers=headers, - content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id), - extensions={ - "http_version": b"HTTP/2", - "network_stream": self._network_stream, - "stream_id": stream_id, - }, - ) - except BaseException as exc: # noqa: PIE786 - with ShieldCancellation(): - kwargs = {"stream_id": stream_id} - with Trace("response_closed", logger, request, kwargs): - self._response_closed(stream_id=stream_id) - - if isinstance(exc, h2.exceptions.ProtocolError): - # One case where h2 can raise a protocol error is when a - # closed frame has been seen by the state machine. - # - # This happens when one stream is reading, and encounters - # a GOAWAY event. Other flows of control may then raise - # a protocol error at any point they interact with the 'h2_state'. - # - # In this case we'll have stored the event, and should raise - # it as a RemoteProtocolError. 
- if self._connection_terminated: # pragma: nocover - raise RemoteProtocolError(self._connection_terminated) - # If h2 raises a protocol error in some other state then we - # must somehow have made a protocol violation. - raise LocalProtocolError(exc) # pragma: nocover - - raise exc - - def _send_connection_init(self, request: Request) -> None: - """ - The HTTP/2 connection requires some initial setup before we can start - using individual request/response streams on it. - """ - # Need to set these manually here instead of manipulating via - # __setitem__() otherwise the H2Connection will emit SettingsUpdate - # frames in addition to sending the undesired defaults. - self._h2_state.local_settings = h2.settings.Settings( - client=True, - initial_values={ - # Disable PUSH_PROMISE frames from the server since we don't do anything - # with them for now. Maybe when we support caching? - h2.settings.SettingCodes.ENABLE_PUSH: 0, - # These two are taken from h2 for safe defaults - h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100, - h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536, - }, - ) - - # Some websites (*cough* Yahoo *cough*) balk at this setting being - # present in the initial handshake since it's not defined in the original - # RFC despite the RFC mandating ignoring settings you don't know about. - del self._h2_state.local_settings[ - h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL - ] - - self._h2_state.initiate_connection() - self._h2_state.increment_flow_control_window(2**24) - self._write_outgoing_data(request) - - # Sending the request... - - def _send_request_headers(self, request: Request, stream_id: int) -> None: - """ - Send the request headers to a given stream ID. - """ - end_stream = not has_body_headers(request) - - # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'. - # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require - # HTTP/1.1 style headers, and map them appropriately if we end up on - # an HTTP/2 connection. - authority = [v for k, v in request.headers if k.lower() == b"host"][0] - - headers = [ - (b":method", request.method), - (b":authority", authority), - (b":scheme", request.url.scheme), - (b":path", request.url.target), - ] + [ - (k.lower(), v) - for k, v in request.headers - if k.lower() - not in ( - b"host", - b"transfer-encoding", - ) - ] - - self._h2_state.send_headers(stream_id, headers, end_stream=end_stream) - self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id) - self._write_outgoing_data(request) - - def _send_request_body(self, request: Request, stream_id: int) -> None: - """ - Iterate over the request body sending it to a given stream ID. - """ - if not has_body_headers(request): - return - - assert isinstance(request.stream, typing.Iterable) - for data in request.stream: - self._send_stream_data(request, stream_id, data) - self._send_end_stream(request, stream_id) - - def _send_stream_data( - self, request: Request, stream_id: int, data: bytes - ) -> None: - """ - Send a single chunk of data in one or more data frames. - """ - while data: - max_flow = self._wait_for_outgoing_flow(request, stream_id) - chunk_size = min(len(data), max_flow) - chunk, data = data[:chunk_size], data[chunk_size:] - self._h2_state.send_data(stream_id, chunk) - self._write_outgoing_data(request) - - def _send_end_stream(self, request: Request, stream_id: int) -> None: - """ - Send an empty data frame on on a given stream ID with the END_STREAM flag set. 
- """ - self._h2_state.end_stream(stream_id) - self._write_outgoing_data(request) - - # Receiving the response... - - def _receive_response( - self, request: Request, stream_id: int - ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]: - """ - Return the response status code and headers for a given stream ID. - """ - while True: - event = self._receive_stream_event(request, stream_id) - if isinstance(event, h2.events.ResponseReceived): - break - - status_code = 200 - headers = [] - for k, v in event.headers: - if k == b":status": - status_code = int(v.decode("ascii", errors="ignore")) - elif not k.startswith(b":"): - headers.append((k, v)) - - return (status_code, headers) - - def _receive_response_body( - self, request: Request, stream_id: int - ) -> typing.Iterator[bytes]: - """ - Iterator that returns the bytes of the response body for a given stream ID. - """ - while True: - event = self._receive_stream_event(request, stream_id) - if isinstance(event, h2.events.DataReceived): - amount = event.flow_controlled_length - self._h2_state.acknowledge_received_data(amount, stream_id) - self._write_outgoing_data(request) - yield event.data - elif isinstance(event, h2.events.StreamEnded): - break - - def _receive_stream_event( - self, request: Request, stream_id: int - ) -> typing.Union[ - h2.events.ResponseReceived, h2.events.DataReceived, h2.events.StreamEnded - ]: - """ - Return the next available event for a given stream ID. - - Will read more data from the network if required. - """ - while not self._events.get(stream_id): - self._receive_events(request, stream_id) - event = self._events[stream_id].pop(0) - if isinstance(event, h2.events.StreamReset): - raise RemoteProtocolError(event) - return event - - def _receive_events( - self, request: Request, stream_id: typing.Optional[int] = None - ) -> None: - """ - Read some data from the network until we see one or more events - for a given stream ID. - """ - with self._read_lock: - if self._connection_terminated is not None: - last_stream_id = self._connection_terminated.last_stream_id - if stream_id and last_stream_id and stream_id > last_stream_id: - self._request_count -= 1 - raise ConnectionNotAvailable() - raise RemoteProtocolError(self._connection_terminated) - - # This conditional is a bit icky. We don't want to block reading if we've - # actually got an event to return for a given stream. We need to do that - # check *within* the atomic read lock. Though it also need to be optional, - # because when we call it from `_wait_for_outgoing_flow` we *do* want to - # block until we've available flow control, event when we have events - # pending for the stream ID we're attempting to send on. 
- if stream_id is None or not self._events.get(stream_id): - events = self._read_incoming_data(request) - for event in events: - if isinstance(event, h2.events.RemoteSettingsChanged): - with Trace( - "receive_remote_settings", logger, request - ) as trace: - self._receive_remote_settings_change(event) - trace.return_value = event - - elif isinstance( - event, - ( - h2.events.ResponseReceived, - h2.events.DataReceived, - h2.events.StreamEnded, - h2.events.StreamReset, - ), - ): - if event.stream_id in self._events: - self._events[event.stream_id].append(event) - - elif isinstance(event, h2.events.ConnectionTerminated): - self._connection_terminated = event - - self._write_outgoing_data(request) - - def _receive_remote_settings_change(self, event: h2.events.Event) -> None: - max_concurrent_streams = event.changed_settings.get( - h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS - ) - if max_concurrent_streams: - new_max_streams = min( - max_concurrent_streams.new_value, - self._h2_state.local_settings.max_concurrent_streams, - ) - if new_max_streams and new_max_streams != self._max_streams: - while new_max_streams > self._max_streams: - self._max_streams_semaphore.release() - self._max_streams += 1 - while new_max_streams < self._max_streams: - self._max_streams_semaphore.acquire() - self._max_streams -= 1 - - def _response_closed(self, stream_id: int) -> None: - self._max_streams_semaphore.release() - del self._events[stream_id] - with self._state_lock: - if self._connection_terminated and not self._events: - self.close() - - elif self._state == HTTPConnectionState.ACTIVE and not self._events: - self._state = HTTPConnectionState.IDLE - if self._keepalive_expiry is not None: - now = time.monotonic() - self._expire_at = now + self._keepalive_expiry - if self._used_all_stream_ids: # pragma: nocover - self.close() - - def close(self) -> None: - # Note that this method unilaterally closes the connection, and does - # not have any kind of locking in place around it. - self._h2_state.close_connection() - self._state = HTTPConnectionState.CLOSED - self._network_stream.close() - - # Wrappers around network read/write operations... - - def _read_incoming_data( - self, request: Request - ) -> typing.List[h2.events.Event]: - timeouts = request.extensions.get("timeout", {}) - timeout = timeouts.get("read", None) - - if self._read_exception is not None: - raise self._read_exception # pragma: nocover - - try: - data = self._network_stream.read(self.READ_NUM_BYTES, timeout) - if data == b"": - raise RemoteProtocolError("Server disconnected") - except Exception as exc: - # If we get a network error we should: - # - # 1. Save the exception and just raise it immediately on any future reads. - # (For example, this means that a single read timeout or disconnect will - # immediately close all pending streams. Without requiring multiple - # sequential timeouts.) - # 2. Mark the connection as errored, so that we don't accept any other - # incoming requests. 
- self._read_exception = exc - self._connection_error = True - raise exc - - events: typing.List[h2.events.Event] = self._h2_state.receive_data(data) - - return events - - def _write_outgoing_data(self, request: Request) -> None: - timeouts = request.extensions.get("timeout", {}) - timeout = timeouts.get("write", None) - - with self._write_lock: - data_to_send = self._h2_state.data_to_send() - - if self._write_exception is not None: - raise self._write_exception # pragma: nocover - - try: - self._network_stream.write(data_to_send, timeout) - except Exception as exc: # pragma: nocover - # If we get a network error we should: - # - # 1. Save the exception and just raise it immediately on any future write. - # (For example, this means that a single write timeout or disconnect will - # immediately close all pending streams. Without requiring multiple - # sequential timeouts.) - # 2. Mark the connection as errored, so that we don't accept any other - # incoming requests. - self._write_exception = exc - self._connection_error = True - raise exc - - # Flow control... - - def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int: - """ - Returns the maximum allowable outgoing flow for a given stream. - - If the allowable flow is zero, then waits on the network until - WindowUpdated frames have increased the flow rate. - https://tools.ietf.org/html/rfc7540#section-6.9 - """ - local_flow: int = self._h2_state.local_flow_control_window(stream_id) - max_frame_size: int = self._h2_state.max_outbound_frame_size - flow = min(local_flow, max_frame_size) - while flow == 0: - self._receive_events(request) - local_flow = self._h2_state.local_flow_control_window(stream_id) - max_frame_size = self._h2_state.max_outbound_frame_size - flow = min(local_flow, max_frame_size) - return flow - - # Interface for connection pooling... - - def can_handle_request(self, origin: Origin) -> bool: - return origin == self._origin - - def is_available(self) -> bool: - return ( - self._state != HTTPConnectionState.CLOSED - and not self._connection_error - and not self._used_all_stream_ids - and not ( - self._h2_state.state_machine.state - == h2.connection.ConnectionState.CLOSED - ) - ) - - def has_expired(self) -> bool: - now = time.monotonic() - return self._expire_at is not None and now > self._expire_at - - def is_idle(self) -> bool: - return self._state == HTTPConnectionState.IDLE - - def is_closed(self) -> bool: - return self._state == HTTPConnectionState.CLOSED - - def info(self) -> str: - origin = str(self._origin) - return ( - f"{origin!r}, HTTP/2, {self._state.name}, " - f"Request Count: {self._request_count}" - ) - - def __repr__(self) -> str: - class_name = self.__class__.__name__ - origin = str(self._origin) - return ( - f"<{class_name} [{origin!r}, {self._state.name}, " - f"Request Count: {self._request_count}]>" - ) - - # These context managers are not used in the standard flow, but are - # useful for testing or working with connection instances directly. 
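The context-manager methods that follow are easiest to understand from the caller's side. A minimal, hedged sketch of how an HTTP/2-capable connection is usually exercised through httpcore's public pool API is shown below; it assumes an httpcore release that exposes `ConnectionPool(http2=True)` and `.request()`, plus the `h2` package being installed, and the URL is purely illustrative. Direct use of the connection class follows the same `with ...:` pattern as the methods defined next.

```python
import httpcore

# Illustrative sketch only: ask the pool to negotiate HTTP/2 where the server supports it.
with httpcore.ConnectionPool(http2=True) as pool:
    response = pool.request("GET", "https://example.org/")
    print(response.status, len(response.content))
```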
- - def __enter__(self) -> "HTTP2Connection": - return self - - def __exit__( - self, - exc_type: typing.Optional[typing.Type[BaseException]] = None, - exc_value: typing.Optional[BaseException] = None, - traceback: typing.Optional[types.TracebackType] = None, - ) -> None: - self.close() - - -class HTTP2ConnectionByteStream: - def __init__( - self, connection: HTTP2Connection, request: Request, stream_id: int - ) -> None: - self._connection = connection - self._request = request - self._stream_id = stream_id - self._closed = False - - def __iter__(self) -> typing.Iterator[bytes]: - kwargs = {"request": self._request, "stream_id": self._stream_id} - try: - with Trace("receive_response_body", logger, self._request, kwargs): - for chunk in self._connection._receive_response_body( - request=self._request, stream_id=self._stream_id - ): - yield chunk - except BaseException as exc: - # If we get an exception while streaming the response, - # we want to close the response (and possibly the connection) - # before raising that exception. - with ShieldCancellation(): - self.close() - raise exc - - def close(self) -> None: - if not self._closed: - self._closed = True - kwargs = {"stream_id": self._stream_id} - with Trace("response_closed", logger, self._request, kwargs): - self._connection._response_closed(stream_id=self._stream_id) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/backends/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/backends/__init__.py deleted file mode 100644 index 3e687f85b0bec57e161beaf9a150b1dd9c5d5a5a..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/backends/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# NOTE: plt.switch_backend() (called at import time) will add a "backend" -# attribute here for backcompat. -_QT_FORCE_QT5_BINDING = False diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/pipeline_utils.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/pipeline_utils.py deleted file mode 100644 index a03c454e9244e28e98bdcdcc8cdeb340da3f7903..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/pipeline_utils.py +++ /dev/null @@ -1,1396 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import fnmatch -import importlib -import inspect -import os -import re -import warnings -from dataclasses import dataclass -from pathlib import Path -from typing import Any, Callable, Dict, List, Optional, Union - -import numpy as np -import PIL -import torch -from huggingface_hub import hf_hub_download, model_info, snapshot_download -from packaging import version -from PIL import Image -from tqdm.auto import tqdm - -import diffusers - -from .. 
import __version__ -from ..configuration_utils import ConfigMixin -from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT -from ..schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME -from ..utils import ( - CONFIG_NAME, - DEPRECATED_REVISION_ARGS, - DIFFUSERS_CACHE, - HF_HUB_OFFLINE, - SAFETENSORS_WEIGHTS_NAME, - WEIGHTS_NAME, - BaseOutput, - deprecate, - get_class_from_dynamic_module, - is_accelerate_available, - is_accelerate_version, - is_compiled_module, - is_safetensors_available, - is_torch_version, - is_transformers_available, - logging, -) - - -if is_transformers_available(): - import transformers - from transformers import PreTrainedModel - from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME - from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME - from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME - -from ..utils import FLAX_WEIGHTS_NAME, ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME - - -if is_accelerate_available(): - import accelerate - - -INDEX_FILE = "diffusion_pytorch_model.bin" -CUSTOM_PIPELINE_FILE_NAME = "pipeline.py" -DUMMY_MODULES_FOLDER = "diffusers.utils" -TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils" - - -logger = logging.get_logger(__name__) - - -LOADABLE_CLASSES = { - "diffusers": { - "ModelMixin": ["save_pretrained", "from_pretrained"], - "SchedulerMixin": ["save_pretrained", "from_pretrained"], - "DiffusionPipeline": ["save_pretrained", "from_pretrained"], - "OnnxRuntimeModel": ["save_pretrained", "from_pretrained"], - }, - "transformers": { - "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], - "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], - "PreTrainedModel": ["save_pretrained", "from_pretrained"], - "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], - "ProcessorMixin": ["save_pretrained", "from_pretrained"], - "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], - }, - "onnxruntime.training": { - "ORTModule": ["save_pretrained", "from_pretrained"], - }, -} - -ALL_IMPORTABLE_CLASSES = {} -for library in LOADABLE_CLASSES: - ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) - - -@dataclass -class ImagePipelineOutput(BaseOutput): - """ - Output class for image pipelines. - - Args: - images (`List[PIL.Image.Image]` or `np.ndarray`) - List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, - num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. - """ - - images: Union[List[PIL.Image.Image], np.ndarray] - - -@dataclass -class AudioPipelineOutput(BaseOutput): - """ - Output class for audio pipelines. - - Args: - audios (`np.ndarray`) - List of denoised samples of shape `(batch_size, num_channels, sample_rate)`. Numpy array present the - denoised audio samples of the diffusion pipeline. - """ - - audios: np.ndarray - - -def is_safetensors_compatible(filenames, variant=None) -> bool: - """ - Checking for safetensors compatibility: - - By default, all models are saved with the default pytorch serialization, so we use the list of default pytorch - files to know which safetensors files are needed. - - The model is safetensors compatible only if there is a matching safetensors file for every default pytorch file. 
- - Converting default pytorch serialized filenames to safetensors serialized filenames: - - For models from the diffusers library, just replace the ".bin" extension with ".safetensors" - - For models from the transformers library, the filename changes from "pytorch_model" to "model", and the ".bin" - extension is replaced with ".safetensors" - """ - pt_filenames = [] - - sf_filenames = set() - - for filename in filenames: - _, extension = os.path.splitext(filename) - - if extension == ".bin": - pt_filenames.append(filename) - elif extension == ".safetensors": - sf_filenames.add(filename) - - for filename in pt_filenames: - # filename = 'foo/bar/baz.bam' -> path = 'foo/bar', filename = 'baz', extension = '.bam' - path, filename = os.path.split(filename) - filename, extension = os.path.splitext(filename) - - if filename == "pytorch_model": - filename = "model" - elif filename == f"pytorch_model.{variant}": - filename = f"model.{variant}" - else: - filename = filename - - expected_sf_filename = os.path.join(path, filename) - expected_sf_filename = f"{expected_sf_filename}.safetensors" - - if expected_sf_filename not in sf_filenames: - logger.warning(f"{expected_sf_filename} not found") - return False - - return True - - -def variant_compatible_siblings(filenames, variant=None) -> Union[List[os.PathLike], str]: - weight_names = [ - WEIGHTS_NAME, - SAFETENSORS_WEIGHTS_NAME, - FLAX_WEIGHTS_NAME, - ONNX_WEIGHTS_NAME, - ONNX_EXTERNAL_WEIGHTS_NAME, - ] - - if is_transformers_available(): - weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME] - - # model_pytorch, diffusion_model_pytorch, ... - weight_prefixes = [w.split(".")[0] for w in weight_names] - # .bin, .safetensors, ... - weight_suffixs = [w.split(".")[-1] for w in weight_names] - - variant_file_regex = ( - re.compile(f"({'|'.join(weight_prefixes)})(.{variant}.)({'|'.join(weight_suffixs)})") - if variant is not None - else None - ) - non_variant_file_regex = re.compile(f"{'|'.join(weight_names)}") - - if variant is not None: - variant_filenames = {f for f in filenames if variant_file_regex.match(f.split("/")[-1]) is not None} - else: - variant_filenames = set() - - non_variant_filenames = {f for f in filenames if non_variant_file_regex.match(f.split("/")[-1]) is not None} - - usable_filenames = set(variant_filenames) - for f in non_variant_filenames: - variant_filename = f"{f.split('.')[0]}.{variant}.{f.split('.')[1]}" - if variant_filename not in usable_filenames: - usable_filenames.add(f) - - return usable_filenames, variant_filenames - - -def warn_deprecated_model_variant(pretrained_model_name_or_path, use_auth_token, variant, revision, model_filenames): - info = model_info( - pretrained_model_name_or_path, - use_auth_token=use_auth_token, - revision=None, - ) - filenames = {sibling.rfilename for sibling in info.siblings} - comp_model_filenames, _ = variant_compatible_siblings(filenames, variant=revision) - comp_model_filenames = [".".join(f.split(".")[:1] + f.split(".")[2:]) for f in comp_model_filenames] - - if set(comp_model_filenames) == set(model_filenames): - warnings.warn( - f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` even though you can load it via `variant='{revision}'`. Loading model variants via `revision='{revision}'` is deprecated and will be removed in diffusers v1.
Please use `variant='{revision}'` instead.", - FutureWarning, - ) - else: - warnings.warn( - f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have the required variant filenames in the 'main' branch. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {revision} files' so that the correct variant file can be added.", - FutureWarning, - ) - - -def maybe_raise_or_warn( - library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module -): - """Simple helper method to raise or warn in case incorrect module has been passed""" - if not is_pipeline_module: - library = importlib.import_module(library_name) - class_obj = getattr(library, class_name) - class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} - - expected_class_obj = None - for class_name, class_candidate in class_candidates.items(): - if class_candidate is not None and issubclass(class_obj, class_candidate): - expected_class_obj = class_candidate - - # Dynamo wraps the original model in a private class. - # I didn't find a public API to get the original class. - sub_model = passed_class_obj[name] - model_cls = sub_model.__class__ - if is_compiled_module(sub_model): - model_cls = sub_model._orig_mod.__class__ - - if not issubclass(model_cls, expected_class_obj): - raise ValueError( - f"{passed_class_obj[name]} is of type: {type(passed_class_obj[name])}, but should be" - f" {expected_class_obj}" - ) - else: - logger.warning( - f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it" - " has the correct type" - ) - - -def get_class_obj_and_candidates(library_name, class_name, importable_classes, pipelines, is_pipeline_module): - """Simple helper method to retrieve class object of module as well as potential parent class objects""" - if is_pipeline_module: - pipeline_module = getattr(pipelines, library_name) - - class_obj = getattr(pipeline_module, class_name) - class_candidates = {c: class_obj for c in importable_classes.keys()} - else: - # else we just import it from the library. 
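- # (illustrative example: library_name="transformers" and class_name="CLIPTextModel" would resolve to transformers.CLIPTextModel)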
- library = importlib.import_module(library_name) - - class_obj = getattr(library, class_name) - class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} - - return class_obj, class_candidates - - -def load_sub_model( - library_name: str, - class_name: str, - importable_classes: List[Any], - pipelines: Any, - is_pipeline_module: bool, - pipeline_class: Any, - torch_dtype: torch.dtype, - provider: Any, - sess_options: Any, - device_map: Optional[Union[Dict[str, torch.device], str]], - model_variants: Dict[str, str], - name: str, - from_flax: bool, - variant: str, - low_cpu_mem_usage: bool, - cached_folder: Union[str, os.PathLike], -): - """Helper method to load the module `name` from `library_name` and `class_name`""" - # retrieve class candidates - class_obj, class_candidates = get_class_obj_and_candidates( - library_name, class_name, importable_classes, pipelines, is_pipeline_module - ) - - load_method_name = None - # retrieve load method name - for class_name, class_candidate in class_candidates.items(): - if class_candidate is not None and issubclass(class_obj, class_candidate): - load_method_name = importable_classes[class_name][1] - - # if load method name is None, then we have a dummy module -> raise Error - if load_method_name is None: - none_module = class_obj.__module__ - is_dummy_path = none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith( - TRANSFORMERS_DUMMY_MODULES_FOLDER - ) - if is_dummy_path and "dummy" in none_module: - # call class_obj for nice error message of missing requirements - class_obj() - - raise ValueError( - f"The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have" - f" any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}." - ) - - load_method = getattr(class_obj, load_method_name) - - # add kwargs to loading method - loading_kwargs = {} - if issubclass(class_obj, torch.nn.Module): - loading_kwargs["torch_dtype"] = torch_dtype - if issubclass(class_obj, diffusers.OnnxRuntimeModel): - loading_kwargs["provider"] = provider - loading_kwargs["sess_options"] = sess_options - - is_diffusers_model = issubclass(class_obj, diffusers.ModelMixin) - - if is_transformers_available(): - transformers_version = version.parse(version.parse(transformers.__version__).base_version) - else: - transformers_version = "N/A" - - is_transformers_model = ( - is_transformers_available() - and issubclass(class_obj, PreTrainedModel) - and transformers_version >= version.parse("4.20.0") - ) - - # When loading a transformers model, if the device_map is None, the weights will be initialized as opposed to diffusers. - # To make default loading faster we set the `low_cpu_mem_usage=low_cpu_mem_usage` flag which is `True` by default. - # This makes sure that the weights won't be initialized which significantly speeds up loading.
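- # (illustrative: for a transformers text encoder the call below ends up as something like CLIPTextModel.from_pretrained(..., low_cpu_mem_usage=True))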
- if is_diffusers_model or is_transformers_model: - loading_kwargs["device_map"] = device_map - loading_kwargs["variant"] = model_variants.pop(name, None) - if from_flax: - loading_kwargs["from_flax"] = True - - # the following can be deleted once the minimum required `transformers` version - # is higher than 4.27 - if ( - is_transformers_model - and loading_kwargs["variant"] is not None - and transformers_version < version.parse("4.27.0") - ): - raise ImportError( - f"When passing `variant='{variant}'`, please make sure to upgrade your `transformers` version to at least 4.27.0.dev0" - ) - elif is_transformers_model and loading_kwargs["variant"] is None: - loading_kwargs.pop("variant") - - # if `from_flax` and model is transformer model, can currently not load with `low_cpu_mem_usage` - if not (from_flax and is_transformers_model): - loading_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage - else: - loading_kwargs["low_cpu_mem_usage"] = False - - # check if the module is in a subdirectory - if os.path.isdir(os.path.join(cached_folder, name)): - loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs) - else: - # else load from the root directory - loaded_sub_model = load_method(cached_folder, **loading_kwargs) - - return loaded_sub_model - - -class DiffusionPipeline(ConfigMixin): - r""" - Base class for all models. - - [`DiffusionPipeline`] takes care of storing all components (models, schedulers, processors) for diffusion pipelines - and handles methods for loading, downloading and saving models as well as a few methods common to all pipelines to: - - - move all PyTorch modules to the device of your choice - - enabling/disabling the progress bar for the denoising iteration - - Class attributes: - - - **config_name** (`str`) -- name of the config file that will store the class and module names of all - components of the diffusion pipeline. - - **_optional_components** (List[`str`]) -- list of all components that are optional so they don't have to be - passed for the pipeline to function (should be overridden by subclasses). - """ - config_name = "model_index.json" - _optional_components = [] - - def register_modules(self, **kwargs): - # import it here to avoid circular import - from diffusers import pipelines - - for name, module in kwargs.items(): - # retrieve library - if module is None: - register_dict = {name: (None, None)} - else: - # register the original module, not the dynamo compiled one - if is_compiled_module(module): - module = module._orig_mod - - library = module.__module__.split(".")[0] - - # check if the module is a pipeline module - pipeline_dir = module.__module__.split(".")[-2] if len(module.__module__.split(".")) > 2 else None - path = module.__module__.split(".") - is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) - - # if library is not in LOADABLE_CLASSES, then it is a custom module. - # Or if it's a pipeline module, then the module is inside the pipeline - # folder so we set the library to module name. 
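- # (illustrative: a safety checker defined in diffusers.pipelines.stable_diffusion would be registered under the library name "stable_diffusion")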
- if library not in LOADABLE_CLASSES or is_pipeline_module: - library = pipeline_dir - - # retrieve class_name - class_name = module.__class__.__name__ - - register_dict = {name: (library, class_name)} - - # save model index config - self.register_to_config(**register_dict) - - # set models - setattr(self, name, module) - - def save_pretrained( - self, - save_directory: Union[str, os.PathLike], - safe_serialization: bool = False, - variant: Optional[str] = None, - ): - """ - Save all variables of the pipeline that can be saved and loaded as well as the pipelines configuration file to - a directory. A pipeline variable can be saved and loaded if its class implements both a save and loading - method. The pipeline can easily be re-loaded using the `[`~DiffusionPipeline.from_pretrained`]` class method. - - Arguments: - save_directory (`str` or `os.PathLike`): - Directory to which to save. Will be created if it doesn't exist. - safe_serialization (`bool`, *optional*, defaults to `False`): - Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). - variant (`str`, *optional*): - If specified, weights are saved in the format pytorch_model.<variant>.bin. - """ - self.save_config(save_directory) - - model_index_dict = dict(self.config) - model_index_dict.pop("_class_name") - model_index_dict.pop("_diffusers_version") - model_index_dict.pop("_module", None) - - expected_modules, optional_kwargs = self._get_signature_keys(self) - - def is_saveable_module(name, value): - if name not in expected_modules: - return False - if name in self._optional_components and value[0] is None: - return False - return True - - model_index_dict = {k: v for k, v in model_index_dict.items() if is_saveable_module(k, v)} - - for pipeline_component_name in model_index_dict.keys(): - sub_model = getattr(self, pipeline_component_name) - model_cls = sub_model.__class__ - - # Dynamo wraps the original model in a private class. - # I didn't find a public API to get the original class. 
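- # torch.compile() wraps modules in an OptimizedModule that keeps the original module on `_orig_mod`.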
- if is_compiled_module(sub_model): - sub_model = sub_model._orig_mod - model_cls = sub_model.__class__ - - save_method_name = None - # search for the model's base class in LOADABLE_CLASSES - for library_name, library_classes in LOADABLE_CLASSES.items(): - library = importlib.import_module(library_name) - for base_class, save_load_methods in library_classes.items(): - class_candidate = getattr(library, base_class, None) - if class_candidate is not None and issubclass(model_cls, class_candidate): - # if we found a suitable base class in LOADABLE_CLASSES then grab its save method - save_method_name = save_load_methods[0] - break - if save_method_name is not None: - break - - save_method = getattr(sub_model, save_method_name) - - # Call the save method with the argument safe_serialization only if it's supported - save_method_signature = inspect.signature(save_method) - save_method_accept_safe = "safe_serialization" in save_method_signature.parameters - save_method_accept_variant = "variant" in save_method_signature.parameters - - save_kwargs = {} - if save_method_accept_safe: - save_kwargs["safe_serialization"] = safe_serialization - if save_method_accept_variant: - save_kwargs["variant"] = variant - - save_method(os.path.join(save_directory, pipeline_component_name), **save_kwargs) - - def to( - self, - torch_device: Optional[Union[str, torch.device]] = None, - torch_dtype: Optional[torch.dtype] = None, - silence_dtype_warnings: bool = False, - ): - if torch_device is None and torch_dtype is None: - return self - - # throw warning if pipeline is in "offloaded"-mode but user tries to manually set to GPU. - def module_is_sequentially_offloaded(module): - if not is_accelerate_available() or is_accelerate_version("<", "0.14.0"): - return False - - return hasattr(module, "_hf_hook") and not isinstance(module._hf_hook, accelerate.hooks.CpuOffload) - - def module_is_offloaded(module): - if not is_accelerate_available() or is_accelerate_version("<", "0.17.0.dev0"): - return False - - return hasattr(module, "_hf_hook") and isinstance(module._hf_hook, accelerate.hooks.CpuOffload) - - # .to("cuda") would raise an error if the pipeline is sequentially offloaded, so we raise our own to make it clearer - pipeline_is_sequentially_offloaded = any( - module_is_sequentially_offloaded(module) for _, module in self.components.items() - ) - if pipeline_is_sequentially_offloaded and torch.device(torch_device).type == "cuda": - raise ValueError( - "It seems like you have activated sequential model offloading by calling `enable_sequential_cpu_offload`, but are now attempting to move the pipeline to GPU. This is not compatible with offloading. Please, move your pipeline `.to('cpu')` or consider removing the move altogether if you use sequential offloading." - ) - - # Display a warning in this case (the operation succeeds but the benefits are lost) - pipeline_is_offloaded = any(module_is_offloaded(module) for _, module in self.components.items()) - if pipeline_is_offloaded and torch.device(torch_device).type == "cuda": - logger.warning( - f"It seems like you have activated model offloading by calling `enable_model_cpu_offload`, but are now manually moving the pipeline to GPU. It is strongly recommended against doing so as memory gains from offloading are likely to be lost. Offloading automatically takes care of moving the individual components {', '.join(self.components.keys())} to GPU when needed. 
To make sure offloading works as expected, you should consider moving the pipeline back to CPU: `pipeline.to('cpu')` or removing the move altogether if you use offloading." - ) - - module_names, _, _ = self.extract_init_dict(dict(self.config)) - is_offloaded = pipeline_is_offloaded or pipeline_is_sequentially_offloaded - for name in module_names.keys(): - module = getattr(self, name) - if isinstance(module, torch.nn.Module): - module.to(torch_device, torch_dtype) - if ( - module.dtype == torch.float16 - and str(torch_device) in ["cpu"] - and not silence_dtype_warnings - and not is_offloaded - ): - logger.warning( - "Pipelines loaded with `torch_dtype=torch.float16` cannot run with `cpu` device. It" - " is not recommended to move them to `cpu` as running them will fail. Please make" - " sure to use an accelerator to run the pipeline in inference, due to the lack of" - " support for`float16` operations on this device in PyTorch. Please, remove the" - " `torch_dtype=torch.float16` argument, or use another device for inference." - ) - return self - - @property - def device(self) -> torch.device: - r""" - Returns: - `torch.device`: The torch device on which the pipeline is located. - """ - module_names, _, _ = self.extract_init_dict(dict(self.config)) - for name in module_names.keys(): - module = getattr(self, name) - if isinstance(module, torch.nn.Module): - return module.device - return torch.device("cpu") - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): - r""" - Instantiate a PyTorch diffusion pipeline from pre-trained pipeline weights. - - The pipeline is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). - - The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come - pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning - task. - - The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those - weights are discarded. - - Parameters: - pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): - Can be either: - - - A string, the *repo id* of a pretrained pipeline hosted inside a model repo on - https://huggingface.co/ Valid repo ids have to be located under a user or organization name, like - `CompVis/ldm-text2im-large-256`. - - A path to a *directory* containing pipeline weights saved using - [`~DiffusionPipeline.save_pretrained`], e.g., `./my_pipeline_directory/`. - torch_dtype (`str` or `torch.dtype`, *optional*): - Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype - will be automatically derived from the model's weights. - custom_pipeline (`str`, *optional*): - - <Tip warning={true}> - - This is an experimental feature and is likely to change in the future. - - </Tip> - - Can be either: - - - A string, the *repo id* of a custom pipeline hosted inside a model repo on - https://huggingface.co/. Valid repo ids have to be located under a user or organization name, - like `hf-internal-testing/diffusers-dummy-pipeline`. - - <Tip> - - It is required that the model repo has a file, called `pipeline.py` that defines the custom - pipeline. - - </Tip> - - - A string, the *file name* of a community pipeline hosted on GitHub under - https://github.com/huggingface/diffusers/tree/main/examples/community. 
Valid file names have to - match exactly the file name without `.py` located under the above link, *e.g.* - `clip_guided_stable_diffusion`. - - <Tip> - - Community pipelines are always loaded from the current `main` branch of GitHub. - - </Tip> - - - A path to a *directory* containing a custom pipeline, e.g., `./my_pipeline_directory/`. - - <Tip> - - It is required that the directory has a file, called `pipeline.py` that defines the custom - pipeline. - - </Tip> - - For more information on how to load and create custom pipelines, please have a look at [Loading and - Adding Custom - Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview) - - force_download (`bool`, *optional*, defaults to `False`): - Whether or not to force the (re-)download of the model weights and configuration files, overriding the - cached versions if they exist. - cache_dir (`Union[str, os.PathLike]`, *optional*): - Path to a directory in which a downloaded pretrained model configuration should be cached if the - standard cache should not be used. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to delete incompletely received files. Will attempt to resume the download if such a - file exists. - proxies (`Dict[str, str]`, *optional*): - A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', - 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. - output_loading_info(`bool`, *optional*, defaults to `False`): - Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. - local_files_only(`bool`, *optional*, defaults to `False`): - Whether or not to only look at local files (i.e., do not try to download the model). - use_auth_token (`str` or *bool*, *optional*): - The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `huggingface-cli login` (stored in `~/.huggingface`). - revision (`str`, *optional*, defaults to `"main"`): - The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a - git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any - identifier allowed by git. - custom_revision (`str`, *optional*, defaults to `"main"` when loading from the Hub and to local version of `diffusers` when loading from GitHub): - The specific model version to use. It can be a branch name, a tag name, or a commit id similar to - `revision` when loading a custom pipeline from the Hub. It can be a diffusers version when loading a - custom pipeline from GitHub. - mirror (`str`, *optional*): - Mirror source to accelerate downloads in China. If you are from China and have an accessibility - problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. - Please refer to the mirror site for more information. specify the folder name here. - device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): - A map that specifies where each submodule should go. It doesn't need to be refined to each - parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the - same device. - - To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For - more information about each option see [designing a device - map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). 
- low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): - Speed up model loading by not initializing the weights and only loading the pre-trained weights. This - also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the - model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch, - setting this argument to `True` will raise an error. - use_safetensors (`bool`, *optional*): - If set to `True`, the pipeline will be loaded from `safetensors` weights. If set to `None` (the - default), the pipeline will load using `safetensors` if the safetensors weights are available *and* if - `safetensors` is installed. If set to `False`, the pipeline will *not* use `safetensors`. - kwargs (remaining dictionary of keyword arguments, *optional*): - Can be used to overwrite load - and saveable variables - *i.e.* the pipeline components - of the - specific pipeline class. The overwritten components are then directly passed to the pipelines - `__init__` method. See example below for more information. - variant (`str`, *optional*): - If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is - ignored when using `from_flax`. - - <Tip> - - It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated - models](https://huggingface.co/docs/hub/models-gated#gated-models), *e.g.* `"runwayml/stable-diffusion-v1-5"` - - </Tip> - - <Tip> - - Activate the special ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use - this method in a firewalled environment. - - </Tip> - - Examples: - - ```py - >>> from diffusers import DiffusionPipeline - - >>> # Download pipeline from huggingface.co and cache. - >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") - - >>> # Download pipeline that requires an authorization token - >>> # For more information on access tokens, please refer to this section - >>> # of the documentation](https://huggingface.co/docs/hub/security-tokens) - >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") - - >>> # Use a different scheduler - >>> from diffusers import LMSDiscreteScheduler - - >>> scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config) - >>> pipeline.scheduler = scheduler - ``` - """ - cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE) - resume_download = kwargs.pop("resume_download", False) - force_download = kwargs.pop("force_download", False) - proxies = kwargs.pop("proxies", None) - local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE) - use_auth_token = kwargs.pop("use_auth_token", None) - revision = kwargs.pop("revision", None) - from_flax = kwargs.pop("from_flax", False) - torch_dtype = kwargs.pop("torch_dtype", None) - custom_pipeline = kwargs.pop("custom_pipeline", None) - custom_revision = kwargs.pop("custom_revision", None) - provider = kwargs.pop("provider", None) - sess_options = kwargs.pop("sess_options", None) - device_map = kwargs.pop("device_map", None) - low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) - variant = kwargs.pop("variant", None) - kwargs.pop("use_safetensors", None if is_safetensors_available() else False) - - # 1.
Download the checkpoints and configs - # use snapshot download here to get it working from from_pretrained - if not os.path.isdir(pretrained_model_name_or_path): - cached_folder = cls.download( - pretrained_model_name_or_path, - cache_dir=cache_dir, - resume_download=resume_download, - force_download=force_download, - proxies=proxies, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - revision=revision, - from_flax=from_flax, - custom_pipeline=custom_pipeline, - variant=variant, - ) - else: - cached_folder = pretrained_model_name_or_path - - config_dict = cls.load_config(cached_folder) - - # 2. Define which model components should load variants - # We retrieve the information by matching whether variant - # model checkpoints exist in the subfolders - model_variants = {} - if variant is not None: - for folder in os.listdir(cached_folder): - folder_path = os.path.join(cached_folder, folder) - is_folder = os.path.isdir(folder_path) and folder in config_dict - variant_exists = is_folder and any(path.split(".")[1] == variant for path in os.listdir(folder_path)) - if variant_exists: - model_variants[folder] = variant - - # 3. Load the pipeline class, if using custom module then load it from the hub - # if we load from explicit class, let's use it - if custom_pipeline is not None: - if custom_pipeline.endswith(".py"): - path = Path(custom_pipeline) - # decompose into folder & file - file_name = path.name - custom_pipeline = path.parent.absolute() - else: - file_name = CUSTOM_PIPELINE_FILE_NAME - - pipeline_class = get_class_from_dynamic_module( - custom_pipeline, module_file=file_name, cache_dir=cache_dir, revision=custom_revision - ) - elif cls != DiffusionPipeline: - pipeline_class = cls - else: - diffusers_module = importlib.import_module(cls.__module__.split(".")[0]) - pipeline_class = getattr(diffusers_module, config_dict["_class_name"]) - - # DEPRECATED: To be removed in 1.0.0 - if pipeline_class.__name__ == "StableDiffusionInpaintPipeline" and version.parse( - version.parse(config_dict["_diffusers_version"]).base_version - ) <= version.parse("0.5.1"): - from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy - - pipeline_class = StableDiffusionInpaintPipelineLegacy - - deprecation_message = ( - "You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the" - f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For" - " better inpainting results, we strongly suggest using Stable Diffusion's official inpainting" - " checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your" - f" checkpoint {pretrained_model_name_or_path} to the format of" - " https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain" - " the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0." - ) - deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False) - - # 4. 
Define expected modules given pipeline signature - # and define non-None initialized modules (=`init_kwargs`) - - # some modules can be passed directly to the init - # in this case they are already instantiated in `kwargs` - # extract them here - expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class) - passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} - passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} - - init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs) - - # define init kwargs - init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict} - init_kwargs = {**init_kwargs, **passed_pipe_kwargs} - - # remove `null` components - def load_module(name, value): - if value[0] is None: - return False - if name in passed_class_obj and passed_class_obj[name] is None: - return False - return True - - init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)} - - # Special case: safety_checker must be loaded separately when using `from_flax` - if from_flax and "safety_checker" in init_dict and "safety_checker" not in passed_class_obj: - raise NotImplementedError( - "The safety checker cannot be automatically loaded when loading weights `from_flax`." - " Please, pass `safety_checker=None` to `from_pretrained`, and load the safety checker" - " separately if you need it." - ) - - # 5. Throw nice warnings / errors for fast accelerate loading - if len(unused_kwargs) > 0: - logger.warning( - f"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored." - ) - - if low_cpu_mem_usage and not is_accelerate_available(): - low_cpu_mem_usage = False - logger.warning( - "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" - " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" - " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" - " install accelerate\n```\n." - ) - - if device_map is not None and not is_torch_version(">=", "1.9.0"): - raise NotImplementedError( - "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set" - " `device_map=None`." - ) - - if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): - raise NotImplementedError( - "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" - " `low_cpu_mem_usage=False`." - ) - - if low_cpu_mem_usage is False and device_map is not None: - raise ValueError( - f"You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and" - " dispatching. Please make sure to set `low_cpu_mem_usage=True`." - ) - - # import it here to avoid circular import - from diffusers import pipelines - - # 6. 
Load each module in the pipeline - for name, (library_name, class_name) in init_dict.items(): - # 6.1 - now that JAX/Flax is an official framework of the library, we might load from Flax names - if class_name.startswith("Flax"): - class_name = class_name[4:] - - # 6.2 Define all importable classes - is_pipeline_module = hasattr(pipelines, library_name) - importable_classes = ALL_IMPORTABLE_CLASSES if is_pipeline_module else LOADABLE_CLASSES[library_name] - loaded_sub_model = None - - # 6.3 Use passed sub model or load class_name from library_name - if name in passed_class_obj: - # if the model is in a pipeline module, then we load it from the pipeline - # check that passed_class_obj has correct parent class - maybe_raise_or_warn( - library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module - ) - - loaded_sub_model = passed_class_obj[name] - else: - # load sub model - loaded_sub_model = load_sub_model( - library_name=library_name, - class_name=class_name, - importable_classes=importable_classes, - pipelines=pipelines, - is_pipeline_module=is_pipeline_module, - pipeline_class=pipeline_class, - torch_dtype=torch_dtype, - provider=provider, - sess_options=sess_options, - device_map=device_map, - model_variants=model_variants, - name=name, - from_flax=from_flax, - variant=variant, - low_cpu_mem_usage=low_cpu_mem_usage, - cached_folder=cached_folder, - ) - - init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...) - - # 7. Potentially add passed objects if expected - missing_modules = set(expected_modules) - set(init_kwargs.keys()) - passed_modules = list(passed_class_obj.keys()) - optional_modules = pipeline_class._optional_components - if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules): - for module in missing_modules: - init_kwargs[module] = passed_class_obj.get(module, None) - elif len(missing_modules) > 0: - passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs - raise ValueError( - f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed." - ) - - # 8. Instantiate the pipeline - model = pipeline_class(**init_kwargs) - - return_cached_folder = kwargs.pop("return_cached_folder", False) - if return_cached_folder: - message = f"Passing `return_cached_folder=True` is deprecated and will be removed in `diffusers=0.17.0`. Please do the following instead: \n 1. Load the cached_folder via `cached_folder={cls}.download({pretrained_model_name_or_path})`. \n 2. Load the pipeline by loading from the cached folder: `pipeline={cls}.from_pretrained(cached_folder)`." - deprecate("return_cached_folder", "0.17.0", message, take_from=kwargs) - return model, cached_folder - - return model - - @classmethod - def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: - r""" - Download and cache a PyTorch diffusion pipeline from pre-trained pipeline weights. - - Parameters: - pretrained_model_name (`str` or `os.PathLike`, *optional*): - Should be a string, the *repo id* of a pretrained pipeline hosted inside a model repo on - https://huggingface.co/ Valid repo ids have to be located under a user or organization name, like - `CompVis/ldm-text2im-large-256`. - custom_pipeline (`str`, *optional*): - - <Tip warning={true}> - - This is an experimental feature and is likely to change in the future. 
- - </Tip> - - Can be either: - - - A string, the *repo id* of a custom pipeline hosted inside a model repo on - https://huggingface.co/. Valid repo ids have to be located under a user or organization name, - like `hf-internal-testing/diffusers-dummy-pipeline`. - - <Tip> - - It is required that the model repo has a file, called `pipeline.py` that defines the custom - pipeline. - - </Tip> - - - A string, the *file name* of a community pipeline hosted on GitHub under - https://github.com/huggingface/diffusers/tree/main/examples/community. Valid file names have to - match exactly the file name without `.py` located under the above link, *e.g.* - `clip_guided_stable_diffusion`. - - <Tip> - - Community pipelines are always loaded from the current `main` branch of GitHub. - - </Tip> - - - A path to a *directory* containing a custom pipeline, e.g., `./my_pipeline_directory/`. - - <Tip> - - It is required that the directory has a file, called `pipeline.py` that defines the custom - pipeline. - - </Tip> - - For more information on how to load and create custom pipelines, please have a look at [Loading and - Adding Custom - Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview) - - force_download (`bool`, *optional*, defaults to `False`): - Whether or not to force the (re-)download of the model weights and configuration files, overriding the - cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to delete incompletely received files. Will attempt to resume the download if such a - file exists. - proxies (`Dict[str, str]`, *optional*): - A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', - 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. - output_loading_info(`bool`, *optional*, defaults to `False`): - Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. - local_files_only(`bool`, *optional*, defaults to `False`): - Whether or not to only look at local files (i.e., do not try to download the model). - use_auth_token (`str` or *bool*, *optional*): - The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `huggingface-cli login` (stored in `~/.huggingface`). - revision (`str`, *optional*, defaults to `"main"`): - The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a - git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any - identifier allowed by git. - custom_revision (`str`, *optional*, defaults to `"main"` when loading from the Hub and to local version of - `diffusers` when loading from GitHub): - The specific model version to use. It can be a branch name, a tag name, or a commit id similar to - `revision` when loading a custom pipeline from the Hub. It can be a diffusers version when loading a - custom pipeline from GitHub. - mirror (`str`, *optional*): - Mirror source to accelerate downloads in China. If you are from China and have an accessibility - problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. - Please refer to the mirror site for more information. specify the folder name here. - variant (`str`, *optional*): - If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is - ignored when using `from_flax`. 
- - <Tip> - - It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated - models](https://huggingface.co/docs/hub/models-gated#gated-models) - - </Tip> - - """ - cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE) - resume_download = kwargs.pop("resume_download", False) - force_download = kwargs.pop("force_download", False) - proxies = kwargs.pop("proxies", None) - local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE) - use_auth_token = kwargs.pop("use_auth_token", None) - revision = kwargs.pop("revision", None) - from_flax = kwargs.pop("from_flax", False) - custom_pipeline = kwargs.pop("custom_pipeline", None) - variant = kwargs.pop("variant", None) - use_safetensors = kwargs.pop("use_safetensors", None) - - if use_safetensors and not is_safetensors_available(): - raise ValueError( - "`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetensors`." - ) - - allow_pickle = False - if use_safetensors is None: - use_safetensors = is_safetensors_available() - allow_pickle = True - - pipeline_is_cached = False - allow_patterns = None - ignore_patterns = None - - if not local_files_only: - config_file = hf_hub_download( - pretrained_model_name, - cls.config_name, - cache_dir=cache_dir, - revision=revision, - proxies=proxies, - force_download=force_download, - resume_download=resume_download, - use_auth_token=use_auth_token, - ) - info = model_info( - pretrained_model_name, - use_auth_token=use_auth_token, - revision=revision, - ) - - config_dict = cls._dict_from_json_file(config_file) - # retrieve all folder_names that contain relevant files - folder_names = [k for k, v in config_dict.items() if isinstance(v, list)] - - filenames = {sibling.rfilename for sibling in info.siblings} - model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant) - - # if the whole pipeline is cached we don't have to ping the Hub - if revision in DEPRECATED_REVISION_ARGS and version.parse( - version.parse(__version__).base_version - ) >= version.parse("0.17.0"): - warn_deprecated_model_variant( - pretrained_model_name, use_auth_token, variant, revision, model_filenames - ) - - model_folder_names = {os.path.split(f)[0] for f in model_filenames} - - # all filenames compatible with variant will be added - allow_patterns = list(model_filenames) - - # allow all patterns from non-model folders - # this enables downloading schedulers, tokenizers, ...
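- # (illustrative: a repo with scheduler/, tokenizer/ and feature_extractor/ folders would keep all files under those folders)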
- allow_patterns += [os.path.join(k, "*") for k in folder_names if k not in model_folder_names] - # also allow downloading config.json files with the model - allow_patterns += [os.path.join(k, "*.json") for k in model_folder_names] - - allow_patterns += [ - SCHEDULER_CONFIG_NAME, - CONFIG_NAME, - cls.config_name, - CUSTOM_PIPELINE_FILE_NAME, - ] - - if ( - use_safetensors - and not allow_pickle - and not is_safetensors_compatible(model_filenames, variant=variant) - ): - raise EnvironmentError( - f"Could not find the necessary `safetensors` weights in {model_filenames} (variant={variant})" - ) - if from_flax: - ignore_patterns = ["*.bin", "*.safetensors", "*.onnx", "*.pb"] - elif use_safetensors and is_safetensors_compatible(model_filenames, variant=variant): - ignore_patterns = ["*.bin", "*.msgpack"] - - safetensors_variant_filenames = {f for f in variant_filenames if f.endswith(".safetensors")} - safetensors_model_filenames = {f for f in model_filenames if f.endswith(".safetensors")} - if ( - len(safetensors_variant_filenames) > 0 - and safetensors_model_filenames != safetensors_variant_filenames - ): - logger.warn( - f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(safetensors_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(safetensors_model_filenames - safetensors_variant_filenames)}]\nIf this behavior is not expected, please check your folder structure." - ) - else: - ignore_patterns = ["*.safetensors", "*.msgpack"] - - bin_variant_filenames = {f for f in variant_filenames if f.endswith(".bin")} - bin_model_filenames = {f for f in model_filenames if f.endswith(".bin")} - if len(bin_variant_filenames) > 0 and bin_model_filenames != bin_variant_filenames: - logger.warn( - f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(bin_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(bin_model_filenames - bin_variant_filenames)}]\nIf this behavior is not expected, please check your folder structure."
- ) - - re_ignore_pattern = [re.compile(fnmatch.translate(p)) for p in ignore_patterns] - re_allow_pattern = [re.compile(fnmatch.translate(p)) for p in allow_patterns] - - expected_files = [f for f in filenames if not any(p.match(f) for p in re_ignore_pattern)] - expected_files = [f for f in expected_files if any(p.match(f) for p in re_allow_pattern)] - - snapshot_folder = Path(config_file).parent - pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files) - - if pipeline_is_cached: - # if the pipeline is cached, we can directly return it - # else call snapshot_download - return snapshot_folder - - user_agent = {"pipeline_class": cls.__name__} - if custom_pipeline is not None and not custom_pipeline.endswith(".py"): - user_agent["custom_pipeline"] = custom_pipeline - - # download all allow_patterns - ignore_patterns - cached_folder = snapshot_download( - pretrained_model_name, - cache_dir=cache_dir, - resume_download=resume_download, - proxies=proxies, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - revision=revision, - allow_patterns=allow_patterns, - ignore_patterns=ignore_patterns, - user_agent=user_agent, - ) - - return cached_folder - - @staticmethod - def _get_signature_keys(obj): - parameters = inspect.signature(obj.__init__).parameters - required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty} - optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty}) - expected_modules = set(required_parameters.keys()) - {"self"} - return expected_modules, optional_parameters - - @property - def components(self) -> Dict[str, Any]: - r""" - - The `self.components` property can be useful to run different pipelines with the same weights and - configurations to not have to re-allocate memory. - - Examples: - - ```py - >>> from diffusers import ( - ... StableDiffusionPipeline, - ... StableDiffusionImg2ImgPipeline, - ... StableDiffusionInpaintPipeline, - ... ) - - >>> text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") - >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components) - >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components) - ``` - - Returns: - A dictionary containing all the modules needed to initialize the pipeline. - """ - expected_modules, optional_parameters = self._get_signature_keys(self) - components = { - k: getattr(self, k) for k in self.config.keys() if not k.startswith("_") and k not in optional_parameters - } - - if set(components.keys()) != expected_modules: - raise ValueError( - f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected" - f" {expected_modules} to be defined, but {components.keys()} are defined." - ) - - return components - - @staticmethod - def numpy_to_pil(images): - """ - Convert a numpy image or a batch of images to a PIL image. - """ - if images.ndim == 3: - images = images[None, ...] 
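- # the input is assumed to be float arrays with values in [0, 1]; the next step scales them to 8-bit integers before constructing PIL images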
- images = (images * 255).round().astype("uint8") - if images.shape[-1] == 1: - # special case for grayscale (single channel) images - pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] - else: - pil_images = [Image.fromarray(image) for image in images] - - return pil_images - - def progress_bar(self, iterable=None, total=None): - if not hasattr(self, "_progress_bar_config"): - self._progress_bar_config = {} - elif not isinstance(self._progress_bar_config, dict): - raise ValueError( - f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}." - ) - - if iterable is not None: - return tqdm(iterable, **self._progress_bar_config) - elif total is not None: - return tqdm(total=total, **self._progress_bar_config) - else: - raise ValueError("Either `total` or `iterable` has to be defined.") - - def set_progress_bar_config(self, **kwargs): - self._progress_bar_config = kwargs - - def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): - r""" - Enable memory efficient attention as implemented in xformers. - - When this option is enabled, you should observe lower GPU memory usage and a potential speed up at inference - time. Speed up at training time is not guaranteed. - - Warning: When Memory Efficient Attention and Sliced attention are both enabled, the Memory Efficient Attention - is used. - - Parameters: - attention_op (`Callable`, *optional*): - Override the default `None` operator for use as `op` argument to the - [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) - function of xFormers. - - Examples: - - ```py - >>> import torch - >>> from diffusers import DiffusionPipeline - >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp - - >>> pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16) - >>> pipe = pipe.to("cuda") - >>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) - >>> # Workaround for not accepting attention shape using VAE for Flash Attention - >>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None) - ``` - """ - self.set_use_memory_efficient_attention_xformers(True, attention_op) - - def disable_xformers_memory_efficient_attention(self): - r""" - Disable memory efficient attention as implemented in xformers. - """ - self.set_use_memory_efficient_attention_xformers(False) - - def set_use_memory_efficient_attention_xformers( - self, valid: bool, attention_op: Optional[Callable] = None - ) -> None: - # Recursively walk through all the children. - # Any children which exposes the set_use_memory_efficient_attention_xformers method - # gets the message - def fn_recursive_set_mem_eff(module: torch.nn.Module): - if hasattr(module, "set_use_memory_efficient_attention_xformers"): - module.set_use_memory_efficient_attention_xformers(valid, attention_op) - - for child in module.children(): - fn_recursive_set_mem_eff(child) - - module_names, _, _ = self.extract_init_dict(dict(self.config)) - for module_name in module_names: - module = getattr(self, module_name) - if isinstance(module, torch.nn.Module): - fn_recursive_set_mem_eff(module) - - def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): - r""" - Enable sliced attention computation. 
- - When this option is enabled, the attention module will split the input tensor in slices, to compute attention - in several steps. This is useful to save some memory in exchange for a small speed decrease. - - Args: - slice_size (`str` or `int`, *optional*, defaults to `"auto"`): - When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If - `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is - provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` - must be a multiple of `slice_size`. - """ - self.set_attention_slice(slice_size) - - def disable_attention_slicing(self): - r""" - Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go - back to computing attention in one step. - """ - # set slice_size = `None` to disable `attention slicing` - self.enable_attention_slicing(None) - - def set_attention_slice(self, slice_size: Optional[int]): - module_names, _, _ = self.extract_init_dict(dict(self.config)) - for module_name in module_names: - module = getattr(self, module_name) - if isinstance(module, torch.nn.Module) and hasattr(module, "set_attention_slice"): - module.set_attention_slice(slice_size) diff --git a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/arcface_torch/configs/3millions_pfc.py b/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/arcface_torch/configs/3millions_pfc.py deleted file mode 100644 index 77caafdbb300d8109d5bfdb844f131710ef81f20..0000000000000000000000000000000000000000 --- a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/arcface_torch/configs/3millions_pfc.py +++ /dev/null @@ -1,23 +0,0 @@ -from easydict import EasyDict as edict - -# configs for test speed - -config = edict() -config.loss = "arcface" -config.network = "r50" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 0.1 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "synthetic" -config.num_classes = 300 * 10000 -config.num_epoch = 30 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = [] diff --git a/spaces/delmaksym/Huggy/README.md b/spaces/delmaksym/Huggy/README.md deleted file mode 100644 index 8ed01098734fca978b02080c610af11075d900ed..0000000000000000000000000000000000000000 --- a/spaces/delmaksym/Huggy/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Huggy -emoji: 🐶 -colorFrom: red -colorTo: indigo -sdk: static -pinned: false -license: cc-by-nc-sa-4.0 -duplicated_from: ThomasSimonini/Huggy ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/diacanFperku/AutoGPT/Crack Do Gry Mustang Z Zielonej Doliny.md b/spaces/diacanFperku/AutoGPT/Crack Do Gry Mustang Z Zielonej Doliny.md deleted file mode 100644 index 80a5950b9fd10e923442cc92112099b81d681a62..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Crack Do Gry Mustang Z Zielonej Doliny.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>crack do gry mustang z zielonej doliny</h2><br /><p><b><b>Download</b> &#128505; <a href="https://gohhs.com/2uFVOV">https://gohhs.com/2uFVOV</a></b></p><br /><br /> -<br /> -Our heavy industry brochure: Standard ropes often do not meet the high requirements of … Problems ... 
crack do gry mustang z zielonej doliny 1fdad05405<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/diacanFperku/AutoGPT/Ex4 To Mq4 4 0 432.md b/spaces/diacanFperku/AutoGPT/Ex4 To Mq4 4 0 432.md deleted file mode 100644 index 38809f46f5d8d9c8485a4e77eac07f83bde1147f..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Ex4 To Mq4 4 0 432.md +++ /dev/null @@ -1,10 +0,0 @@ - -<p>trading training software that helps you understand market psychology. with our charting software, highly customizable technical indicators, and custom. 710, 4. 0, chicago mercantile exchange, r4a, 239742, 200, week 4 monday european. 472, 02, chicago mercantile exchange, r4a, 239742, 200, week 4 monday european. </p> -<h2>Ex4 To Mq4 4 0 432</h2><br /><p><b><b>Download Zip</b> &#10031;&#10031;&#10031; <a href="https://gohhs.com/2uFU70">https://gohhs.com/2uFU70</a></b></p><br /><br /> -<p>apr 26, 2016 by mjfry on exchange is best with binary options with forex 4. 0, 432. this document will be updated as our exchange offers are. ex4, 930,128, 930,128, 930,128, 845. 237, 472, ex4, 00, chicago mercantile. </p> -<p>alignment of a rod of iron from iron cobaltite material to a steel standard (1045) using methodology described in:. generated by ex4-to-mq4 decompiler v4.0.427.4 - website: e-mail: purebeamgmail.com 2012-10-04 by capella at. ex4 to mq4 4 0 432 basic cendrillon aff ex4 to. decompiler engine. ex4 or mq4 compile ex4 file 0 x 32. ex4 and mq4 extension are. ex4 decompiler to mq4 crack. ex4 to mq4 4 0 432 crack. ex4 t erm a mq4 or. </p> -<p>alignment of a rod of iron from iron cobaltite material to a steel standard (1045) using methodology described in:. ex4 to mq4 4 0 432 basic cendrillon aff ex4 to. decompiler engine. ex4 or mq4 compile ex4 file 0 x 32. ex4 and mq4 extension are. ex4 decompiler to mq4 crack. ex4 to mq4 4 0 432 crack. ex4 t erm a mq4 or. </p> -<p></p> -<p>tradestation protocols for mt4 metatrader 4: complete metatrader (mt4) for tradestation now includes access to all. ex4 to mq4 4 0 432 ex4 file to mq4 boot. ex4 to mq4 4 0 432 extension. 
decompilers basic/advanced/professional add-ons: creation tools, indices, ex4 to mq4 4 0 432 locate resources, reporter.</p> 899543212b<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/mel_processing.py b/spaces/digitalxingtong/Jiuxia-Bert-Vits2/mel_processing.py deleted file mode 100644 index 50435ecf88ef4fb6c1d47f3e6edd04c3ea7d3e80..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/mel_processing.py +++ /dev/null @@ -1,112 +0,0 @@ -import math -import os -import random -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = 
torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/digitalxingtong/Nailv-read-Bert-Vits2/text/tone_sandhi.py b/spaces/digitalxingtong/Nailv-read-Bert-Vits2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nailv-read-Bert-Vits2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', 
'学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. - # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 
一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/text/japanese.py b/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/text/japanese.py deleted file mode 100644 index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/text/japanese.py +++ /dev/null @@ -1,104 +0,0 @@ -# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py -import re -import sys - -import pyopenjtalk - -from text import symbols - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def preprocess_jap(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = [] - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - p = pyopenjtalk.g2p(sentence) - text += p.split(" ") - - if i < len(marks): - text += [marks[i].replace(' ', '')] - return text - -def text_normalize(text): - # todo: jap text normalize - return text - -def g2p(norm_text): - phones = preprocess_jap(norm_text) - phones = [post_replace_ph(i) for i in phones] - # todo: implement tones and word2ph - tones = [0 for i in phones] - word2ph = [1 for i in phones] - return phones, tones, word2ph - - -if __name__ == '__main__': - for line in open("../../../Downloads/transcript_utf8.txt").readlines(): - text = line.split(":")[1] - phones, tones, word2ph = g2p(text) - for p in phones: - if p == "z": - print(text, phones) - sys.exit(0) diff --git a/spaces/dineshreddy/WALT/cwalt/utils.py b/spaces/dineshreddy/WALT/cwalt/utils.py deleted file mode 100644 index 
57f8e05a01cb4895dd95a4175f96a35974ee3ea3..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/cwalt/utils.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri May 20 15:16:56 2022 - -@author: dinesh -""" - -import json -import cv2 -from PIL import Image -import numpy as np -from dateutil.parser import parse - -def bb_intersection_over_union(box1, box2): - #print(box1, box2) - boxA = box1.copy() - boxB = box2.copy() - boxA[2] = boxA[0]+boxA[2] - boxA[3] = boxA[1]+boxA[3] - boxB[2] = boxB[0]+boxB[2] - boxB[3] = boxB[1]+boxB[3] - # determine the (x, y)-coordinates of the intersection rectangle - xA = max(boxA[0], boxB[0]) - yA = max(boxA[1], boxB[1]) - xB = min(boxA[2], boxB[2]) - yB = min(boxA[3], boxB[3]) - - # compute the area of intersection rectangle - interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0)) - - if interArea == 0: - return 0 - # compute the area of both the prediction and ground-truth - # rectangles - boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1])) - boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1])) - - # compute the intersection over union by taking the intersection - # area and dividing it by the sum of prediction + ground-truth - # areas - the interesection area - iou = interArea / float(boxAArea + boxBArea - interArea) - return iou - -def bb_intersection_over_union_unoccluded(box1, box2, threshold=0.01): - #print(box1, box2) - boxA = box1.copy() - boxB = box2.copy() - boxA[2] = boxA[0]+boxA[2] - boxA[3] = boxA[1]+boxA[3] - boxB[2] = boxB[0]+boxB[2] - boxB[3] = boxB[1]+boxB[3] - # determine the (x, y)-coordinates of the intersection rectangle - xA = max(boxA[0], boxB[0]) - yA = max(boxA[1], boxB[1]) - xB = min(boxA[2], boxB[2]) - yB = min(boxA[3], boxB[3]) - - # compute the area of intersection rectangle - interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0)) - - if interArea == 0: - return 0 - # compute the area of both the prediction and ground-truth - # rectangles - boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1])) - boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1])) - - # compute the intersection over union by taking the intersection - # area and dividing it by the sum of prediction + ground-truth - # areas - the interesection area - iou = interArea / float(boxAArea + boxBArea - interArea) - - #print(iou) - # return the intersection over union value - occlusion = False - if iou > threshold and iou < 1: - #print(boxA[3], boxB[3], boxB[1]) - if boxA[3] < boxB[3]:# and boxA[3] > boxB[1]: - if boxB[2] > boxA[0]:# and boxB[2] < boxA[2]: - #print('first', (boxB[2] - boxA[0])/(boxA[2] - boxA[0])) - if (min(boxB[2],boxA[2]) - boxA[0])/(boxA[2] - boxA[0]) > threshold: - occlusion = True - - if boxB[0] < boxA[2]: # boxB[0] > boxA[0] and - #print('second', (boxA[2] - boxB[0])/(boxA[2] - boxA[0])) - if (boxA[2] - max(boxB[0],boxA[0]))/(boxA[2] - boxA[0]) > threshold: - occlusion = True - if occlusion == False: - iou = iou*0 - #asas - # asas - #iou = 0.9 #iou*0 - #print(box1, box2, iou, occlusion) - return iou -def draw_tracks(image, tracks): - """ - Draw on input image. - - Args: - image (numpy.ndarray): image - tracks (list): list of tracks to be drawn on the image. - - Returns: - numpy.ndarray: image with the track-ids drawn on it. 
- """ - - for trk in tracks: - - trk_id = trk[1] - xmin = trk[2] - ymin = trk[3] - width = trk[4] - height = trk[5] - - xcentroid, ycentroid = int(xmin + 0.5*width), int(ymin + 0.5*height) - - text = "ID {}".format(trk_id) - - cv2.putText(image, text, (xcentroid - 10, ycentroid - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) - cv2.circle(image, (xcentroid, ycentroid), 4, (0, 255, 0), -1) - - return image - - -def draw_bboxes(image, tracks): - """ - Draw the bounding boxes about detected objects in the image. - - Args: - image (numpy.ndarray): Image or video frame. - bboxes (numpy.ndarray): Bounding boxes pixel coordinates as (xmin, ymin, width, height) - confidences (numpy.ndarray): Detection confidence or detection probability. - class_ids (numpy.ndarray): Array containing class ids (aka label ids) of each detected object. - - Returns: - numpy.ndarray: image with the bounding boxes drawn on it. - """ - - for trk in tracks: - xmin = int(trk[2]) - ymin = int(trk[3]) - width = int(trk[4]) - height = int(trk[5]) - clr = (np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)) - cv2.rectangle(image, (xmin, ymin), (xmin + width, ymin + height), clr, 2) - - return image - - -def num(v): - number_as_float = float(v) - number_as_int = int(number_as_float) - return number_as_int if number_as_float == number_as_int else number_as_float - - -def parse_bbox(bbox_str): - bbox_list = bbox_str.strip('{').strip('}').split(',') - bbox_list = [num(elem) for elem in bbox_list] - return bbox_list - -def parse_seg(bbox_str): - bbox_list = bbox_str.strip('{').strip('}').split(',') - bbox_list = [num(elem) for elem in bbox_list] - ret = bbox_list # [] - # for i in range(0, len(bbox_list) - 1, 2): - # ret.append((bbox_list[i], bbox_list[i + 1])) - return ret diff --git a/spaces/divilis/chatgpt/README.md b/spaces/divilis/chatgpt/README.md deleted file mode 100644 index e480de7b25ab44894a247cf70e9954fd1b15f934..0000000000000000000000000000000000000000 --- a/spaces/divilis/chatgpt/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ChuanhuChatGPT -emoji: 🐯 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.22.1 -app_file: app.py -pinned: false -license: gpl-3.0 -duplicated_from: JohnSmith9982/ChuanhuChatGPT ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/divyahansg/text-generation-webui-space/download-model.py b/spaces/divyahansg/text-generation-webui-space/download-model.py deleted file mode 100644 index 8be398c4e0d3ca0c0a915efb442f432fc2056834..0000000000000000000000000000000000000000 --- a/spaces/divyahansg/text-generation-webui-space/download-model.py +++ /dev/null @@ -1,176 +0,0 @@ -''' -Downloads models from Hugging Face to models/model-name. 
- -Example: -python download-model.py facebook/opt-1.3b - -''' - -import argparse -import base64 -import json -import multiprocessing -import re -import sys -from pathlib import Path - -import requests -import tqdm - -parser = argparse.ArgumentParser() -parser.add_argument('MODEL', type=str, default=None, nargs='?') -parser.add_argument('--branch', type=str, default='main', help='Name of the Git branch to download from.') -parser.add_argument('--threads', type=int, default=1, help='Number of files to download simultaneously.') -parser.add_argument('--text-only', action='store_true', help='Only download text files (txt/json).') -args = parser.parse_args() - -def get_file(args): - url = args[0] - output_folder = args[1] - idx = args[2] - tot = args[3] - - print(f"Downloading file {idx} of {tot}...") - r = requests.get(url, stream=True) - with open(output_folder / Path(url.split('/')[-1]), 'wb') as f: - total_size = int(r.headers.get('content-length', 0)) - block_size = 1024 - t = tqdm.tqdm(total=total_size, unit='iB', unit_scale=True) - for data in r.iter_content(block_size): - t.update(len(data)) - f.write(data) - t.close() - -def sanitize_branch_name(branch_name): - pattern = re.compile(r"^[a-zA-Z0-9._-]+$") - if pattern.match(branch_name): - return branch_name - else: - raise ValueError("Invalid branch name. Only alphanumeric characters, period, underscore and dash are allowed.") - -def select_model_from_default_options(): - models = { - "Pygmalion 6B original": ("PygmalionAI", "pygmalion-6b", "b8344bb4eb76a437797ad3b19420a13922aaabe1"), - "Pygmalion 6B main": ("PygmalionAI", "pygmalion-6b", "main"), - "Pygmalion 6B dev": ("PygmalionAI", "pygmalion-6b", "dev"), - "Pygmalion 2.7B": ("PygmalionAI", "pygmalion-2.7b", "main"), - "Pygmalion 1.3B": ("PygmalionAI", "pygmalion-1.3b", "main"), - "Pygmalion 350m": ("PygmalionAI", "pygmalion-350m", "main"), - "OPT 6.7b": ("facebook", "opt-6.7b", "main"), - "OPT 2.7b": ("facebook", "opt-2.7b", "main"), - "OPT 1.3b": ("facebook", "opt-1.3b", "main"), - "OPT 350m": ("facebook", "opt-350m", "main"), - } - choices = {} - - print("Select the model that you want to download:\n") - for i,name in enumerate(models): - char = chr(ord('A')+i) - choices[char] = name - print(f"{char}) {name}") - char = chr(ord('A')+len(models)) - print(f"{char}) None of the above") - - print() - print("Input> ", end='') - choice = input()[0].strip().upper() - if choice == char: - print("""\nThen type the name of your desired Hugging Face model in the format organization/name. 
- -Examples: -PygmalionAI/pygmalion-6b -facebook/opt-1.3b -""") - - print("Input> ", end='') - model = input() - branch = "main" - else: - arr = models[choices[choice]] - model = f"{arr[0]}/{arr[1]}" - branch = arr[2] - - return model, branch - -def get_download_links_from_huggingface(model, branch): - base = "https://huggingface.co" - page = f"/api/models/{model}/tree/{branch}?cursor=" - cursor = b"" - - links = [] - classifications = [] - has_pytorch = False - has_safetensors = False - while True: - content = requests.get(f"{base}{page}{cursor.decode()}").content - - dict = json.loads(content) - if len(dict) == 0: - break - - for i in range(len(dict)): - fname = dict[i]['path'] - - is_pytorch = re.match("pytorch_model.*\.bin", fname) - is_safetensors = re.match("model.*\.safetensors", fname) - is_tokenizer = re.match("tokenizer.*\.model", fname) - is_text = re.match(".*\.(txt|json)", fname) or is_tokenizer - - if any((is_pytorch, is_safetensors, is_text, is_tokenizer)): - if is_text: - links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}") - classifications.append('text') - continue - if not args.text_only: - links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}") - if is_safetensors: - has_safetensors = True - classifications.append('safetensors') - elif is_pytorch: - has_pytorch = True - classifications.append('pytorch') - - cursor = base64.b64encode(f'{{"file_name":"{dict[-1]["path"]}"}}'.encode()) + b':50' - cursor = base64.b64encode(cursor) - cursor = cursor.replace(b'=', b'%3D') - - # If both pytorch and safetensors are available, download safetensors only - if has_pytorch and has_safetensors: - for i in range(len(classifications)-1, -1, -1): - if classifications[i] == 'pytorch': - links.pop(i) - - return links - -if __name__ == '__main__': - model = args.MODEL - branch = args.branch - if model is None: - model, branch = select_model_from_default_options() - else: - if model[-1] == '/': - model = model[:-1] - branch = args.branch - if branch is None: - branch = "main" - else: - try: - branch = sanitize_branch_name(branch) - except ValueError as err_branch: - print(f"Error: {err_branch}") - sys.exit() - if branch != 'main': - output_folder = Path("models") / (model.split('/')[-1] + f'_{branch}') - else: - output_folder = Path("models") / model.split('/')[-1] - if not output_folder.exists(): - output_folder.mkdir() - - links = get_download_links_from_huggingface(model, branch) - - # Downloading the files - print(f"Downloading the model to {output_folder}") - pool = multiprocessing.Pool(processes=args.threads) - results = pool.map(get_file, [[links[i], output_folder, i+1, len(links)] for i in range(len(links))]) - pool.close() - pool.join() diff --git a/spaces/evi0mo/vits-fastapi-server/index.html b/spaces/evi0mo/vits-fastapi-server/index.html deleted file mode 100644 index 3efed296d324073a17aff290fd05888085a15a5f..0000000000000000000000000000000000000000 --- a/spaces/evi0mo/vits-fastapi-server/index.html +++ /dev/null @@ -1,57 +0,0 @@ -<html> -<!-- Title --> - -<head> - <title>FastAPI Hello World</title> -</head> - -<!-- Stylesheet --> -<style> - body { - font-family: Arial, Helvetica, sans-serif; - font-size: 16px; - line-height: 1.5; - margin: 0; - padding: 0; - } - - h1 { - font-size: 2em; - margin: 0; - padding: 0; - /* Center */ - text-align: center; - } - - h3 { - margin: 0; - padding: 0; - /* Center */ - text-align: center; - } -</style> - -<!-- Body --> - -<body> - <h1>FastAPI Hello World Example</h1> - - <h3> - This is a simple example of a 
FastAPI Hello World application using Docker. See the links below for more - information. - </h3> - <!-- List of different relevant links and descriptions about them --> - - <div style="margin: 0 auto; width: 50%;"> - - <ul> - <!-- Link to self /docs --> - <li> - <a href="/docs">This API's Interactive Swaggar Docs</a> - Try the API out here! - </li> - </ul> - </div> - -</body> - -</html> \ No newline at end of file diff --git a/spaces/facebook/seamless_m4t/lang_list.py b/spaces/facebook/seamless_m4t/lang_list.py deleted file mode 100644 index a766d1783b8c1f2f06a98257e1269bbd2c95e663..0000000000000000000000000000000000000000 --- a/spaces/facebook/seamless_m4t/lang_list.py +++ /dev/null @@ -1,402 +0,0 @@ -# Language dict -language_code_to_name = { - "afr": "Afrikaans", - "amh": "Amharic", - "arb": "Modern Standard Arabic", - "ary": "Moroccan Arabic", - "arz": "Egyptian Arabic", - "asm": "Assamese", - "ast": "Asturian", - "azj": "North Azerbaijani", - "bel": "Belarusian", - "ben": "Bengali", - "bos": "Bosnian", - "bul": "Bulgarian", - "cat": "Catalan", - "ceb": "Cebuano", - "ces": "Czech", - "ckb": "Central Kurdish", - "cmn": "Mandarin Chinese", - "cym": "Welsh", - "dan": "Danish", - "deu": "German", - "ell": "Greek", - "eng": "English", - "est": "Estonian", - "eus": "Basque", - "fin": "Finnish", - "fra": "French", - "gaz": "West Central Oromo", - "gle": "Irish", - "glg": "Galician", - "guj": "Gujarati", - "heb": "Hebrew", - "hin": "Hindi", - "hrv": "Croatian", - "hun": "Hungarian", - "hye": "Armenian", - "ibo": "Igbo", - "ind": "Indonesian", - "isl": "Icelandic", - "ita": "Italian", - "jav": "Javanese", - "jpn": "Japanese", - "kam": "Kamba", - "kan": "Kannada", - "kat": "Georgian", - "kaz": "Kazakh", - "kea": "Kabuverdianu", - "khk": "Halh Mongolian", - "khm": "Khmer", - "kir": "Kyrgyz", - "kor": "Korean", - "lao": "Lao", - "lit": "Lithuanian", - "ltz": "Luxembourgish", - "lug": "Ganda", - "luo": "Luo", - "lvs": "Standard Latvian", - "mai": "Maithili", - "mal": "Malayalam", - "mar": "Marathi", - "mkd": "Macedonian", - "mlt": "Maltese", - "mni": "Meitei", - "mya": "Burmese", - "nld": "Dutch", - "nno": "Norwegian Nynorsk", - "nob": "Norwegian Bokm\u00e5l", - "npi": "Nepali", - "nya": "Nyanja", - "oci": "Occitan", - "ory": "Odia", - "pan": "Punjabi", - "pbt": "Southern Pashto", - "pes": "Western Persian", - "pol": "Polish", - "por": "Portuguese", - "ron": "Romanian", - "rus": "Russian", - "slk": "Slovak", - "slv": "Slovenian", - "sna": "Shona", - "snd": "Sindhi", - "som": "Somali", - "spa": "Spanish", - "srp": "Serbian", - "swe": "Swedish", - "swh": "Swahili", - "tam": "Tamil", - "tel": "Telugu", - "tgk": "Tajik", - "tgl": "Tagalog", - "tha": "Thai", - "tur": "Turkish", - "ukr": "Ukrainian", - "urd": "Urdu", - "uzn": "Northern Uzbek", - "vie": "Vietnamese", - "xho": "Xhosa", - "yor": "Yoruba", - "yue": "Cantonese", - "zlm": "Colloquial Malay", - "zsm": "Standard Malay", - "zul": "Zulu", -} -LANGUAGE_NAME_TO_CODE = {v: k for k, v in language_code_to_name.items()} - -# Source langs: S2ST / S2TT / ASR don't need source lang -# T2TT / T2ST use this -text_source_language_codes = [ - "afr", - "amh", - "arb", - "ary", - "arz", - "asm", - "azj", - "bel", - "ben", - "bos", - "bul", - "cat", - "ceb", - "ces", - "ckb", - "cmn", - "cym", - "dan", - "deu", - "ell", - "eng", - "est", - "eus", - "fin", - "fra", - "gaz", - "gle", - "glg", - "guj", - "heb", - "hin", - "hrv", - "hun", - "hye", - "ibo", - "ind", - "isl", - "ita", - "jav", - "jpn", - "kan", - "kat", - "kaz", - "khk", - "khm", - "kir", - "kor", - 
"lao", - "lit", - "lug", - "luo", - "lvs", - "mai", - "mal", - "mar", - "mkd", - "mlt", - "mni", - "mya", - "nld", - "nno", - "nob", - "npi", - "nya", - "ory", - "pan", - "pbt", - "pes", - "pol", - "por", - "ron", - "rus", - "slk", - "slv", - "sna", - "snd", - "som", - "spa", - "srp", - "swe", - "swh", - "tam", - "tel", - "tgk", - "tgl", - "tha", - "tur", - "ukr", - "urd", - "uzn", - "vie", - "yor", - "yue", - "zsm", - "zul", -] -TEXT_SOURCE_LANGUAGE_NAMES = sorted([language_code_to_name[code] for code in text_source_language_codes]) - -# Target langs: -# S2ST / T2ST -s2st_target_language_codes = [ - "eng", - "arb", - "ben", - "cat", - "ces", - "cmn", - "cym", - "dan", - "deu", - "est", - "fin", - "fra", - "hin", - "ind", - "ita", - "jpn", - "kor", - "mlt", - "nld", - "pes", - "pol", - "por", - "ron", - "rus", - "slk", - "spa", - "swe", - "swh", - "tel", - "tgl", - "tha", - "tur", - "ukr", - "urd", - "uzn", - "vie", -] -S2ST_TARGET_LANGUAGE_NAMES = sorted([language_code_to_name[code] for code in s2st_target_language_codes]) - -# S2TT / ASR -S2TT_TARGET_LANGUAGE_NAMES = TEXT_SOURCE_LANGUAGE_NAMES -# T2TT -T2TT_TARGET_LANGUAGE_NAMES = TEXT_SOURCE_LANGUAGE_NAMES - - -LANG_TO_SPKR_ID = { - "arb": [ - 0 - ], - "ben": [ - 2, - 1 - ], - "cat": [ - 3 - ], - "ces": [ - 4 - ], - "cmn": [ - 5 - ], - "cym": [ - 6 - ], - "dan": [ - 7, - 8 - ], - "deu": [ - 9 - ], - "eng": [ - 10 - ], - "est": [ - 11, - 12, - 13 - ], - "fin": [ - 14 - ], - "fra": [ - 15 - ], - "hin": [ - 16 - ], - "ind": [ - 17, - 24, - 18, - 20, - 19, - 21, - 23, - 27, - 26, - 22, - 25 - ], - "ita": [ - 29, - 28 - ], - "jpn": [ - 30 - ], - "kor": [ - 31 - ], - "mlt": [ - 32, - 33, - 34 - ], - "nld": [ - 35 - ], - "pes": [ - 36 - ], - "pol": [ - 37 - ], - "por": [ - 38 - ], - "ron": [ - 39 - ], - "rus": [ - 40 - ], - "slk": [ - 41 - ], - "spa": [ - 42 - ], - "swe": [ - 43, - 45, - 44 - ], - "swh": [ - 46, - 48, - 47 - ], - "tel": [ - 49 - ], - "tgl": [ - 50 - ], - "tha": [ - 51, - 54, - 55, - 52, - 53 - ], - "tur": [ - 58, - 57, - 56 - ], - "ukr": [ - 59 - ], - "urd": [ - 60, - 61, - 62 - ], - "uzn": [ - 63, - 64, - 65 - ], - "vie": [ - 66, - 67, - 70, - 71, - 68, - 69 - ] -} \ No newline at end of file diff --git a/spaces/faisalhr1997/chat-ggml/README.md b/spaces/faisalhr1997/chat-ggml/README.md deleted file mode 100644 index 75d559cc0e9b24e97edecbef7e838d12f1c42ca3..0000000000000000000000000000000000000000 --- a/spaces/faisalhr1997/chat-ggml/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: chat-ggml -emoji: 🚀 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: true -duplicated_from: mikeee/falcon-7b-ggml ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Italo Calvino Invisible Cities Epub Download Nook HOT.md b/spaces/falterWliame/Face_Mask_Detection/Italo Calvino Invisible Cities Epub Download Nook HOT.md deleted file mode 100644 index 9082544b076da51facafb0df5ae5af414e12e94d..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Italo Calvino Invisible Cities Epub Download Nook HOT.md +++ /dev/null @@ -1,24 +0,0 @@ -<br /> -<h1>How to Download Invisible Cities by Italo Calvino in Epub Format for Nook</h1> -<p>Invisible Cities is a classic novel by the Italian writer Italo Calvino, first published in 1972. 
It consists of a series of imaginative dialogues between the Venetian explorer Marco Polo and the Mongol emperor Kublai Khan, in which Polo describes various cities he has visited or imagined. The novel explores themes such as memory, language, culture, and the nature of human experience.</p> -<h2>Italo Calvino Invisible Cities Epub Download Nook</h2><br /><p><b><b>Download</b> &#9658; <a href="https://urlca.com/2uDcKY">https://urlca.com/2uDcKY</a></b></p><br /><br /> -<p>If you want to read this novel on your Nook device, you will need to download it in epub format, which is a common file format for ebooks. Here are some steps you can follow to do that:</p> -<ol> -<li>Go to the Internet Archive website and search for Invisible Cities by Italo Calvino. You should find two results that have epub files available for download[^1^] [^2^]. Alternatively, you can use this link to access one of them: <a href="https://archive.org/details/invisiblecities0000calv">https://archive.org/details/invisiblecities0000calv</a>.</li> -<li>Click on the epub file that you want to download. It should have a name like invisiblecities0000calv.epub or invisiblecities00calv.epub. A new page will open with a preview of the file and a download button.</li> -<li>Click on the download button and save the file to your computer. You may need to choose a location where you want to save it, such as your desktop or downloads folder.</li> -<li>Connect your Nook device to your computer using a USB cable. Your computer should recognize it as a removable drive.</li> -<li>Open the drive and locate the folder where you want to store your ebook. You can use the default folder called My Documents or create a new one.</li> -<li>Drag and drop the epub file from your computer to the folder on your Nook device. You may need to wait for a few seconds for the transfer to complete.</li> -<li>Eject your Nook device from your computer and disconnect the USB cable. You should be able to find your ebook on your Nook library and start reading it.</li> -</ol> -<p>I hope this helps you enjoy Invisible Cities by Italo Calvino on your Nook device. Happy reading! 😊</p><p>Here are some additional information about Invisible Cities by Italo Calvino that you may find interesting:</p> -<ul> -<li>The novel is divided into nine chapters, each containing five sections. The first and the last sections are always dialogues between Polo and Khan, while the other three are descriptions of different cities. The cities are grouped by themes, such as memory, desire, signs, trading, etc.</li> -<li>The novel is inspired by various sources, such as the historical accounts of Marco Polo's travels, the medieval cosmography of The Book of Marvels by Sir John Mandeville, the imaginary geography of Jorge Luis Borges, and the surrealist paintings of Giorgio de Chirico.</li> -<li>The novel has been praised for its poetic language, its philosophical insights, and its creative imagination. It has also been interpreted in various ways, such as a reflection on urbanism, a critique of modernity, a meditation on the human condition, and a metaphor for writing.</li> -<li>The novel has influenced many writers and artists, such as Salman Rushdie, David Mitchell, Chris Ware, and Gorillaz. It has also been adapted into various media forms, such as opera, theater, film, and video games.</li> -</ul> -<p>Invisible Cities by Italo Calvino is a masterpiece of literature that invites you to explore the infinite possibilities of the human mind. 
If you are looking for a book that will challenge your imagination and enrich your perspective, you should definitely give it a try.</p> d5da3c52bf<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/8 Ball Pool Offline The Best Android Game for Billiard Lovers.md b/spaces/fatiXbelha/sd/8 Ball Pool Offline The Best Android Game for Billiard Lovers.md deleted file mode 100644 index 001015e892962a70e893ef9d95b99d473ed28f4f..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/8 Ball Pool Offline The Best Android Game for Billiard Lovers.md +++ /dev/null @@ -1,111 +0,0 @@ - -<h1>8 Ball Pool Offline APK Download: How to Play the World's Best Pool Game Without Internet</h1> -<p>Do you love playing pool but don't have access to a stable internet connection? Do you want to enjoy the world's most popular pool game on your Android device without worrying about data usage or online opponents? If yes, then you should download 8 Ball Pool offline APK and play the game anytime, anywhere.</p> -<h2>What is 8 Ball Pool?</h2> -<h3>A brief introduction to the game and its features</h3> -<p>8 Ball Pool is a pool game developed by Miniclip that lets you play against millions of players from around the world. You can choose from different game modes, such as 1-on-1 matches, tournaments, practice mode, and more. You can also customize your cue and table, earn coins and cash, buy new items in the shop, and join clubs and leagues.</p> -<h2>8 ball pool offline apk download</h2><br /><p><b><b>Download Zip</b> &#10037; <a href="https://urllie.com/2uNDYn">https://urllie.com/2uNDYn</a></b></p><br /><br /> -<h3>The difference between online and offline modes</h3> -<p>While 8 Ball Pool is mainly an online game that requires internet connection to play, there is also an offline mode that you can access by downloading an APK file. APK stands for Android Package Kit, which is a file format that contains all the elements needed to install an app on your device. By downloading 8 Ball Pool offline APK, you can play the game without internet connection. However, you will not be able to access some features that are only available online, such as multiplayer mode, chat function, leaderboards, etc.</p> -<h2>How to Download and Install 8 Ball Pool Offline APK</h2> -<h3>The steps to download the APK file from a trusted source</h3> -<p>Before you download 8 Ball Pool offline APK, you need to make sure that you have enough storage space on your device and that you allow installation from unknown sources in your settings. Then, you need to find a reliable website that offers the latest version of the APK file. You can use a search engine like Bing or Google to find one. Once you find a suitable website, follow these steps:</p> -<ol> -<li>Click on the download button or link.</li> -<li>Wait for the download to complete.</li> -<li>Locate the downloaded file in your device's file manager.</li> -</ol> -<h3>The steps to install the APK file on your Android device</h3> -<p>After you download 8 Ball Pool offline APK, you need to install it on your device. Follow these steps:</p> -<ol> -<li>Tap on the downloaded file.</li> -<li>A pop-up window will appear asking for your permission to install the app. Tap on "Install". 
<li>The installation process will begin and may take a few minutes.</li> -<li>Once the installation is done, you can open the app and start playing 8 Ball Pool offline.</li> -</ol> -<h2>How to Play 8 Ball Pool Offline</h2> -<h3>The rules and objectives of the game</h3> -<p>The rules of 8 Ball Pool offline are the same as the online version. You need to use your cue to hit the white ball and pot the colored balls in the pockets. There are two types of balls: solids and stripes. You need to pot all the balls of your type before you can pot the black 8 ball. The first player to pot the 8 ball wins the game. However, if you pot the 8 ball before clearing your type, or if you pot the white ball along with the 8 ball, you lose the game.</p> -<h3>The tips and tricks to improve your skills and win more matches</h3> -<p>Playing 8 Ball Pool offline can help you improve your skills and strategies for the online mode. Here are some tips and tricks to help you play better:</p> -<ul> -<li>Aim carefully and adjust your power and spin according to the distance and angle of the shot.</li> -<li>Use the guidelines to help you align your shots and predict the trajectory of the balls.</li> -<li>Plan ahead and think about your next shots and possible outcomes.</li> -<li>Practice different game modes and difficulty levels to challenge yourself and learn from your mistakes.</li> -<li>Have fun and enjoy the game!</li> -</ul> -<h2>The Benefits of Playing 8 Ball Pool Offline</h2> -<h3>The advantages of playing without internet connection</h3> -<p>Playing 8 Ball Pool offline has some benefits that you may not get from playing online. Here are some of them:</p> -<ul> -<li>You can play anytime, anywhere, without worrying about internet availability or speed.</li> -<li>You can save your data usage and battery life by playing offline.</li> -<li>You can avoid online distractions, such as ads, pop-ups, notifications, etc.</li> -<li>You can play at your own pace and relax without pressure from online opponents or timers.</li> -<li>You can improve your skills and confidence by playing offline before going online.</li> -</ul> -<h3>The disadvantages of playing without internet connection</h3> -<p>However, playing 8 Ball Pool offline also has some drawbacks that you may miss from playing online. Here are some of them:</p> -<ul> -<li>You cannot access some features that are only available online, such as multiplayer mode, chat function, leaderboards, etc.</li> -<li>You cannot earn coins and cash or buy new items in the shop by playing offline.</li> -<li>You cannot join clubs and leagues or participate in tournaments and events by playing offline.</li> -<li>You cannot update the app or get new content by playing offline.</li> -<li>You may get bored or lonely by playing offline without social interaction or competition.</li> -</ul> <h2>Conclusion</h2> -<p>8 Ball Pool offline APK is a great way to enjoy the world's best pool game without internet connection. You can download and install the APK file from a trusted source and play the game on your Android device. You can learn the rules and objectives of the game, improve your skills and strategies, and have fun and relax. However, you should also be aware of the disadvantages of playing offline, such as missing some online features, not earning coins and cash, not joining clubs and leagues, etc. 
Therefore, you should balance your offline and online gameplay and experience the best of both worlds.</p> -<p>8 ball billiards offline pool game apk<br /> -download 8 ball pool mod apk offline<br /> -8 ball pool offline apk free download<br /> -8 ball pool offline mode apk download<br /> -8 ball pool apk download offline version<br /> -8 ball pool offline unlimited coins apk<br /> -8 ball pool offline hack apk download<br /> -8 ball pool offline android apk download<br /> -8 ball pool offline apk latest version<br /> -8 ball pool offline apk no internet<br /> -8 ball pool offline apk for pc<br /> -8 ball pool offline apk pure download<br /> -8 ball pool offline multiplayer apk<br /> -8 ball pool offline apk old version<br /> -8 ball pool offline apk rexdl download<br /> -8 ball pool offline apk revdl download<br /> -8 ball pool offline apk uptodown download<br /> -8 ball pool offline apk mod menu<br /> -8 ball pool offline apk unlimited money<br /> -8 ball pool offline apk anti ban download<br /> -8 ball pool offline apk with friends<br /> -8 ball pool offline apk without internet<br /> -8 ball pool offline apk full version<br /> -8 ball pool offline apk no wifi needed<br /> -8 ball pool offline apk mega mod<br /> -8 ball pool offline pro apk download<br /> -8 ball pool offline premium apk download<br /> -8 ball pool offline cracked apk download<br /> -8 ball pool offline unlocked apk download<br /> -8 ball pool offline cheat engine apk<br /> -8 ball pool offline trainer apk download<br /> -8 ball pool offline patcher apk download<br /> -8 ball pool offline generator apk download<br /> -8 ball pool offline editor apk download<br /> -8 ball pool offline installer apk download<br /> -8 ball pool offline simulator apk download<br /> -8 ball pool offline emulator apk download<br /> -8 ball pool offline challenge mode apk<br /> -8 ball pool offline tournament mode apk<br /> -8 ball pool offline practice mode apk</p> -<p>If you liked this article, please share it with your friends and family who love playing pool. Also, don't forget to check out our other articles on 8 Ball Pool and other games. Thank you for reading and happy gaming!</p> -<h2>FAQs</h2> -<h3>Q1: Is 8 Ball Pool offline APK safe and legal?</h3> -<p>A1: 8 Ball Pool offline APK is safe and legal as long as you download it from a reputable website that does not contain any viruses or malware. However, you should always be careful when downloading any APK file from unknown sources and scan it with an antivirus software before installing it. Also, you should not use any modded or hacked versions of the APK file that may violate the terms and conditions of the game or harm your device.</p> -<h3>Q2: Can I play 8 Ball Pool offline with friends?</h3> -<p>A2: Yes, you can play 8 Ball Pool offline with friends by using the local multiplayer mode. This mode allows you to play with another player on the same device by using the split-screen feature. You can also use Bluetooth or Wi-Fi to connect with another device and play with your friend. However, you will not be able to chat or communicate with your friend while playing offline.</p> -<h3>Q3: How can I update 8 Ball Pool offline APK?</h3> -<p>A3: To update 8 Ball Pool offline APK, you need to download the latest version of the APK file from the same website that you downloaded it from before. Then, you need to uninstall the old version of the app from your device and install the new version. 
Alternatively, you can go online and update the app from the Google Play Store or the official website of Miniclip.</p> -<h3>Q4: How can I transfer my progress from online to offline mode?</h3> -<p>A4: Unfortunately, you cannot transfer your progress from online to offline mode or vice versa. The online and offline modes are separate and have different data and settings. Therefore, you will have to start from scratch when you switch between modes. However, you can still use your same account and login details for both modes.</p> -<h3>Q5: How can I contact the developers of 8 Ball Pool?</h3> -<p>A5: If you have any questions, feedback, suggestions, or issues regarding 8 Ball Pool, you can contact the developers of Miniclip by using their support page, their email address (support@miniclip.com), their Facebook page, their Twitter account, or their YouTube channel. They will try to respond to your queries as soon as possible.</p> - : https://support.miniclip.com/hc/en-us/categories/115000055067-8-Ball-Pool : https://www.facebook.com/8ballpoolfans : https://twitter.com/8ballpool : https://www.youtube.com/user/Miniclip</p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Become a Legendary Knight with Stickman The Flash MOD APK (Unlocked Map God Mode).md b/spaces/fatiXbelha/sd/Become a Legendary Knight with Stickman The Flash MOD APK (Unlocked Map God Mode).md deleted file mode 100644 index ef8e4f29716e348a68948cf09569c7ac07cf079c..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Become a Legendary Knight with Stickman The Flash MOD APK (Unlocked Map God Mode).md +++ /dev/null @@ -1,79 +0,0 @@ -<br /> -<h1>Download Stickman The Flash Mod APK An1: A Fast and Fun Action Game</h1> -<p>If you are a fan of stickman games, you will love Stickman The Flash Mod APK An1. This is a modded version of the original game that gives you god mode, unlimited power, and no ads. You can enjoy the thrilling action of fighting against shadow warriors, ninjas, and monsters with super speed and skills. In this article, we will tell you what is Stickman The Flash Mod APK An1, how to download and install it, and some tips and tricks for playing it.</p> -<h2>What is Stickman The Flash Mod APK An1?</h2> -<p>Stickman The Flash Mod APK An1 is a modified version of the original game Stickman The Flash, which is developed by JDI Game Studio. The game is inspired by the popular superhero The Flash, who can move faster than light. You play as a stickman hero who has the same power and can defeat enemies with lightning-fast attacks. You can also use various skills and weapons to enhance your combat abilities.</p> -<h2>download stickman the flash mod apk an1</h2><br /><p><b><b>Download</b> &#127383; <a href="https://urllie.com/2uNAqp">https://urllie.com/2uNAqp</a></b></p><br /><br /> -<p>The modded version of the game gives you some extra advantages that make the game more fun and easy. You can activate god mode, which makes you invincible to any damage. You can also use unlimited power, which lets you unleash your skills without any cooldown or energy limit. Moreover, you can enjoy the game without any annoying ads that interrupt your gameplay.</p> -<h3>Features of Stickman The Flash Mod APK An1</h3> -<h4>God mode</h4> -<p>With god mode, you can play the game without worrying about dying or losing health. You can take on any enemy or boss without fear. You can also explore the different stages and levels without any obstacles or traps. 
God mode makes the game more relaxing and enjoyable.</p> -<h4>Unlimited power</h4> -<p>With unlimited power, you can use your skills as much as you want. You don't have to wait for the cooldown or energy bar to refill. You can spam your skills and create combos that deal massive damage to your enemies. You can also switch between different skills and weapons to suit your situation. Unlimited power makes the game more exciting and dynamic.</p> -<h4>No ads</h4> -<p>With no ads, you can play the game without any interruption or distraction. You don't have to watch any videos or banners that pop up on your screen. You can focus on the game and enjoy the smooth graphics and sound effects. No ads makes the game more immersive and satisfying.</p> -<h3>How to download and install Stickman The Flash Mod APK An1?</h3> -<p>If you want to download and install Stickman The Flash Mod APK An1, you need to follow these simple steps:</p> -<h4>Step 1: Download the APK file</h4> -<p>You can download the APK file from this link: [Download Stickman The Flash Mod APK An1](^1^). This is a safe and secure link that will give you the latest version of the modded game.</p> -<p>How to download stickman the flash mod apk an1 for free<br /> -Stickman the flash mod apk an1 latest version download<br /> -Stickman the flash mod apk an1 unlimited money and power<br /> -Download stickman the flash mod apk an1 for android devices<br /> -Stickman the flash mod apk an1 gameplay and features<br /> -Stickman the flash mod apk an1 review and rating<br /> -Stickman the flash mod apk an1 cheats and hacks<br /> -Stickman the flash mod apk an1 online multiplayer mode<br /> -Stickman the flash mod apk an1 offline mode download<br /> -Stickman the flash mod apk an1 best weapons and upgrades<br /> -Download stickman the flash mod apk an1 from thestickmanapk.com[^1^]<br /> -Stickman the flash mod apk an1 god mode and unlimited ammo<br /> -Stickman the flash mod apk an1 tips and tricks<br /> -Stickman the flash mod apk an1 vs stickman legends mod apk<br /> -Stickman the flash mod apk an1 new characters and skins<br /> -Download stickman the flash mod apk an1 for PC and laptop<br /> -Stickman the flash mod apk an1 no root required<br /> -Stickman the flash mod apk an1 installation guide and tutorial<br /> -Stickman the flash mod apk an1 best settings and optimization<br /> -Stickman the flash mod apk an1 comparison with original version<br /> -Download stickman the flash mod apk an1 from apkpure.com<br /> -Stickman the flash mod apk an1 challenges and missions<br /> -Stickman the flash mod apk an1 fun and addictive gameplay<br /> -Stickman the flash mod apk an1 support and feedback<br /> -Stickman the flash mod apk an1 bugs and issues fix<br /> -Download stickman the flash mod apk an1 from happymod.com<br /> -Stickman the flash mod apk an1 fan art and wallpapers<br /> -Stickman the flash mod apk an1 community and forum<br /> -Stickman the flash mod apk an1 update and patch notes<br /> -Stickman the flash mod apk an1 alternatives and similar games</p> -<h4>Step 2: Enable unknown sources</h4> -<p>Before you install the APK file, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</p> -<h4>Step 3: Install the APK file</h4> -<p>After you enable unknown sources, you can install the APK file by tapping on it. You will see a confirmation message and some permissions that the app requires. 
Tap on Install and wait for the installation to finish.</p> -<h4>Step 4: Enjoy the game</h4> -<p>Once the installation is done, you can open the game and start playing. You will see the mod features on the main menu and in the game settings. You can activate or deactivate them as you wish. You can also customize your stickman hero and choose your preferred skills and weapons. Have fun with Stickman The Flash Mod APK An1!</p> -<h3>Tips and tricks for playing Stickman The Flash Mod APK An1</h3> -<p>Stickman The Flash Mod APK An1 is a fast-paced and fun action game, but it can also be challenging and tricky at times. Here are some tips and tricks that can help you master the game and become the ultimate stickman hero:</p> -<h4>Use your power wisely</h4> -<p>Even though you have unlimited power, you still need to use it wisely. Different skills and weapons have different effects and advantages. For example, some skills can stun, freeze, or burn your enemies, while others can heal, shield, or boost your speed. Some weapons can deal more damage, while others can have longer range or faster attack speed. You need to experiment with different combinations and find out what works best for you.</p> -<h4>Dodge and counterattack</h4> -<p>Even though you have god mode, you still need to dodge and counterattack your enemies. This will make the game more fun and challenging, and also improve your reaction time and skills. You can use your super speed to dodge incoming attacks and move around the battlefield. You can also use your skills to counterattack your enemies and create openings for more damage. You need to be alert and agile to survive the waves of enemies and bosses.</p> -<h4>Upgrade your skills and equipment</h4> -<p>Even though you have unlimited power, you still need to upgrade your skills and equipment. This will make the game more rewarding and satisfying, and also increase your power and performance. You can use the coins and gems that you earn from completing stages and levels to upgrade your skills and equipment. You can also unlock new skills and weapons as you progress in the game. You need to invest in your stickman hero and make him stronger and faster.</p> -<h3>Conclusion</h3> -<p>Stickman The Flash Mod APK An1 is a great game for anyone who loves stickman games, action games, or superhero games. It has amazing graphics, sound effects, and gameplay that will keep you hooked for hours. It also has mod features that will make the game more fun and easy. You can download and install Stickman The Flash Mod APK An1 from this link: [Download Stickman The Flash Mod APK An1]. You can also follow our tips and tricks to master the game and become the ultimate stickman hero. We hope you enjoy playing Stickman The Flash Mod APK An1!</p> -<h2>FAQs</h2> -<p>Here are some frequently asked questions about Stickman The Flash Mod APK An1:</p> -<ul> -<li><b>Is Stickman The Flash Mod APK An1 safe to download and install?</b></li> -<p>Yes, Stickman The Flash Mod APK An1 is safe to download and install. It does not contain any viruses or malware that can harm your device or data. It is also compatible with most Android devices.</p> -<li><b>Is Stickman The Flash Mod APK An1 free to play?</b></li> -<p>Yes, Stickman The Flash Mod APK An1 is free to play. You don't have to pay any money to download or install it. You also don't have to spend any money to play it. 
However, you can choose to support the developers by making in-app purchases if you want.</p> -<li><b>Can I play Stickman The Flash Mod APK An1 offline?</b></li> -<p>Yes, you can play Stickman The Flash Mod APK An1 offline. You don't need an internet connection to play it. However, you may need an internet connection to access some features or updates.</p> -<li><b>Can I play Stickman The Flash Mod APK An1 with friends?</b></li> -<p>No, you cannot play Stickman The Flash Mod APK An1 with friends. It is a single-player game that does not support multiplayer mode. However, you can share your achievements and scores with your friends on social media.</p> -<li><b>How can I contact the developers of Stickman The Flash Mod APK An1?</b></li> -<p>You can contact the developers of Stickman The Flash Mod APK An1 by sending them an email at jdigamestudio@gmail.com. You can also visit their website at https://jdi-game-studio.com/ or follow them on Facebook at https://www.facebook.com/JDI-Game-Studio-101201995389613. You can also leave a review or rating on the Google Play Store or the App Store.</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Domino QiuQiu 99 QQ Gaple Slot and Play 21 Classic Card Games Online.md b/spaces/fatiXbelha/sd/Download Domino QiuQiu 99 QQ Gaple Slot and Play 21 Classic Card Games Online.md deleted file mode 100644 index 62f942536ebc11f6212e2e6cfd0f495e16cd7796..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Domino QiuQiu 99 QQ Gaple Slot and Play 21 Classic Card Games Online.md +++ /dev/null @@ -1,136 +0,0 @@ -<br /> -<h1>Domino Qiu Qiu 2019 Apk: A Guide for Beginners</h1> - <p>If you are looking for a fun and exciting card game to play online, you might want to try domino qiu qiu. This is a popular game in Indonesia that is also known as 99 domino poker or kiu kiu. It is played with a set of 28 dominoes or cards, and it has a lot of variations and challenges to keep you entertained. In this article, we will give you a comprehensive guide on how to play domino qiu qiu online using the 2019 apk version. We will cover the basics of the game, how to download and install the apk, how to find the best apps and websites, and how to win more games. Let's get started!</p> -<h2>domino qiu qiu 2019 apk</h2><br /><p><b><b>Download</b> &raquo; <a href="https://urllie.com/2uNH6j">https://urllie.com/2uNH6j</a></b></p><br /><br /> - <h2>What is Domino Qiu Qiu?</h2> - <p>Domino qiu qiu is a game of dominoes that is related to pai gow, a Chinese gambling game. It is believed that domino qiu qiu originated from China and was brought to Indonesia by traders or immigrants. It became very popular among the local people, especially in Java, Sumatra, and Kalimantan. Today, domino qiu qiu is one of the most played online games in Indonesia, with millions of players joining every day.</p> - <h3>The origin and popularity of the game</h3> - <p>Domino qiu qiu is a game that combines elements of luck, skill, and strategy. It is played with a set of 28 dominoes or cards, each with two sides that have a number of dots from 0 to 6. There are seven different types of cards, from 0/0 (blank) to 6/6 (double six). The game can be played by 2 to 6 players, but it is best to play with 4 players.</p> - <h3>The rules and objectives of the game</h3> - <p>The objective of domino qiu qiu is to win bets by making the best hand with your cards. Each player starts with three cards, and then there is a round of betting. 
The players can either check, bet, call, raise, or fold. After the betting round, each player who did not fold gets a fourth card, and then there is another round of betting. The players then reveal their cards and compare their hands.</p> - <p>The hands are formed by pairing two cards together, and then adding up the dots on each pair. The value of each pair is determined by taking the last digit of the sum. For example, if you have a pair of 5/4 and 6/2, your value is (5+4)+(6+2) = 17, so your pair value is 7. The highest pair value possible is 9 (or 'kiu'). Your final hand value consists of two pair values, such as 9/9 (the best ordinary hand) or 7/5 (a lower hand).</p> - <h3>The special hands and rankings of the game</h3> - <p>There are four special hands that rank above any ordinary hand in domino qiu qiu. They are:</p> - <ul> -<li>Enam Dewa (Six Gods): This is the highest hand possible in domino qiu qiu. It consists of four cards that have six dots on each side, such as 6/6, 5/1, 4/2, and 3/3.</li> -<li>Empat Balak (Four Doubles): This is the second highest hand in domino q iu qiu. It consists of four cards that are doubles, such as 6/6, 5/5, 4/4, and 3/3.</li> -<li>Besar (Big): This is the third highest hand in domino qiu qiu. It consists of four cards that have a total of at least 39 dots, such as 6/6, 6/5, 5/4, and 4/3.</li> -<li>Kecil (Small): This is the fourth highest hand in domino qiu qiu. It consists of four cards that have a total of at most 9 dots, such as 1/1, 2/0, 3/0, and 4/0.</li> -</ul> - <p>If two players have the same type of hand, the winner is determined by comparing the highest pair value of each hand. If the pair values are also the same, the winner is determined by comparing the highest single card value of each hand. If the single card values are also the same, the pot is split between the two players.</p> -<p>domino qiu qiu online 99(QQ) pc<br /> -gaple offline domino qiu qiu 2019<br /> -domino qiu qiu 2019 apk mod<br /> -domino qiu qiu online 2019 terbaru<br /> -domino qiu qiu 2019 offline download<br /> -domino qiu qiu online 99(QQ) bluestacks<br /> -gaple offline domino qiu qiu 2019 apk<br /> -domino qiu qiu 2019 apk free<br /> -domino qiu qiu online 2019 gratis<br /> -domino qiu qiu 2019 offline android<br /> -domino qiu qiu online 99(QQ) app<br /> -gaple offline domino qiu qiu 2019 game<br /> -domino qiu qiu 2019 apk latest<br /> -domino qiu qiu online 2019 indonesia<br /> -domino qiu qiu 2019 offline ios<br /> -domino qiu qiu online 99(QQ) play<br /> -gaple offline domino qiu qiu 2019 card<br /> -domino qiu qiu 2019 apk update<br /> -domino qiu qiu online 2019 uang asli<br /> -domino qiu qiu 2019 offline pc<br /> -domino qiu qiu online 99(QQ) download<br /> -gaple offline domino qiu qiu 2019 install<br /> -domino qiu qiu 2019 apk hack<br /> -domino qiu qiu online 2019 tips and tricks<br /> -domino qiu qiu 2019 offline mod<br /> -domino qiu qiu online 99(QQ) review<br /> -gaple offline domino qiu qiu 2019 features<br /> -domino qiu qiu 2019 apk old version<br /> -domino qiu qiu online 2019 cheat engine<br /> -domino qiu qiu 2019 offline apk<br /> -domino qiu qiu online 99(QQ) guide<br /> -gaple offline domino qiu qiu 2019 strategy<br /> -domino qiu qiu 2019 apk new version<br /> -domino qiu qiu online 2019 tutorial<br /> -domino qiu quu 2019 offline free<br /> -domino quu quu online 99(QQ) support<br /> -gaple offline domino quu quu 2019 rating<br /> -domino quu quu 2019 apk mirror<br /> -domino quu quu online 2019 forum<br /> -domino quu quu 2019 
offline hack</p> - <h2>How to Play Domino Qiu Qiu Online?</h2> - <p>Playing domino qiu qiu online is very convenient and fun. You can play anytime and anywhere, with your friends or with strangers from all over the world. You can also enjoy various features and benefits that online gaming offers, such as bonuses, tournaments, chat rooms, and more. Here are some steps and tips on how to play domino qiu qiu online using the 2019 apk version.</p> - <h3>The advantages and features of playing online</h3> - <p>Playing domino qiu qiu online has many advantages and features that make it more enjoyable and rewarding than playing offline. Some of them are:</p> - <ul> -<li>You can play for free or for real money, depending on your preference and budget.</li> -<li>You can choose from different levels of difficulty and stakes, from beginner to expert, from low to high.</li> -<li>You can join various tables and games, from regular to VIP, from classic to modern.</li> -<li>You can participate in various events and promotions, such as daily bonuses, weekly tournaments, monthly jackpots, and seasonal offers.</li> -<li>You can interact with other players and make new friends, using chat functions, emojis, gifts, and more.</li> -<li>You can customize your profile and avatar, using different themes, colors, pictures, and more.</li> -<li>You can access your account and game history, using secure and easy login methods, such as email, phone number, or social media.</li> -<li>You can get support and assistance from customer service agents, using live chat, email, phone call, or FAQ section.</li> -</ul> - <h3>The steps and tips for downloading and installing the apk</h3> - <p>To play domino qiu qiu online using the 2019 apk version, you need to download and install the apk file on your device. Here are some steps and tips on how to do that:</p> - <ol> -<li>Find a reliable and reputable website that offers the domino qiu qiu 2019 apk file. You can search on Google or Bing for reviews and ratings of different websites.</li> -<li>Download the apk file from the website to your device. Make sure you have enough storage space and a stable internet connection.</li> -<li>Enable the installation of unknown sources on your device. Go to your device settings and security options and allow the installation of apps from sources other than Google Play Store or App Store.</li> -<li>Install the apk file on your device. Follow the instructions on the screen and wait for the installation to complete.</li> -<li>Launch the app on your device. Create an account or log in with your existing account. Enjoy playing domino qiu qiu online!</li> -</ol> - <p>Some tips to keep in mind when downloading and installing the apk are:</p> - <ul> -<li>Make sure you download the apk file from a trusted website that has a valid certificate and encryption.</li> -<li>Make sure you scan the apk file with an antivirus software before installing it on your device.</li> -<li>Make sure you update the app regularly to get the latest features and bug fixes.</li> -</ul> - <h3>The best apps and websites for playing online</h3> - <p>There are many apps and websites that offer domino qiu qiu online games. However, not all of them are equal in terms of quality, security, and service. 
Here are some of the best apps and websites that we recommend for playing domino qiu qiu online using the 2019 apk version:</p> - <table> -<tr><th>Name</th><th>Description</th><th>Rating</th></tr> -<tr><td>PokerV</td><td>PokerV is one of the most popular and trusted apps for playing domino qiu qiu online. It has a user-friendly interface, a large player base, and a variety of games and features. You can play for free or for real money, join tournaments and events, chat with other players, and get bonuses and rewards. You can download the app from their official website or from Google Play Store.</td><td>4.5/5</td></tr> -<tr><td>Domino QiuQiu 99</td><td>Domino QiuQiu 99 is another great app for playing domino qiu qiu online. It has a simple and elegant design, a fast and smooth gameplay, and a lot of fun and excitement. You can play with your friends or with millions of players online, enjoy various modes and themes, and win prizes and jackpots. You can download the app from Google Play Store or App Store.</td><td>4.4/5</td></tr> -<tr><td>Domino Gaple Online</td><td>Domino Gaple Online is a website that offers domino qiu qiu online games. It has a professional and secure platform, a high-quality graphics and sound, and a fair and random system. You can play with real money or virtual coins, join different rooms and tables, and get support and assistance from customer service. You can access the website from any browser or device.</td><td>4.3/5</td></tr> -</table> - <p>These are some of the best apps and websites that we have found for playing domino qiu qiu online using the 2019 apk version. However, you can also explore other options and find the one that suits your preferences and needs.</p> - <h2>How to Win Domino Qiu Qiu Online?</h2> - <p>Playing domino qiu qiu online is not only fun but also rewarding. You can win money, prizes, and fame by playing well and beating your opponents. However, winning domino qiu qiu online is not easy. You need to have skills, strategies, and luck to succeed. Here are some tips and tricks on how to win domino qiu qiu online:</p> - <h3>The strategies and tricks for improving your skills</h3> - <p>To improve your skills in domino qiu qiu online, you need to practice a lot and learn from your mistakes. You also need to study the game rules, the hand rankings, the odds, and the probabilities. Here are some strategies and tricks that can help you improve your skills:</p> - <ul> -<li>Know when to bet, raise, call, or fold. You need to be able to assess the strength of your hand and the situation of the game. You also need to be able to bluff or deceive your opponents.</li> -<li>Know when to play aggressively or conservatively. You need to be able to adapt your style of play depending on your position, your stack size, your opponents' behavior, and the stage of the game.</li> -<li>Know how to calculate the pot odds and the expected value. You need to be able to compare the amount of money you have to risk with the amount of money you can win. You also need to be able to estimate the chances of winning or losing with your hand.</li> -<li>Know how to manage your bankroll and emotions. You need to be able to set a budget and stick to it. You also need to be able to control your impulses and emotions.</li> -</ul> - <h3>The common mistakes and pitfalls to avoid</h3> - <p>To avoid losing in domino qiu qiu online, you need to avoid making common mistakes and pitfalls that can cost you money and reputation. 
Here are some of them:</p> - <ul> -<li>Playing too many hands or too few hands. You need to be selective with your starting hands and avoid playing weak or marginal hands that can get you into trouble.</li> -<li>Playing out of position or against too many opponents. You need to be aware of your position at the table and avoid playing in early or middle position unless you have a strong hand. You also need to avoid playing against too many opponents who can outdraw or outplay you.</li> -<li>Chasing losses or tilting. You need to avoid chasing losses by increasing your bets or playing more hands when you are losing. You also need to avoid tilting by letting your emotions affect your decisions when you are frustrated or angry.</li> -<li>Falling for traps or being predictable. You need to avoid falling for traps by calling bets or raises when you have a weak hand or when you are unsure of your opponent's hand. You also need to avoid being predictable by changing your style of play occasionally and mixing up your moves.</li> -</ul> - <h3>The resources and tools for learning more</h3> - <p>To learn more about domino qiu iu qiu online using the 2019 apk version?</li> -<p>You can get free coins or bonuses in domino qiu qiu online using the 2019 apk version by doing various activities, such as:</p> - <ul> -<li>Signing up or logging in daily.</li> -<li>Inviting or referring your friends to join the app or website.</li> -<li>Completing tasks or missions in the app or website.</li> -<li>Joining events or promotions in the app or website.</li> -<li>Watching ads or videos in the app or website.</li> -</ul> - <li>How can I contact customer service or support in domino qiu qiu online using the 2019 apk version?</li> -<p>You can contact customer service or support in domino qiu qiu online using the 2019 apk version by using various methods, such as:</p> - <ul> -<li>Live chat. You can chat with a customer service agent in real time in the app or website.</li> -<li>Email. You can send an email to the customer service address provided in the app or website.</li> -<li>Phone call. You can call the customer service number provided in the app or website.</li> -<li>FAQ section. You can read the frequently asked questions and answers provided in the app or website.</li> -</ul></p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Goat Simulator 3 Free for Android and Enjoy the Crazy Fun.md b/spaces/fatiXbelha/sd/Download Goat Simulator 3 Free for Android and Enjoy the Crazy Fun.md deleted file mode 100644 index 46b21e3d2fe40dbdab202da0e3baa0d86d595554..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Goat Simulator 3 Free for Android and Enjoy the Crazy Fun.md +++ /dev/null @@ -1,94 +0,0 @@ - -<h1>Goat Simulator 3 Free Download Android: How to Play the Latest Goat Game on Your Phone</h1> -<p>Do you love goats? Do you love chaos? Do you love games that are completely stupid but also hilarious? If you answered yes to any of these questions, then you will love <strong>Goat Simulator 3</strong>, the latest installment in the popular goat simulation series. 
In this article, we will show you how to download and play Goat Simulator 3 for free on your Android phone, as well as some tips and tricks to make the most out of your goat experience.</p> - <h2>Introduction: What is Goat Simulator 3 and why you should play it</h2> -<p>Goat Simulator 3 is a third-person sandbox adventure game that lets you become a goat and do whatever you want in a huge open world. You can headbutt, lick, jump, fly, explode, drive, grind, and more as you explore the island of San Angora, which is full of secrets, quests, collectibles, easter eggs, lies, betrayal, heartbreak, and goats. Lots of goats.</p> -<h2>goat simulator 3 free download android</h2><br /><p><b><b>Download File</b> &#10038;&#10038;&#10038; <a href="https://urllie.com/2uNzao">https://urllie.com/2uNzao</a></b></p><br /><br /> -<p>Goat Simulator 3 is not a realistic or educational game. It is a game that is meant to be fun and funny. It is a game that does not take itself seriously at all. It is a game that will make you laugh out loud as you witness the absurdity of goat physics, goat status effects, goat mini-games, goat musicals, goat Keanu Reeves (just kidding), and goat goldfish (not kidding).</p> -<p>If you are looking for a game that will challenge your mind, test your skills, or teach you something new, then Goat Simulator 3 is not for you. But if you are looking for a game that will entertain you, amuse you, surprise you, and make you feel like a goat, then Goat Simulator 3 is definitely for you.</p> - <h2>How to download Goat Simulator 3 for free on Android</h2> -<p>Goat Simulator 3 is not officially available on the Google Play Store. However, you can still download it for free from other sources as an APK file. An APK file is an application package file that contains all the data and code needed to install and run an app on an Android device. Here are the steps to download Goat Simulator 3 APK for free on Android:</p> - <ol> -<li>Go to a reliable and trustworthy website that offers Goat Simulator 3 APK for free. For example, you can use this link: <a href="">Goat Simulator 3 APK Download</a> .</li> -<li>Click on the download button and wait for the APK file to be downloaded to your device.</li> -<li>Before installing the APK file, make sure you have enough storage space on your device and that your battery is sufficiently charged.</li> -<li>Also, make sure you have taken the necessary precautions before installing any APK file from unknown sources. This includes scanning the file for viruses, malware, or spyware, backing up your data, and reading the reviews and ratings of other users who have downloaded the same file.</li> -<li>Disclaimer: Downloading and installing APK files from unknown sources may violate the terms of service of the Google Play Store and may expose your device to security risks. We are not responsible for any damage or harm that may occur to your device or data as a result of downloading and installing Goat Simulator 3 APK. Download and install at your own risk.</li> -</ol> - <h2>How to install and run Goat Simulator 3 on your Android device</h2> -<p>After downloading Goat Simulator 3 APK, you need to install it on your Android device. Here are the steps to install and run Goat Simulator 3 on your Android device:</p> - <ol> -<li>Go to your device settings and enable unknown sources. This will allow you to install apps from sources other than the Google Play Store. 
To do this, go to Settings > Security > Unknown Sources and toggle it on.</li> -<li>Locate the Goat Simulator 3 APK file on your device using a file manager app or your browser's download history.</li> -<li>Tap on the file and follow the instructions to install it on your device. You may need to grant some permissions to the app during the installation process.</li> -<li>Once the installation is complete, you can launch Goat Simulator 3 from your app drawer or home screen.</li> -<li>The first time you run Goat Simulator 3, you may need to grant some additional permissions to the app, such as access to your storage, microphone, camera, location, etc. These permissions are necessary for the app to function properly and to provide you with the best goat experience possible.</li> -</ol> - <h2>Tips and tricks for playing Goat Simulator 3 on Android</h2> -<p>Now that you have installed and launched Goat Simulator 3 on your Android device, you are ready to play the game and have some fun. Here are some tips and tricks for playing Goat Simulator 3 on Android:</p> -<p>goat simulator 3 apk latest version<br /> -goat simulator 3 mobile game update<br /> -goat simulator 3 android app free<br /> -goat simulator 3 download for android<br /> -goat simulator 3 mod apk unlimited money<br /> -goat simulator 3 gameplay and features<br /> -goat simulator 3 cheats and hacks<br /> -goat simulator 3 online multiplayer mode<br /> -goat simulator 3 review and rating<br /> -goat simulator 3 best tips and tricks<br /> -goat simulator 3 new maps and missions<br /> -goat simulator 3 fun and crazy physics<br /> -goat simulator 3 how to install on android<br /> -goat simulator 3 system requirements and compatibility<br /> -goat simulator 3 official trailer and screenshots<br /> -goat simulator 3 unlock all goats and skins<br /> -goat simulator 3 guide and walkthrough<br /> -goat simulator 3 alternatives and similar games<br /> -goat simulator 3 patch notes and bug fixes<br /> -goat simulator 3 support and feedback<br /> -goat simulator 3 release date and price<br /> -goat simulator 3 developer and publisher<br /> -goat simulator 3 awards and achievements<br /> -goat simulator 3 fan art and memes<br /> -goat simulator 3 news and updates<br /> -goat simulator 3 forum and community<br /> -goat simulator 3 wiki and faq<br /> -goat simulator 3 comparison with previous versions<br /> -goat simulator 3 secrets and easter eggs<br /> -goat simulator 3 soundtracks and voice actors<br /> -goat simulator 3 custom mods and levels<br /> -goat simulator 3 controller support and settings<br /> -goat simulator 3 vr mode and compatibility<br /> -goat simulator 3 cross-platform play and cloud save<br /> -goat simulator 3 merchandise and accessories<br /> -goat simulator 3 theme song and lyrics<br /> -goat simulator 3 trivia and facts<br /> -goat simulator 3 jokes and puns<br /> -goat simulator 3 challenges and achievements<br /> -goat simulator 3 history and development</p> - <ul> -<li>To customize your goat, go to the main menu and tap on the goat icon. You can choose from different skins, outfits, and accessories for your goat. You can also unlock more options by completing quests, finding collectibles, or buying them with in-game currency.</li> -<li>To explore the open world of San Angora, use the virtual joystick on the left side of the screen to move your goat and swipe on the right side of the screen to look around. 
You can also use buttons on the right side of the screen to perform actions such as headbutt, lick, jump, fly, explode, etc.</li> -<li>To cause mayhem and mischief with various physics, status effects, and elements, interact with different objects, animals, people, vehicles, buildings, etc. in the world. You can also use items such as jetpacks, fireworks, bombs, magnets, etc. to create more chaos. Be creative and experiment with different combinations of actions and items.</li> -<li>To play with your friends in local or online co-op mode or compete in mini-games, go to the main menu and tap on the multiplayer icon. You can join or create a room with up to four players and choose from different modes such as free roam, capture the flag, king of the hill, etc. You can also chat with other players using voice or text messages.</li> -<li>To unlock achievements and rewards, go to the main menu and tap on the trophy icon. You can see a list of achievements that you can complete by doing various things in the game such as destroying a certain number of objects, reaching a certain height or speed, finding hidden secrets, etc. You can also earn rewards such as coins or gems by completing achievements.</li> -</ul> - <h2>Conclusion: The benefits of playing Goat Simulator 3 on Android</h2> -<p>Goat Simulator 3 is a game that will make you laugh out loud as you become a goat and do whatever you want in a huge open world. It is a game that will entertain you, amuse you, surprise you, and make you feel like a goat. It is a game that will not disappoint you if you are looking for a silly and ridiculous game that does not care about realism or logic. It is a game that will give you hours of fun and laughter with your friends or by yourself.</p> - <p>So what are you waiting for? Download Goat Simulator 3 for free on your Android phone today and enjoy the ultimate goat simulation experience. You will not regret it. Trust us, we are goats.</p> - <h2>FAQs</h2> -<p>Here are some frequently asked questions about Goat Simulator 3 and their answers:</p> - <ol> -<li><strong>Is Goat Simulator 3 safe to download and play on Android?</strong></li> -<p>Goat Simulator 3 is safe to download and play on Android as long as you download it from a reliable and trustworthy source and follow the precautions mentioned in this article. However, we cannot guarantee that Goat Simulator 3 will not cause any glitches, bugs, crashes, or errors on your device or data. Play at your own risk.</p> -<li><strong>Is Goat Simulator 3 compatible with all Android devices?</strong></li> -<p>Goat Simulator 3 is compatible with most Android devices that have Android 4.4 or higher and at least 1 GB of RAM. However, some devices may not be able to run Goat Simulator 3 smoothly or at all due to hardware limitations or software issues. If you encounter any problems with Goat Simulator 3 on your device, please contact the developer for support.</p> -<li><strong>How can I get more coins and gems in Goat Simulator 3?</strong></li> -<p>You can get more coins and gems in Goat Simulator 3 by completing achievements, finding collectibles, playing mini-games, watching ads, or buying them with real money. You can use coins and gems to buy more items, skins, outfits, and accessories for your goat.</p> -<li><strong>How can I update Goat Simulator 3 on my Android device?</strong></li> -<p>You can update Goat Simulator 3 on your Android device by downloading and installing the latest version of the APK file from the same source that you downloaded it from. 
You may need to uninstall the previous version of Goat Simulator 3 before installing the new one. Make sure you backup your data before updating.</p> -<li><strong>How can I uninstall Goat Simulator 3 from my Android device?</strong></li> -<p>You can uninstall Goat Simulator 3 from your Android device by going to your device settings and tapping on Apps or Applications. Find Goat Simulator 3 in the list of apps and tap on it. Then tap on Uninstall and confirm your choice. You may also need to delete the APK file and any other files related to Goat Simulator 3 from your device.</p> -</ol></p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download UMT Support Access 2.0 for Free - All Features and Functions Explained.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download UMT Support Access 2.0 for Free - All Features and Functions Explained.md deleted file mode 100644 index 8ad53158e10a7ac177ec8814cae8aeeb2001ba0f..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download UMT Support Access 2.0 for Free - All Features and Functions Explained.md +++ /dev/null @@ -1,151 +0,0 @@ - -<h1>What is UMT Support Access 2.0?</h1> -<p>If you are looking for a powerful and versatile tool to flash, unlock, repair, and remove FRP from your Android devices, then you might want to check out UMT Support Access 2.0. UMT stands for Ultimate Multi Tool, and it is a dongle-based software that allows you to perform various tasks on your devices with ease.</p> -<h2>download umt support access 2.0</h2><br /><p><b><b>Download Zip</b> &#10040; <a href="https://gohhs.com/2uPpCX">https://gohhs.com/2uPpCX</a></b></p><br /><br /> -<p>UMT Support Access 2.0 is the latest version of the tool that has been updated with new features and improvements. It supports a wide range of devices from different brands and models, such as Samsung, Huawei, Xiaomi, Oppo, Vivo, LG, Motorola, Nokia, and more. It also supports various chipsets, such as MTK, Qualcomm, SPD, Allwinner, Hisilicon, etc.</p> -<p>Some of the features of UMT Support Access 2.0 are:</p> -<ul> -<li>Flashing: You can flash stock or custom firmware on your devices using UMT Support Access 2.0.</li> -<li>Unlocking: You can unlock the SIM lock or network lock on your devices using UMT Support Access 2.0.</li> -<li>Repairing: You can repair the IMEI, Bluetooth, Wi-Fi, or baseband issues on your devices using UMT Support Access 2.0.</li> -<li>FRP Removal: You can remove the Google account or factory reset protection (FRP) on your devices using UMT Support Access 2.0.</li> -</ul> - <h2>Why do you need UMT Support Access 2.0?</h2> -<p>There are many reasons why you might need UMT Support Access 2.0 for your devices. Here are some of them:</p> -<ul> -<li>You want to update or downgrade your device's firmware to fix bugs or improve performance.</li> -<li>You want to switch to a different carrier or network provider and need to unlock your device.</li> -<li>You want to fix some software issues or errors on your device that affect its functionality.</li> -<li>You want to bypass the FRP lock on your device after a hard reset or factory reset.</li> -</ul> -<p>UMT Support Access 2.0 can help you with all these scenarios and more. 
It is a reliable and efficient tool that can save you time and money by doing the job yourself instead of taking your device to a service center or paying for an online service.</p> - <h3>How to download UMT Support Access 2.0?</h3> -<p>To download UMT Support Access 2.0, you need to have a UMT dongle first. A UMT dongle is a hardware device that connects to your PC via USB and allows you to use the UMT Support Access 2.0 software. You can buy a UMT dongle from various online stores or local shops, depending on your location and availability.</p> -<p>How to download umt support access 2.0 latest version<br /> -Umt support access 2.0 official updated version 2023 download link<br /> -Umt support access 2.0 features and compatibility<br /> -Download umt support access 2.0 for windows 10<br /> -Umt support access 2.0 free download for flashing and frp removal<br /> -Umt support access 2.0 rar file download<br /> -Umt support access 2.0 setup file download<br /> -Umt support access 2.0 installation guide<br /> -Umt support access 2.0 download for ultimate multi tool box/dongle<br /> -Umt support access 2.0 download for repair bt and imei<br /> -Umt support access 2.0 download for remove sim lock and google account<br /> -Umt support access 2.0 download for mtk, qc, sprd, allwinner, hisilicon support<br /> -Umt support access 2.0 download for xiaomi server and wipe phones<br /> -Umt support access 2.0 latest update download<br /> -Umt support access 2.0 official website download<br /> -Umt support access 2.0 crack version download<br /> -Umt support access 2.0 license key download<br /> -Umt support access 2.0 activation code download<br /> -Umt support access 2.0 offline installer download<br /> -Umt support access 2.0 online installer download<br /> -Umt support access 2.0 direct download link<br /> -Umt support access 2.0 torrent download link<br /> -Umt support access 2.0 mirror download link<br /> -Umt support access 2.0 alternative download link<br /> -Umt support access 2.0 safe and secure download link<br /> -Umt support access 2.0 fast and easy download link<br /> -Umt support access 2.0 full version download link<br /> -Umt support access 2.0 trial version download link<br /> -Umt support access 2.0 portable version download link<br /> -Umt support access 2.0 zip file download link<br /> -Download umt support access v2.0 latest updated version<br /> -Download umt v2_support_access.rar file latest version<br /> -Download umt v2_support_access.exe file latest version<br /> -Download umt v2_support_access.zip file latest version<br /> -Download umt v2_support_access setup file latest version<br /> -Download umt v2_support_access crack file latest version<br /> -Download umt v2_support_access license file latest version<br /> -Download umt v2_support_access activation file latest version<br /> -Download umt v2_support_access offline file latest version<br /> -Download umt v2_support_access online file latest version<br /> -Download umt v2_support_access direct file latest version<br /> -Download umt v2_support_access torrent file latest version<br /> -Download umt v2_support_access mirror file latest version<br /> -Download umt v2_support_access alternative file latest version<br /> -Download umt v2_support_access safe file latest version<br /> -Download umt v2_support_access secure file latest version<br /> -Download umt v2_support_access fast file latest version<br /> -Download umt v2_support_access easy file latest version<br /> -Download umt v2_support_access full file latest 
version</p> -<p>Once you have a UMT dongle, you can download the UMT Support Access 2.0 software from the official website of UMT or from our website. The official website of UMT is <a href="">https://umtpro.com/</a> and our website is <a href="">https://umtsupportaccess.com/</a>. Both websites offer the latest version of the software, which is currently v2.0.9.1.</p> -<p>To download the software from the official website, you need to register an account first and then log in. You can find the download link under the "Downloads" section of the website. To download the software from our website, you don't need to register or log in. You can simply click on the download button on the homepage and get the software in a zip file.</p> - <h4>How to install UMT Support Access 2.0?</h4> -<p>To install UMT Support Access 2.0 on your Windows PC, you need to meet some requirements first. These are:</p> -<ul> -<li>A UMT dongle connected to your PC via USB.</li> -<li>A Windows PC running Windows XP, Vista, 7, 8, 8.1, or 10 (32-bit or 64-bit).</li> -<li>A minimum of 512 MB RAM and 200 MB free disk space.</li> -<li>A stable internet connection for downloading and updating the software.</li> -</ul> -<p>Once you have met these requirements, you can follow these instructions to install the software:</p> -<ol> -<li>Extract the zip file that contains the software using WinRAR or any other extraction tool.</li> -<li>Open the extracted folder and double-click on the "UMT Support Access.exe" file to run the installer.</li> -<li>Follow the on-screen instructions and accept the terms and conditions to complete the installation process.</li> -<li>Launch the software from your desktop shortcut or start menu.</li> -</ol> - <h4>How to use UMT Support Access 2.0?</h4> -<p>To use UMT Support Access 2.0 for flashing, unlocking, repairing, or removing FRP from your devices, you need to follow these tips and tricks:</p> -<ul> -<li>Make sure your device is compatible with the tool and has a sufficient battery level (at least 50%).</li> -<li>Make sure you have a backup of your device's data before performing any operation with the tool.</li> -<li>Make sure you have the correct drivers installed for your device and your PC recognizes it.</li> -<li>Select the appropriate module and option for your device and task from the tool's interface.</li> -<li>Follow the instructions on the screen and wait for the process to finish.</li> -</ul> -<p>If you encounter any problem or error while using the tool, you can contact the UMT support team via email or phone. You can also check their forum or YouTube channel for more help and tutorials.</p> - <h2>What are the supported devices and operating systems for UMT Support Access 2.0?</h2> -<p>UMT Support Access 2.0 supports a large number of devices and operating systems. 
Here are some of them:</p> - <h3>Supported devices</h3> -<p>The following table shows some of the supported devices by brand and model:</p> - <table border="1"> -<tr><th>Brand</th><th>Model</th></tr> -<tr><td>Samsung</td><td>A10, A20, A30, A50, A70, S10, S20, Note 10, Note 20, etc.</td></tr> -<tr><td>Huawei</td><td>P20, P30, P40, Mate 10, Mate 20, Mate 30, Nova 3, Nova 4, Nova 5, etc.</td></tr> -<tr><td>Xiaomi</td><td>Mi 8, Mi 9, Mi 10, Redmi Note 5, Redmi Note 6, Redmi Note 7, Redmi Note 8, Redmi Note 9, etc.</td></tr> -<tr><td>Oppo</td><td>F9, F11, F15, Reno 2, Reno 3, Reno 4, Reno 5, A3s, A5s, A9s, etc.</td></tr> -<tr><td>Vivo</td><td>V9, V11, V15, V17, V19, Y11, Y12, Y15, Y17, Y19, etc.</td></tr> -<tr><td>LG</td><td>G6, G7, G8, V20, V30, V40, V50, V60, K10, K20, K30, K40, K50, etc.</td></tr> -<tr><td>Motorola</td><td>Moto G5, Moto G6, Moto G7, Moto G8, Moto E4, Moto E5, Moto E6, Moto Z2, Moto Z3, Moto Z4, etc.</td></tr> -<tr><td>Nokia</td><td>Nokia 2.1, Nokia 2.2, Nokia 2.3, Nokia 3.1, Nokia 3.2, Nokia 4.2, Nokia 5.1, Nokia 5.3, Nokia 6.1, Nokia 6.2, etc.</td></tr> -</table> - <h3>Supported operating systems</h3> -<p>The following list shows some of the supported Windows versions for UMT Support Access 2.0:</p> -<ul> -<li>Windows XP (32-bit or 64-bit)</li> -<li>Windows Vista (32-bit or 64-bit)</li> -<li>Windows 7 (32-bit or 64-bit)</li> -<li>Windows 8 (32-bit or 64-bit)</li> -<li>Windows 8.1 (32-bit or 64-bit)</li> -<li>Windows 10 (32-bit or 64-bit)</li> -</ul> - <h2>What are the alternatives to UMT Support Access 2.0?</h2> -<p>If you don't have a UMT dongle or you want to try some other tools that can perform similar functions as UMT Support Access 2.0, you can check out these alternatives:</p> - <h3>Miracle Box</h3> -<p>Miracle Box is another dongle-based software that can flash, unlock, repair, and remove FRP from various Android devices. It supports MTK, SPD, Qualcomm, RDA, Coolpad, MStar, and CDMA chipsets. It also has a user-friendly interface and a large database of firmware files. You can download Miracle Box from <a href="">https://miraclebox.com/</a>.</p> - <h3>NCK Dongle</h3> -<p>NCK Dongle is another dongle-based software that can flash, unlock, repair, and remove FRP from various Android devices. It supports MTK, SPD, Qualcomm, Samsung, Huawei, HTC, LG, Sony, Motorola, Alcatel, and more brands and models. It also has a simple and easy-to-use interface and a regular update system. You can download NCK Dongle from <a href="">https://nckdongle.com/</a>.</p> - <h3>MRT Dongle</h3> -<p>MRT Dongle is another dongle-based software that can flash, unlock, repair, and remove FRP from various Android devices. It supports MTK, Qualcomm, Huawei, Oppo, Vivo, Meizu, Xiaomi, and more brands and models. It also has a fast and powerful performance and a unique method of unlocking devices. You can download MRT Dongle from <a href="">https://mrt-dongle.com/</a>.</p> - <h2>Conclusion</h2> -<p>UMT Support Access 2.0 is a great tool for flashing, unlocking, repairing, and removing FRP from your Android devices. It supports a wide range of devices and operating systems and has many features and functions. It is easy to download, install, and use with a UMT dongle. However, if you don't have a UMT dongle or you want to try some other tools, you can also check out the alternatives we mentioned above.</p> -<p>We hope this article was helpful for you and answered your questions about UMT Support Access 2.0. If you have any feedback or suggestions, please let us know in the comments below. 
Thank you for reading!</p> - <h3>FAQs</h3> -<p>Here are some frequently asked questions related to UMT Support Access 2.0:</p> -<ol> -<li>Q: How much does a UMT dongle cost?</li> -<li>A: The price of a UMT dongle may vary depending on the seller and the location. However, the average price is around $50-$60.</li> -<li>Q: How can I update UMT Support Access 2.0?</li> -<li>A: You can update UMT Support Access 2.0 by clicking on the "Update" button on the software's interface. You can also check for updates manually by visiting the official website or our website.</li> -<li>Q: What are the risks of using UMT Support Access 2.0?</li> -<li>A: Using UMT Support Access 2.0 may void your device's warranty or cause some damage to your device if not done properly. Therefore, you should always backup your data before using the tool and follow the instructions carefully.</li> -<li>Q: Is UMT Support Access 2.0 safe to use?</li> -<li>A: UMT Support Access 2.0 is safe to use as long as you download it from the official website or our website. You should avoid downloading it from any other sources as they may contain viruses or malware.</li> -<li>Q: Can I use UMT Support Access 2.0 without a UMT dongle?</li> -<li>A: No, you cannot use UMT Support Access 2.0 without a UMT dongle as it is a dongle-based software that requires a hardware device to work.</li> -</ol></p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fffiloni/Image-to-MusicGen/audiocraft/utils/autocast.py b/spaces/fffiloni/Image-to-MusicGen/audiocraft/utils/autocast.py deleted file mode 100644 index ed644843bb37cf8a92a20fbd51d6cebaa43b9a08..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Image-to-MusicGen/audiocraft/utils/autocast.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch - - -class TorchAutocast: - """TorchAutocast utility class. - Allows you to enable and disable autocast. This is specially useful - when dealing with different architectures and clusters with different - levels of support. - - Args: - enabled (bool): Whether to enable torch.autocast or not. - args: Additional args for torch.autocast. - kwargs: Additional kwargs for torch.autocast - """ - def __init__(self, enabled: bool, *args, **kwargs): - self.autocast = torch.autocast(*args, **kwargs) if enabled else None - - def __enter__(self): - if self.autocast is None: - return - try: - self.autocast.__enter__() - except RuntimeError: - device = self.autocast.device - dtype = self.autocast.fast_dtype - raise RuntimeError( - f"There was an error autocasting with dtype={dtype} device={device}\n" - "If you are on the FAIR Cluster, you might need to use autocast_dtype=float16" - ) - - def __exit__(self, *args, **kwargs): - if self.autocast is None: - return - self.autocast.__exit__(*args, **kwargs) diff --git a/spaces/fffiloni/SplitTrack2MusicGen/audiocraft/utils/export.py b/spaces/fffiloni/SplitTrack2MusicGen/audiocraft/utils/export.py deleted file mode 100644 index b513b52267f7bf5aae09282c15b0a2e20c8a8fee..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/SplitTrack2MusicGen/audiocraft/utils/export.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. 
-# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Utility to export a training checkpoint to a lightweight release checkpoint. -""" - -from pathlib import Path -import typing as tp - -from omegaconf import OmegaConf, DictConfig -import torch - - -def _clean_lm_cfg(cfg: DictConfig): - OmegaConf.set_struct(cfg, False) - # This used to be set automatically in the LM solver, need a more robust solution - # for the future. - cfg['transformer_lm']['card'] = 2048 - cfg['transformer_lm']['n_q'] = 4 - # Experimental params no longer supported. - bad_params = ['spectral_norm_attn_iters', 'spectral_norm_ff_iters', - 'residual_balancer_attn', 'residual_balancer_ff', 'layer_drop'] - for name in bad_params: - del cfg['transformer_lm'][name] - OmegaConf.set_struct(cfg, True) - return cfg - - -def export_encodec(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]): - sig = Path(checkpoint_path).parent.name - assert len(sig) == 8, "Not a valid Dora signature" - pkg = torch.load(checkpoint_path, 'cpu') - new_pkg = { - 'best_state': pkg['ema']['state']['model'], - 'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']), - } - out_file = Path(out_folder) / f'{sig}.th' - torch.save(new_pkg, out_file) - return out_file - - -def export_lm(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]): - sig = Path(checkpoint_path).parent.name - assert len(sig) == 8, "Not a valid Dora signature" - pkg = torch.load(checkpoint_path, 'cpu') - new_pkg = { - 'best_state': pkg['fsdp_best_state']['model'], - 'xp.cfg': OmegaConf.to_yaml(_clean_lm_cfg(pkg['xp.cfg'])) - } - out_file = Path(out_folder) / f'{sig}.th' - torch.save(new_pkg, out_file) - return out_file diff --git a/spaces/fkhuggingme/gpt-academic/docs/README_RS.md b/spaces/fkhuggingme/gpt-academic/docs/README_RS.md deleted file mode 100644 index f8d925a27a6e5a19304db6f6d266e3bb3163172f..0000000000000000000000000000000000000000 --- a/spaces/fkhuggingme/gpt-academic/docs/README_RS.md +++ /dev/null @@ -1,291 +0,0 @@ -> **Note** -> -> Этот файл самовыражения автоматически генерируется модулем перевода markdown в этом проекте и может быть не на 100% правильным. -> - -# <img src="logo.png" width="40" > ChatGPT Academic Optimization - -**Если вам понравился этот проект, пожалуйста, поставьте ему звезду. Если вы придумали более полезные академические ярлыки или функциональные плагины, не стесняйтесь создавать запросы на изменение или пул-запросы. Мы также имеем [README на английском языке](docs/README_EN.md), переведенный этим же проектом. - -> **Примечание** -> -> 1. Пожалуйста, обратите внимание, что только функциonal plugins (buttons) с **красным цветом** могут читать файлы, некоторые из которых находятся в **выпадающем меню** плагинов. Кроме того, мы приветствуем и обрабатываем любые новые плагины с **наивысшим приоритетом**! -> -> 2. Функции каждого файла в этом проекте подробно описаны в собственном анализе [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) . При повторных итерациях вы также можете вызывать обновленный отчет функций проекта, щелкнув соответствующий функциональный плагин GPT. Часто задаваемые вопросы собраны в [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98) . 
- -<div align="center"> - -Функция | Описание ---- | --- -Редактирование одним кликом | Поддержка редактирования одним кликом, поиск грамматических ошибок в академических статьях -Переключение языков "Английский-Китайский" одним кликом | Одним кликом переключайте языки "Английский-Китайский" -Разъяснение программного кода одним кликом | Вы можете правильно отобразить и объяснить программный код. -[Настраиваемые сочетания клавиш](https://www.bilibili.com/video/BV14s4y1E7jN) | Поддержка настраиваемых сочетаний клавиш -[Настройка сервера-прокси](https://www.bilibili.com/video/BV1rc411W7Dr) | Поддержка настройки сервера-прокси -Модульный дизайн | Поддержка настраиваемых функциональных плагинов высших порядков и функциональных плагинов, поддерживающих [горячее обновление](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[Автоанализ программы](https://www.bilibili.com/video/BV1cj411A7VW) | [Функциональный плагин] [Прочтение в один клик](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) кода программы проекта -[Анализ программы](https://www.bilibili.com/video/BV1cj411A7VW) | [Функциональный плагин] Один клик для проанализирования дерева других проектов Python/C/C++/Java/Lua/... -Чтение статей| [Функциональный плагин] Одним кликом прочитайте весь латех (LaTex) текст статьи и сгенерируйте краткое описание -Перевод и редактирование всех статей из LaTex | [Функциональный плагин] Перевод или редактирование LaTex-статьи всего одним нажатием кнопки -Генерация комментариев в пакетном режиме | [Функциональный плагин] Одним кликом сгенерируйте комментарии к функциям в пакетном режиме -Генерация отчетов пакета CHAT | [Функциональный плагин] Автоматически создавайте сводные отчеты после выполнения -[Помощник по arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Функциональный плагин] Введите URL статьи arxiv, чтобы легко перевести резюме и загрузить PDF-файл -[Перевод полного текста статьи в формате PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Функциональный плагин] Извлеките заголовок статьи, резюме и переведите весь текст статьи (многопоточно) -[Помощник интеграции Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Функциональный плагин] Дайте GPT выбрать для вас интересные статьи на любой странице поиска Google Scholar. -Отображение формул/изображений/таблиц | Одновременно отображается tex-форма и рендер-форма формул, поддержка формул, высокоскоростных кодов -Поддержка функциональных плагинов многопоточности | Поддержка многопоточной работы с плагинами, обрабатывайте огромные объемы текста или программы одним кликом -Запуск темной темы gradio[подробнее](https://github.com/binary-husky/chatgpt_academic/issues/173) | Добавьте / ?__dark-theme=true в конец URL браузера, чтобы переключиться на темную тему. -[Поддержка нескольких моделей LLM](https://www.bilibili.com/video/BV1wT411p7yf), поддержка API2D | Находиться между GPT3.5, GPT4 и [清华ChatGLM](https://github.com/THUDM/ChatGLM-6B) должно быть очень приятно, не так ли? 
-Альтернатива huggingface без использования научной сети [Онлайн-эксперимент](https://huggingface.co/spaces/qingxu98/gpt-academic) | Войдите в систему, скопируйте пространство [этот пространственный URL](https://huggingface.co/spaces/qingxu98/gpt-academic) -…… | …… - - -</div> - -- Новый интерфейс (вы можете изменить настройку LAYOUT в config.py, чтобы переключаться между "горизонтальным расположением" и "вертикальным расположением") -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" > -</div> - - -Вы профессиональный переводчик научных статей. - -- Все кнопки генерируются динамически путем чтения functional.py и могут быть легко настроены под пользовательские потребности, освобождая буфер обмена. -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" > -</div> - -- Редактирование/корректирование -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" > -</div> - -- Если вывод содержит формулы, они отображаются одновременно как в формате tex, так и в рендеринговом формате для удобства копирования и чтения. -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" > -</div> - -- Лень смотреть код проекта? Просто покажите chatgpt. -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" > -</div> - -- Несколько моделей больших языковых моделей смешиваются (ChatGLM + OpenAI-GPT3.5 + [API2D] (https://api2d.com/) -GPT4) -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" > -</div> - -Несколько моделей больших языковых моделей смешиваются в [бета-версии huggingface] (https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (huggingface-версия не поддерживает chatglm). - - ---- - -## Установка - Метод 1: Запуск (Windows, Linux или MacOS) - -1. Скачайте проект -```sh -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -``` - -2. Настройка API_KEY и настройки прокси - -В файле `config.py` настройте зарубежный прокси и OpenAI API KEY, пояснения ниже -``` -1. Если вы находитесь в Китае, вам нужно настроить зарубежный прокси, чтобы использовать OpenAI API. Пожалуйста, внимательно прочитайте config.py для получения инструкций (1. Измените USE_PROXY на True; 2. Измените прокси в соответствии с инструкциями). -2. Настройка API KEY OpenAI. Вам необходимо зарегистрироваться на сайте OpenAI и получить API KEY. После получения API KEY настройте его в файле config.py. -3. Вопросы, связанные с сетевыми проблемами (тайм-аут сети, прокси не работает), можно найти здесь: https://github.com/binary-husky/chatgpt_academic/issues/1 -``` -(Примечание: при запуске программы будет проверяться наличие конфиденциального файла конфигурации с именем `config_private.py` и использоваться в нем конфигурация параметров, которая перезаписывает параметры с такими же именами в `config.py`. Поэтому, если вы понимаете логику чтения нашей конфигурации, мы настоятельно рекомендуем вам создать новый файл конфигурации с именем `config_private.py` рядом с `config.py` и переместить (скопировать) настройки из `config.py` в `config_private.py`. 
`config_private.py` не подвергается контролю git, что делает конфиденциальную информацию более безопасной.) - - -3. Установить зависимости -```sh -# (Выбор 1) Рекомендуется -python -m pip install -r requirements.txt - -# (Выбор 2) Если вы используете anaconda, то шаги будут аналогичны: -# (Шаг 2.1) conda create -n gptac_venv python=3.11 -# (Шаг 2.2) conda activate gptac_venv -# (Шаг 2.3) python -m pip install -r requirements.txt - -# Примечание: используйте официальный источник pip или источник pip.aliyun.com. Другие источники pip могут вызывать проблемы. временный метод замены источника: -# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -``` - -Если требуется поддержка TUNA ChatGLM, необходимо установить дополнительные зависимости (если вы неудобны с python, необходимо иметь хорошую конфигурацию компьютера): -```sh -python -m pip install -r request_llm/requirements_chatglm.txt -``` - -4. Запустите -```sh -python main.py -``` - -5. Тестовые функции плагина -``` -- Тестирвоание анализа проекта Python - В основной области введите `./crazy_functions/test_project/python/dqn` , а затем нажмите "Анализировать весь проект Python" -- Тестирование самостоятельного чтения кода - Щелкните " [Демонстрационный режим многопоточности] Проанализируйте сам проект (расшифровка источника кода)" -- Тестирование функций шаблонного плагина (вы можете использовать эту функцию как шаблон для более сложных функций, требующих ответа от gpt в связи с тем, что произошло сегодня в истории) - Щелкните " [Функции шаблонного плагина] День в истории" -- На нижней панели дополнительные функции для выбора -``` - -## Установка - Метод 2: Использование docker (Linux) - - -1. Только ChatGPT (рекомендуется для большинства пользователей): -``` sh -# Скачать проект -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -# Настроить прокси за границей и OpenAI API KEY -Отредактируйте файл config.py в любом текстовом редакторе. -# Установка -docker build -t gpt-academic . -# Запустить -docker run --rm -it --net=host gpt-academic - -# Проверка функциональности плагина -## Проверка шаблонной функции плагина (требуется, чтобы gpt ответил, что произошло "в истории на этот день"), вы можете использовать эту функцию в качестве шаблона для реализации более сложных функций. -Нажмите "[Шаблонный демонстрационный плагин] История на этот день". -## Тест абстрактного резюме для проекта на Latex -В области ввода введите ./crazy_functions/test_project/latex/attention, а затем нажмите "Чтение реферата о тезисах статьи на LaTeX". -## Тестовый анализ проекта на Python -Введите в область ввода ./crazy_functions/test_project/python/dqn, затем нажмите "Проанализировать весь проект на Python". - -Выбирайте больше функциональных плагинов в нижнем выпадающем меню. -``` - -2. ChatGPT + ChatGLM (требуется глубокое знание Docker и достаточно мощное компьютерное оборудование): - -``` sh -# Изменение Dockerfile -cd docs && nano Dockerfile+ChatGLM -# Как построить | Как запустить (Dockerfile+ChatGLM в пути docs, сначала перейдите в папку с помощью cd docs) -docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM . -# Как запустить | Как запустить (2) я хочу войти в контейнер и сделать какие-то настройки до запуска: -docker run --rm -it --net=host --gpus=all gpt-academic bash -``` - - -## Установка-Метод 3: Другие способы развертывания - -1. 
Развертывание на удаленном облачном сервере -Пожалуйста, посетите [Deploy Wiki-1] (https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -2. Использование WSL2 (Windows Subsystem for Linux) -Пожалуйста, посетите [Deploy Wiki-2] (https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - - -## Установка-Настройки прокси -### Метод 1: Обычный способ -[Конфигурация прокси] (https://github.com/binary-husky/chatgpt_academic/issues/1) - -### Метод 2: Руководство новичка -[Руководство новичка] (https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89) - - ---- - -## Настройка новой удобной кнопки (настройка быстрой клавиши для научной работы) -Откройте `core_functional.py` любым текстовым редактором, добавьте элементы, как показано ниже, затем перезапустите программу. (Если кнопка уже успешно добавлена и видна, то префикс и суффикс поддерживают горячее изменение, чтобы они оказались в действии, не нужно перезапускать программу.) -например -``` -"Супер анг-рус": { - # Префикс, будет добавлен перед вашим вводом. Например, используется для описания ваших потребностей, таких как перевод, кодинг, редактирование и т. д. - "Prefix": "Пожалуйста, переведите этот фрагмент на русский язык, а затем создайте пошаговую таблицу в markdown, чтобы объяснить все специализированные термины, которые встречаются в тексте:\n\n", - - # Суффикс, будет добавлен после вашего ввода. Например, совместно с префиксом можно обрамить ваш ввод в кавычки. 
- "Suffix": "", -}, -``` -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" > -</div> - ---- - - -## Демонстрация некоторых возможностей - -### Отображение изображений: - -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/228737599-bf0a9d9c-1808-4f43-ae15-dfcc7af0f295.png" width="800" > -</div> - - -### Если программа может понимать и разбирать сама себя: - -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="800" > -</div> - -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/226936618-9b487e4b-ab5b-4b6e-84c6-16942102e917.png" width="800" > -</div> - - -### Анализ других проектов на Python/Cpp: -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="800" > -</div> - -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="800" > -</div> - -### Генерация понимания и абстрактов с помощью Латех статей в один клик -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/227504406-86ab97cd-f208-41c3-8e4a-7000e51cf980.png" width="800" > -</div> - -### Автоматическое создание отчетов -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" > -<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" > -<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" > -</div> - -### Модульный дизайн функций -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" > -<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" > -</div> - - -### Трансляция исходного кода на английский язык - -<div align="center"> -<img src="https://user-images.githubusercontent.com/96192199/229720562-fe6c3508-6142-4635-a83d-21eb3669baee.png" height="400" > -</div> - -## Todo и планирование версий: -- version 3.2+ (todo): функция плагины поддерживают более многочисленные интерфейсы параметров -- version 3.1: поддержка одновременного опроса нескольких моделей gpt! Поддержка api2d, поддержка балансировки нагрузки множества apikey. -- version 3.0: поддержка chatglm и других маленьких llm -- version 2.6: реструктурировал структуру плагинов, повысил интерактивность, добавил больше плагинов -- version 2.5: само обновление, решение проблемы слишком длинного текста и переполнения токена при переводе всего проекта исходного кода -- version 2.4: (1) добавлена функция перевода всего PDF-документа; (2) добавлена функция изменения положения входной области; (3) добавлена опция вертикального макета; (4) оптимизация функций многопоточности плагина. 
-- version 2.3: улучшение многопоточной интерактивности -- version 2.2: функция плагинов поддерживает горячую перезагрузку -- version 2.1: блочная раскладка -- version 2.0: модульный дизайн функций плагина -- version 1.0: основные функции - -## Ссылки на изучение и обучение - -``` -В коде использовано много хороших дизайнерских решений из других отличных проектов, в том числе: - -# Project1: использование многих приемов из ChuanhuChatGPT -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# Project2: ChatGLM-6B в Тхуде: -https://github.com/THUDM/ChatGLM-6B -``` - diff --git a/spaces/flowers-team/SocialAISchool/scripts/visualize.py b/spaces/flowers-team/SocialAISchool/scripts/visualize.py deleted file mode 100644 index f643eda716d4d1f24f38cc5517f7685b7569850c..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/scripts/visualize.py +++ /dev/null @@ -1,159 +0,0 @@ -import argparse -import json -import time -import numpy as np -import torch -from pathlib import Path - -from utils.babyai_utils.baby_agent import load_agent -from utils.env import make_env -from utils.other import seed -from utils.storage import get_model_dir -from utils.storage import get_status -from models import * -import subprocess - -# Parse arguments - -parser = argparse.ArgumentParser() -parser.add_argument("--model", required=True, - help="name of the trained model (REQUIRED)") -parser.add_argument("--seed", type=int, default=0, - help="random seed (default: 0)") -parser.add_argument("--max-steps", type=int, default=None, - help="max num of steps") -parser.add_argument("--shift", type=int, default=0, - help="number of times the environment is reset at the beginning (default: 0)") -parser.add_argument("--argmax", action="store_true", default=False, - help="select the action with highest probability (default: False)") -parser.add_argument("--pause", type=float, default=0.5, - help="pause duration between two consequent actions of the agent (default: 0.5)") -parser.add_argument("--env-name", type=str, default=None, required=True, - help="env name") -parser.add_argument("--gif", type=str, default=None, - help="store output as gif with the given filename", required=True) -parser.add_argument("--episodes", type=int, default=10, - help="number of episodes to visualize") - -args = parser.parse_args() - -# Set seed for all randomness sources - -seed(args.seed) - -save = args.gif -if save: - savename = args.gif - if savename == "model_id": - savename = args.model.replace('storage/', '') - savename = savename.replace('/','_') - savename += '_{}'.format(args.seed) - - - - -# Set device - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -print(f"Device: {device}\n") - -# Load environment - -if str(args.model).startswith("./storage/"): - args.model = args.model.replace("./storage/", "") - -if str(args.model).startswith("storage/"): - args.model = args.model.replace("storage/", "") - -with open(Path("./storage") / args.model / "config.json") as f: - conf = json.load(f) - -if args.env_name is None: - # load env_args from status - env_args = {} - if not "env_args" in conf.keys(): - env_args = get_status(get_model_dir(args.model), None)['env_args'] - else: - env_args = conf["env_args"] - - env = make_env(args.env_name, args.seed, env_args=env_args) -else: - env_name = args.env_name - env = make_env(args.env_name, args.seed) - -for _ in range(args.shift): - env.reset() -print("Environment loaded\n") - -# Define agent -model_dir = get_model_dir(args.model) -num_frames = None -agent = 
load_agent(env, model_dir, args.argmax, num_frames) - -print("Agent loaded\n") - -# Run the agent - -if save: - from imageio import mimsave - old_frames = [] - frames = [] - -# Create a window to view the environment -env.render(mode='human') - -def plt_2_rgb(env): - data = np.frombuffer(env.window.fig.canvas.tostring_rgb(), dtype=np.uint8) - data = data.reshape(env.window.fig.canvas.get_width_height()[::-1] + (3,)) - return data - - -for episode in range(args.episodes): - print("episode:", episode) - obs = env.reset() - - env.render(mode='human') - if save: - frames.append(plt_2_rgb(env)) - - i = 0 - while True: - i += 1 - - action = agent.get_action(obs) - obs, reward, done, _ = env.step(action) - agent.analyze_feedback(reward, done) - env.render(mode='human') - - if save: - img = plt_2_rgb(env) - frames.append(img) - if done: - # quadruple last frame to pause between episodes - for i in range(3): - same_img = np.copy(img) - # toggle a pixel between frames to avoid cropping when going from gif to mp4 - same_img[0,0,2] = 0 if (i % 2) == 0 else 255 - frames.append(same_img) - - if done or env.window.closed: - break - - if args.max_steps is not None: - if i > args.max_steps: - break - - - if env.window.closed: - break - -if save: - # from IPython import embed; embed() - print(f"Saving to {savename} ", end="") - mimsave(savename + '.gif', frames, duration=args.pause) - # Reduce gif size - # bashCommand = "gifsicle -O3 --colors 32 -o {}.gif {}.gif".format(savename, savename) - # process = subprocess.run(bashCommand.split(), stdout=subprocess.PIPE) - - - print("Done.") diff --git a/spaces/frncscp/bullerengue/musika/22kHz/models.py b/spaces/frncscp/bullerengue/musika/22kHz/models.py deleted file mode 100644 index fc7d83b6d85a9e68a4478abb4d2fe46e24bb0558..0000000000000000000000000000000000000000 --- a/spaces/frncscp/bullerengue/musika/22kHz/models.py +++ /dev/null @@ -1,849 +0,0 @@ -import numpy as np -import tensorflow as tf -import tensorflow_addons as tfa -from tensorflow.keras import mixed_precision -from tensorflow.keras.layers import ( - Add, - BatchNormalization, - Concatenate, - Conv2D, - Conv2DTranspose, - Cropping1D, - Cropping2D, - Dense, - Dot, - Flatten, - GlobalAveragePooling2D, - Input, - Lambda, - LeakyReLU, - Multiply, - ReLU, - Reshape, - SeparableConv2D, - UpSampling2D, - ZeroPadding2D, -) -from tensorflow.keras.models import Model, Sequential -from tensorflow.keras.optimizers import Adam -from tensorflow.python.keras.utils.layer_utils import count_params - -from layers import ConvSN2D, DenseSN, PosEnc, AddNoise - - -class Models_functions: - def __init__(self, args): - - self.args = args - if self.args.mixed_precision: - self.mixed_precision = mixed_precision - self.policy = mixed_precision.Policy("mixed_float16") - mixed_precision.set_global_policy(self.policy) - self.init = tf.keras.initializers.he_uniform() - - def conv_util( - self, - inp, - filters, - kernel_size=(1, 3), - strides=(1, 1), - noise=False, - upsample=False, - padding="same", - bn=True, - ): - - x = inp - - if upsample: - x = tf.keras.layers.Conv2DTranspose( - filters, - kernel_size=kernel_size, - strides=strides, - activation="linear", - padding=padding, - kernel_initializer=self.init, - )(x) - else: - x = tf.keras.layers.Conv2D( - filters, - kernel_size=kernel_size, - strides=strides, - activation="linear", - padding=padding, - kernel_initializer=self.init, - )(x) - - if noise: - x = AddNoise(datatype=self.args.datatype)(x) - - if bn: - x = tf.keras.layers.BatchNormalization()(x) - - x = 
tf.keras.activations.swish(x) - - return x - - def adain(self, x, emb): - emb = tf.keras.layers.Conv2D( - x.shape[-1], - kernel_size=(1, 1), - strides=1, - activation="linear", - padding="same", - kernel_initializer=self.init, - use_bias=True, - )(emb) - x = x / (tf.math.reduce_std(x, -2, keepdims=True) + 1e-7) - return x * emb - - def se_layer(self, x, filters): - x = tf.reduce_mean(x, -2, keepdims=True) - x = tf.keras.layers.Conv2D( - filters, - kernel_size=(1, 1), - strides=(1, 1), - activation="linear", - padding="valid", - kernel_initializer=self.init, - use_bias=True, - )(x) - x = tf.keras.activations.swish(x) - return tf.keras.layers.Conv2D( - filters, - kernel_size=(1, 1), - strides=(1, 1), - activation="sigmoid", - padding="valid", - kernel_initializer=self.init, - use_bias=True, - )(x) - - def conv_util_gen( - self, - inp, - filters, - kernel_size=(1, 9), - strides=(1, 1), - noise=False, - upsample=False, - emb=None, - se1=None, - ): - - x = inp - - if upsample: - x = tf.keras.layers.Conv2DTranspose( - filters, - kernel_size=kernel_size, - strides=strides, - activation="linear", - padding="same", - kernel_initializer=self.init, - use_bias=True, - )(x) - else: - x = tf.keras.layers.Conv2D( - filters, - kernel_size=kernel_size, - strides=strides, - activation="linear", - padding="same", - kernel_initializer=self.init, - use_bias=True, - )(x) - - if noise: - x = AddNoise(datatype=self.args.datatype)(x) - - if emb is not None: - x = self.adain(x, emb) - else: - x = tf.keras.layers.BatchNormalization()(x) - - x1 = tf.keras.activations.swish(x) - - if se1 is not None: - x1 = x1 * se1 - - return x1 - - def res_block_disc(self, inp, filters, kernel_size=(1, 3), kernel_size_2=None, strides=(1, 1)): - - if kernel_size_2 is None: - kernel_size_2 = kernel_size - - x = tf.keras.layers.Conv2D( - inp.shape[-1], - kernel_size=kernel_size_2, - strides=1, - activation="linear", - padding="same", - kernel_initializer=self.init, - )(inp) - x = tf.keras.layers.LeakyReLU(0.2)(x) - x = tf.math.sqrt(tf.cast(0.5, self.args.datatype)) * x - x = tf.keras.layers.Conv2D( - filters, - kernel_size=kernel_size, - strides=strides, - activation="linear", - padding="same", - kernel_initializer=self.init, - )(x) - x = tf.keras.layers.LeakyReLU(0.2)(x) - x = tf.math.sqrt(tf.cast(0.5, self.args.datatype)) * x - - if strides != (1, 1): - inp = tf.keras.layers.AveragePooling2D(strides, padding="same")(inp) - - if inp.shape[-1] != filters: - inp = tf.keras.layers.Conv2D( - filters, - kernel_size=1, - strides=1, - activation="linear", - padding="same", - kernel_initializer=self.init, - use_bias=False, - )(inp) - - return x + inp - - def build_encoder2(self): - - dim = 128 - - inpf = Input((1, self.args.shape, dim)) - - inpfls = tf.split(inpf, 16, -2) - inpb = tf.concat(inpfls, 0) - - g0 = self.conv_util(inpb, 256, kernel_size=(1, 1), strides=(1, 1), padding="valid") - g1 = self.conv_util(g0, 256 + 256, kernel_size=(1, 3), strides=(1, 1), padding="valid") - g2 = self.conv_util(g1, 512 + 128, kernel_size=(1, 3), strides=(1, 1), padding="valid") - g3 = self.conv_util(g2, 512 + 128, kernel_size=(1, 1), strides=(1, 1), padding="valid") - g4 = self.conv_util(g3, 512 + 256, kernel_size=(1, 3), strides=(1, 1), padding="valid") - g5 = self.conv_util(g4, 512 + 256, kernel_size=(1, 2), strides=(1, 1), padding="valid") - - g = tf.keras.layers.Conv2D( - 64, - kernel_size=(1, 1), - strides=1, - padding="valid", - kernel_initializer=self.init, - name="cbottle", - activation="tanh", - )(g5) - - gls = tf.split(g, 16, 0) - g = 
tf.concat(gls, -2) - gls = tf.split(g, 2, -2) - g = tf.concat(gls, 0) - - gf = tf.cast(g, tf.float32) - return Model(inpf, gf, name="ENC2") - - def build_decoder2(self): - - dim = 128 - bottledim = 64 - - inpf = Input((1, self.args.shape // 16, bottledim)) - - g = inpf - - g = self.conv_util( - g, - 512 + 128 + 128, - kernel_size=(1, 4), - strides=(1, 1), - upsample=False, - noise=True, - ) - g = self.conv_util( - g, - 512 + 128 + 128, - kernel_size=(1, 4), - strides=(1, 2), - upsample=True, - noise=True, - ) - g = self.conv_util(g, 512 + 128, kernel_size=(1, 4), strides=(1, 2), upsample=True, noise=True) - g = self.conv_util(g, 512, kernel_size=(1, 4), strides=(1, 1), upsample=False, noise=True) - g = self.conv_util(g, 256 + 128, kernel_size=(1, 4), strides=(1, 2), upsample=True, noise=True) - - gf = tf.keras.layers.Conv2D( - dim, - kernel_size=(1, 1), - strides=1, - padding="same", - activation="tanh", - kernel_initializer=self.init, - )(g) - - gfls = tf.split(gf, 2, 0) - gf = tf.concat(gfls, -2) - - gf = tf.cast(gf, tf.float32) - - return Model(inpf, gf, name="DEC2") - - def build_encoder(self): - - dim = ((4 * self.args.hop) // 2) + 1 - - inpf = Input((dim, self.args.shape, 1)) - - ginp = tf.transpose(inpf, [0, 3, 2, 1]) - - g0 = self.conv_util( - ginp, - self.args.hop * 2 + 32, - kernel_size=(1, 1), - strides=(1, 1), - padding="valid", - ) - - g = self.conv_util( - g0, - self.args.hop * 2 + 64, - kernel_size=(1, 1), - strides=(1, 1), - padding="valid", - ) - g = self.conv_util( - g, - self.args.hop * 2 + 64 + 64, - kernel_size=(1, 1), - strides=(1, 1), - padding="valid", - ) - g = self.conv_util( - g, - self.args.hop * 2 + 128 + 64, - kernel_size=(1, 1), - strides=(1, 1), - padding="valid", - ) - g = self.conv_util( - g, - self.args.hop * 2 + 128 + 128, - kernel_size=(1, 1), - strides=(1, 1), - padding="valid", - ) - - g = tf.keras.layers.Conv2D( - 128, - kernel_size=(1, 1), - strides=1, - padding="valid", - kernel_initializer=self.init, - )(g) - gb = tf.keras.activations.tanh(g) - - gbls = tf.split(gb, 2, -2) - gb = tf.concat(gbls, 0) - - gb = tf.cast(gb, tf.float32) - return Model(inpf, gb, name="ENC") - - def build_decoder(self): - - dim = ((4 * self.args.hop) // 2) + 1 - - inpf = Input((1, self.args.shape // 2, 128)) - - g = inpf - - g0 = self.conv_util(g, self.args.hop * 3, kernel_size=(1, 1), strides=(1, 1), noise=True) - - g1 = self.conv_util(g0, self.args.hop * 2, kernel_size=(1, 3), strides=(1, 2), noise=True) - g2 = self.conv_util( - g1, - self.args.hop + self.args.hop // 2, - kernel_size=(1, 3), - strides=(1, 2), - noise=True, - ) - g = self.conv_util( - g2, - self.args.hop + self.args.hop // 4, - kernel_size=(1, 3), - strides=(1, 2), - noise=True, - ) - - g = self.conv_util( - g, - self.args.hop + self.args.hop // 2, - kernel_size=(1, 4), - strides=(1, 2), - upsample=True, - noise=True, - ) - g = self.conv_util( - g + g2, - self.args.hop * 2, - kernel_size=(1, 4), - strides=(1, 2), - upsample=True, - noise=True, - ) - g = self.conv_util( - g + g1, - self.args.hop * 3, - kernel_size=(1, 4), - strides=(1, 2), - upsample=True, - noise=True, - ) - - g = self.conv_util(g + g0, self.args.hop * 5, kernel_size=(1, 1), strides=(1, 1), noise=True) - - g = Conv2D( - dim * 2, - kernel_size=(1, 1), - strides=(1, 1), - kernel_initializer=self.init, - padding="same", - )(g) - g = tf.clip_by_value(g, -1.0, 1.0) - - gf, pf = tf.split(g, 2, -1) - - gfls = tf.split(gf, self.args.shape // self.args.window, 0) - gf = tf.concat(gfls, -2) - - pfls = tf.split(pf, self.args.shape // 
self.args.window, 0) - pf = tf.concat(pfls, -2) - - s = tf.transpose(gf, [0, 2, 3, 1]) - p = tf.transpose(pf, [0, 2, 3, 1]) - - s = tf.cast(tf.squeeze(s, -1), tf.float32) - p = tf.cast(tf.squeeze(p, -1), tf.float32) - - return Model(inpf, [s, p], name="DEC") - - def build_critic(self): - - if self.args.conditional: - sinp = Input(shape=(1, self.args.latlen, self.args.latdepth * 2 + 1)) - sinpf = sinp[:, :, :, :-1] - sinpc = sinp[:, :, :, -1:] - else: - sinp = Input(shape=(1, self.args.latlen, self.args.latdepth * 2)) - sinpf = sinp - - dim = 64 * 2 - - sf = tf.keras.layers.Conv2D( - self.args.latdepth * 4, - kernel_size=(1, 1), - strides=(1, 1), - activation="linear", - padding="valid", - kernel_initializer=self.init, - use_bias=False, - trainable=False, - )(sinpf) - - if self.args.conditional: - sf = tf.concat([sf, tf.cast(sinpc, self.args.datatype)], -1) - - sf = tf.keras.layers.Conv2D( - 256 + 128, - kernel_size=(1, 3), - strides=(1, 2), - activation="linear", - padding="same", - kernel_initializer=self.init, - )(sf) - sf = tf.keras.layers.LeakyReLU(0.2)(sf) - sf = self.res_block_disc(sf, 256 + 128 + 128, kernel_size=(1, 3), strides=(1, 2)) - sf = self.res_block_disc(sf, 512 + 128, kernel_size=(1, 3), strides=(1, 2)) - sf = self.res_block_disc(sf, 512 + 256, kernel_size=(1, 3), strides=(1, 2)) - sf = self.res_block_disc(sf, 512 + 128 + 256, kernel_size=(1, 3), strides=(1, 2)) - sfo = self.res_block_disc(sf, 512 + 512, kernel_size=(1, 3), strides=(1, 2), kernel_size_2=(1, 1)) - sf = sfo - - gf = tf.keras.layers.Dense(1, activation="linear", use_bias=True, kernel_initializer=self.init)(Flatten()(sf)) - - gf = tf.cast(gf, tf.float32) - sfo = tf.cast(sfo, tf.float32) - - return Model(sinp, [gf, sfo], name="C") - - def build_critic_rec(self): - - sinp = Input(shape=(1, self.args.latlen // 64, 512 + 512)) - - dim = self.args.latdepth * 2 - - sf = tf.keras.layers.Conv2DTranspose( - 512, - kernel_size=(1, 4), - strides=(1, 2), - activation="linear", - padding="same", - kernel_initializer=self.init, - )(sinp) - sf = tf.keras.layers.LeakyReLU(0.2)(sf) - - sf = tf.keras.layers.Conv2DTranspose( - 256 + 128 + 64, - kernel_size=(1, 4), - strides=(1, 2), - activation="linear", - padding="same", - kernel_initializer=self.init, - )(sf) - sf = tf.keras.layers.LeakyReLU(0.2)(sf) - sf = tf.keras.layers.Conv2DTranspose( - 256 + 128, - kernel_size=(1, 4), - strides=(1, 2), - activation="linear", - padding="same", - kernel_initializer=self.init, - )(sf) - sf = tf.keras.layers.LeakyReLU(0.2)(sf) - sf = tf.keras.layers.Conv2DTranspose( - 256 + 64, - kernel_size=(1, 4), - strides=(1, 2), - activation="linear", - padding="same", - kernel_initializer=self.init, - )(sf) - sf = tf.keras.layers.LeakyReLU(0.2)(sf) - sf = tf.keras.layers.Conv2DTranspose( - 256, - kernel_size=(1, 4), - strides=(1, 2), - activation="linear", - padding="same", - kernel_initializer=self.init, - )(sf) - sf = tf.keras.layers.LeakyReLU(0.2)(sf) - sf = tf.keras.layers.Conv2DTranspose( - 128 + 64, - kernel_size=(1, 4), - strides=(1, 2), - activation="linear", - padding="same", - kernel_initializer=self.init, - )(sf) - sf = tf.keras.layers.LeakyReLU(0.2)(sf) - - gf = tf.keras.layers.Conv2D( - dim, - kernel_size=(1, 1), - strides=(1, 1), - activation="tanh", - padding="same", - kernel_initializer=self.init, - )(sf) - - gf = tf.cast(gf, tf.float32) - - return Model(sinp, gf, name="CR") - - def build_generator(self): - - dim = self.args.latdepth * 2 - - if self.args.conditional: - inpf = Input((self.args.latlen, self.args.latdepth * 2)) - else: - 
inpf = Input((self.args.latlen, self.args.latdepth * 2 + 1)) - - inpfls = tf.split(inpf, 2, -2) - inpb = tf.concat(inpfls, 0) - - inpg = tf.reduce_mean(inpb, -2) - inp1 = tf.keras.layers.AveragePooling2D((1, 2), padding="valid")(tf.expand_dims(inpb, -3)) - inp2 = tf.keras.layers.AveragePooling2D((1, 2), padding="valid")(inp1) - inp3 = tf.keras.layers.AveragePooling2D((1, 2), padding="valid")(inp2) - inp4 = tf.keras.layers.AveragePooling2D((1, 2), padding="valid")(inp3) - inp5 = tf.keras.layers.AveragePooling2D((1, 2), padding="valid")(inp4) - inp6 = tf.keras.layers.AveragePooling2D((1, 2), padding="valid")(inp5) - - g = tf.keras.layers.Dense( - 4 * (512 + 256 + 128), - activation="linear", - use_bias=True, - kernel_initializer=self.init, - )(Flatten()(inp6)) - g = tf.keras.layers.Reshape((1, 4, 512 + 256 + 128))(g) - g = AddNoise(datatype=self.args.datatype)(g) - g = self.adain(g, inp5) - g = tf.keras.activations.swish(g) - - g = self.conv_util_gen( - g, - 512 + 256, - kernel_size=(1, 4), - strides=(1, 2), - upsample=True, - noise=True, - emb=inp4, - ) - g1 = self.conv_util_gen( - g, - 512 + 256, - kernel_size=(1, 1), - strides=(1, 1), - upsample=False, - noise=True, - emb=inp4, - ) - g2 = self.conv_util_gen( - g1, - 512 + 128, - kernel_size=(1, 4), - strides=(1, 2), - upsample=True, - noise=True, - emb=inp3, - ) - g2b = self.conv_util_gen( - g2, - 512 + 128, - kernel_size=(1, 3), - strides=(1, 1), - upsample=False, - noise=True, - emb=inp3, - ) - g3 = self.conv_util_gen( - g2b, - 256 + 256, - kernel_size=(1, 4), - strides=(1, 2), - upsample=True, - noise=True, - emb=inp2, - se1=self.se_layer(g, 256 + 256), - ) - g3 = self.conv_util_gen( - g3, - 256 + 256, - kernel_size=(1, 3), - strides=(1, 1), - upsample=False, - noise=True, - emb=inp2, - se1=self.se_layer(g1, 256 + 256), - ) - g4 = self.conv_util_gen( - g3, - 256 + 128, - kernel_size=(1, 4), - strides=(1, 2), - upsample=True, - noise=True, - emb=inp1, - se1=self.se_layer(g2, 256 + 128), - ) - g4 = self.conv_util_gen( - g4, - 256 + 128, - kernel_size=(1, 3), - strides=(1, 1), - upsample=False, - noise=True, - emb=inp1, - se1=self.se_layer(g2b, 256 + 128), - ) - g5 = self.conv_util_gen( - g4, - 256, - kernel_size=(1, 4), - strides=(1, 2), - upsample=True, - noise=True, - emb=tf.expand_dims(tf.cast(inpb, dtype=self.args.datatype), -3), - ) - - gf = tf.keras.layers.Conv2D( - dim, - kernel_size=(1, 1), - strides=(1, 1), - kernel_initializer=self.init, - padding="same", - activation="tanh", - )(g5) - - gfls = tf.split(gf, 2, 0) - gf = tf.concat(gfls, -2) - - gf = tf.cast(gf, tf.float32) - - return Model(inpf, gf, name="GEN") - - # Load past models from path to resume training or test - def load(self, path, load_dec=False): - gen = self.build_generator() - critic = self.build_critic() - enc = self.build_encoder() - dec = self.build_decoder() - enc2 = self.build_encoder2() - dec2 = self.build_decoder2() - critic_rec = self.build_critic_rec() - gen_ema = self.build_generator() - - if self.args.mixed_precision: - opt_disc = self.mixed_precision.LossScaleOptimizer(tf.keras.optimizers.Adam(0.0001, 0.9)) - opt_dec = self.mixed_precision.LossScaleOptimizer(tf.keras.optimizers.Adam(0.0001, 0.9)) - else: - opt_disc = tf.keras.optimizers.Adam(0.0001, 0.9) - opt_dec = tf.keras.optimizers.Adam(0.0001, 0.9) - - if load_dec: - dec.load_weights(self.args.dec_path + "/dec.h5") - dec2.load_weights(self.args.dec_path + "/dec2.h5") - enc.load_weights(self.args.dec_path + "/enc.h5") - enc2.load_weights(self.args.dec_path + "/enc2.h5") - - else: - grad_vars = 
critic.trainable_weights + critic_rec.trainable_weights - zero_grads = [tf.zeros_like(w) for w in grad_vars] - opt_disc.apply_gradients(zip(zero_grads, grad_vars)) - - grad_vars = gen.trainable_variables - zero_grads = [tf.zeros_like(w) for w in grad_vars] - opt_dec.apply_gradients(zip(zero_grads, grad_vars)) - - if not self.args.testing: - opt_disc.set_weights(np.load(path + "/opt_disc.npy", allow_pickle=True)) - opt_dec.set_weights(np.load(path + "/opt_dec.npy", allow_pickle=True)) - - if not self.args.testing: - critic.load_weights(path + "/critic.h5") - gen.load_weights(path + "/gen.h5") - # enc.load_weights(self.args.dec_path + "/enc.h5") - # enc2.load_weights(self.args.dec_path + "/enc2.h5") - critic_rec.load_weights(path + "/critic_rec.h5") - gen_ema.load_weights(path + "/gen_ema.h5") - dec.load_weights(self.args.dec_path + "/dec.h5") - dec2.load_weights(self.args.dec_path + "/dec2.h5") - enc.load_weights(self.args.dec_path + "/enc.h5") - enc2.load_weights(self.args.dec_path + "/enc2.h5") - - return ( - critic, - gen, - enc, - dec, - enc2, - dec2, - critic_rec, - gen_ema, - [opt_dec, opt_disc], - ) - - def build(self): - gen = self.build_generator() - critic = self.build_critic() - enc = self.build_encoder() - dec = self.build_decoder() - enc2 = self.build_encoder2() - dec2 = self.build_decoder2() - critic_rec = self.build_critic_rec() - gen_ema = self.build_generator() - - gen_ema = tf.keras.models.clone_model(gen) - gen_ema.set_weights(gen.get_weights()) - - if self.args.mixed_precision: - opt_disc = self.mixed_precision.LossScaleOptimizer(tf.keras.optimizers.Adam(0.0001, 0.9)) - opt_dec = self.mixed_precision.LossScaleOptimizer(tf.keras.optimizers.Adam(0.0001, 0.9)) - else: - opt_disc = tf.keras.optimizers.Adam(0.0001, 0.9) - opt_dec = tf.keras.optimizers.Adam(0.0001, 0.9) - - return ( - critic, - gen, - enc, - dec, - enc2, - dec2, - critic_rec, - gen_ema, - [opt_dec, opt_disc], - ) - - def get_networks(self): - if self.args.load_path != "None": - ( - critic, - gen, - enc, - dec, - enc2, - dec2, - critic_rec, - gen_ema, - [opt_dec, opt_disc], - ) = self.load(self.args.load_path, load_dec=False) - print(f"Networks loaded from {self.args.load_path}") - else: - ( - critic, - gen, - enc, - dec, - enc2, - dec2, - critic_rec, - gen_ema, - [opt_dec, opt_disc], - ) = self.load(self.args.dec_path, load_dec=True) - print(f"Encoders/Decoders loaded from {self.args.dec_path}") - print(f"Networks initialized") - - return ( - critic, - gen, - enc, - dec, - enc2, - dec2, - critic_rec, - gen_ema, - [opt_dec, opt_disc], - ) - - def initialize_networks(self): - - ( - critic, - gen, - enc, - dec, - enc2, - dec2, - critic_rec, - gen_ema, - [opt_dec, opt_disc], - ) = self.get_networks() - - print(f"Critic params: {count_params(critic.trainable_variables)}") - print(f"Generator params: {count_params(gen.trainable_variables)}") - - return ( - critic, - gen, - enc, - dec, - enc2, - dec2, - critic_rec, - gen_ema, - [opt_dec, opt_disc], - ) diff --git a/spaces/g4f/freegpt-webui/run.py b/spaces/g4f/freegpt-webui/run.py deleted file mode 100644 index 0f51b649137e966cd6912ca6e54f49ea49d7f3c6..0000000000000000000000000000000000000000 --- a/spaces/g4f/freegpt-webui/run.py +++ /dev/null @@ -1,39 +0,0 @@ -from server.bp import bp -from server.website import Website -from server.backend import Backend_Api -from json import load -from flask import Flask - -if __name__ == '__main__': - - # Load configuration from config.json - config = load(open('config.json', 'r')) - site_config = config['site_config'] - 
url_prefix = config.pop('url_prefix') - - # Set up the website routes - site = Website(bp, url_prefix) - for route in site.routes: - bp.add_url_rule( - route, - view_func=site.routes[route]['function'], - methods=site.routes[route]['methods'], - ) - - # Set up the backend API routes - backend_api = Backend_Api(bp, config) - for route in backend_api.routes: - bp.add_url_rule( - route, - view_func=backend_api.routes[route]['function'], - methods=backend_api.routes[route]['methods'], - ) - - # Create the app and register the blueprint - app = Flask(__name__) - app.register_blueprint(bp, url_prefix=url_prefix) - - # Run the Flask server - print(f"Running on {site_config['port']}{url_prefix}") - app.run(**site_config) - print(f"Closing port {site_config['port']}") diff --git a/spaces/g4f/freegpt-webui/server/bp.py b/spaces/g4f/freegpt-webui/server/bp.py deleted file mode 100644 index 61d416797039dababd9e8222b4fc910ef65c40b9..0000000000000000000000000000000000000000 --- a/spaces/g4f/freegpt-webui/server/bp.py +++ /dev/null @@ -1,6 +0,0 @@ -from flask import Blueprint - -bp = Blueprint('bp', __name__, - template_folder='./../client/html', - static_folder='./../client', - static_url_path='assets') diff --git a/spaces/ghlee94/MEDIAR/predict.sh b/spaces/ghlee94/MEDIAR/predict.sh deleted file mode 100644 index 62e2366e56d45f037b58d81de273fe38d94404b1..0000000000000000000000000000000000000000 --- a/spaces/ghlee94/MEDIAR/predict.sh +++ /dev/null @@ -1 +0,0 @@ -python predict.py -i "./inputs" -o "./outputs" --device "cuda:0" --model_path="./main_model.pt" --model_path2="./sub_model.pth" diff --git a/spaces/gligen/demo/gligen/ldm/modules/encoders/__init__.py b/spaces/gligen/demo/gligen/ldm/modules/encoders/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/gligen/demo/gligen/ldm/modules/image_degradation/__init__.py b/spaces/gligen/demo/gligen/ldm/modules/image_degradation/__init__.py deleted file mode 100644 index 7836cada81f90ded99c58d5942eea4c3477f58fc..0000000000000000000000000000000000000000 --- a/spaces/gligen/demo/gligen/ldm/modules/image_degradation/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr -from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light diff --git a/spaces/gossminn/fillmorle-app/sftp/data_reader/__init__.py b/spaces/gossminn/fillmorle-app/sftp/data_reader/__init__.py deleted file mode 100644 index 8072050fbd6abe0c3e2117f8a95bb2b7af63c5b9..0000000000000000000000000000000000000000 --- a/spaces/gossminn/fillmorle-app/sftp/data_reader/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .batch_sampler import MixSampler -from .better_reader import BetterDatasetReader -from .span_reader import SpanReader -from .srl_reader import SRLDatasetReader -from .concrete_srl import concrete_doc, concrete_doc_tokenized, collect_concrete_srl -from .concrete_reader import ConcreteDatasetReader diff --git a/spaces/gradio/HuBERT/fairseq/data/multi_corpus_sampled_dataset.py b/spaces/gradio/HuBERT/fairseq/data/multi_corpus_sampled_dataset.py deleted file mode 100644 index e2e9fdf004dd1da519a170a5e8bc225775776f72..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/data/multi_corpus_sampled_dataset.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from collections import OrderedDict -from typing import Callable, Dict, List - -import numpy as np - -from . import FairseqDataset - - -def uniform_sampler(x): - # Sample from uniform distribution - return np.random.choice(x, 1).item() - - -class MultiCorpusSampledDataset(FairseqDataset): - """ - Stores multiple instances of FairseqDataset together and in every iteration - creates a batch by first sampling a dataset according to a specified - probability distribution and then getting instances from that dataset. - - Args: - datasets: an OrderedDict of FairseqDataset instances. - sampling_func: A function for sampling over list of dataset keys. - The default strategy is to sample uniformly. - """ - - def __init__( - self, - datasets: Dict[str, FairseqDataset], - sampling_func: Callable[[List], int] = None, - ): - super().__init__() - assert isinstance(datasets, OrderedDict) - self.datasets = datasets - if sampling_func is None: - sampling_func = uniform_sampler - self.sampling_func = sampling_func - - self.total_num_instances = 0 - for _, dataset in datasets.items(): - assert isinstance(dataset, FairseqDataset) - self.total_num_instances += len(dataset) - - self._ordered_indices = None - - def __len__(self): - """ - Length of this dataset is the sum of individual datasets - """ - return self.total_num_instances - - def ordered_indices(self): - """ - Ordered indices for batching. Here we call the underlying - dataset's ordered_indices() so that we get the same random ordering - as we would have from using the underlying dataset directly. - """ - if self._ordered_indices is None: - self._ordered_indices = OrderedDict( - [ - (key, dataset.ordered_indices()) - for key, dataset in self.datasets.items() - ] - ) - return np.arange(len(self)) - - def _map_index_to_dataset(self, key: int, index: int): - """ - Different underlying datasets have different lengths. In order to ensure - we are not accessing an index outside the range of the current dataset - size, we wrap around. This function should be called after we have - created an ordering for this and all underlying datasets. - """ - assert ( - self._ordered_indices is not None - ), "Must call MultiCorpusSampledDataset.ordered_indices() first" - mapped_index = index % len(self.datasets[key]) - return self._ordered_indices[key][mapped_index] - - def __getitem__(self, index: int): - """ - Get the item associated with index from each underlying dataset. - Since index is in the range of [0, TotalNumInstances], we need to - map the index to the dataset before retrieving the item. - """ - return OrderedDict( - [ - (key, dataset[self._map_index_to_dataset(key, index)]) - for key, dataset in self.datasets.items() - ] - ) - - def collater(self, samples: List[Dict]): - """ - Generate a mini-batch for this dataset. - To convert this into a regular mini-batch we use the following - logic: - 1. Select a dataset using the specified probability distribution. - 2. Call the collater function of the selected dataset. - """ - if len(samples) == 0: - return None - - selected_key = self.sampling_func(list(self.datasets.keys())) - selected_samples = [sample[selected_key] for sample in samples] - return self.datasets[selected_key].collater(selected_samples) - - def num_tokens(self, index: int): - """ - Return an example's length (number of tokens), used for batching. 
Here - we return the max across all examples at index across all underlying - datasets. - """ - return max( - dataset.num_tokens(self._map_index_to_dataset(key, index)) - for key, dataset in self.datasets.items() - ) - - def size(self, index: int): - """ - Return an example's size as a float or tuple. Here we return the max - across all underlying datasets. This value is used when filtering a - dataset with max-positions. - """ - return max( - dataset.size(self._map_index_to_dataset(key, index)) - for key, dataset in self.datasets.items() - ) - - @property - def supports_prefetch(self): - return all( - getattr(dataset, "supports_prefetch", False) - for dataset in self.datasets.values() - ) - - def prefetch(self, indices): - for key, dataset in self.datasets.items(): - dataset.prefetch( - [self._map_index_to_dataset(key, index) for index in indices] - ) - - @property - def supports_fetch_outside_dataloader(self): - return all( - self.datasets[key].supports_fetch_outside_dataloader - for key in self.datasets - ) diff --git a/spaces/gradio/HuBERT/fairseq/models/nat/nonautoregressive_ensembles.py b/spaces/gradio/HuBERT/fairseq/models/nat/nonautoregressive_ensembles.py deleted file mode 100644 index 705a04fb49658c91114a26efd411b4653c65b943..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/models/nat/nonautoregressive_ensembles.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math - -import torch -import torch.nn.functional as F -from fairseq.models.nat import ( - _apply_del_words, - _apply_ins_masks, - _apply_ins_words, - _fill, - _skip, - _skip_encoder_out, -) - - -class _EnsembleModelEncoder(object): - def __init__(self, models): - self.models = models - - def reorder_encoder_out(self, encoder_outs, new_order): - encoder_outs = [ - model.encoder.reorder_encoder_out(encoder_out, new_order) - for model, encoder_out in zip(self.models, encoder_outs) - ] - return encoder_outs - - -class BasicEnsembleModel(torch.nn.Module): - """A wrapper around an ensemble of models.""" - - def __init__(self, models): - super().__init__() - self.models = torch.nn.ModuleList(models) - self.bos = self.models[0].decoder.dictionary.bos() - self.eos = self.models[0].decoder.dictionary.eos() - self.pad = self.models[0].decoder.dictionary.pad() - self.unk = self.models[0].decoder.dictionary.unk() - self.encoder = _EnsembleModelEncoder(self.models) - - def has_encoder(self): - return hasattr(self.models[0], "encoder") - - def max_decoder_positions(self): - return min(m.max_decoder_positions() for m in self.models) - - @torch.no_grad() - def forward_encoder(self, encoder_input): - if not self.has_encoder(): - return None - return [model.forward_encoder(encoder_input) for model in self.models] - - @torch.no_grad() - def forward_decoder(self, *inputs): - raise NotImplementedError - - def initialize_output_tokens(self, *inputs): - raise NotImplementedError - - -class EnsembleLevT(BasicEnsembleModel): - """A wrapper around an ensemble of models.""" - - def __init__(self, models): - super().__init__(models) - - @torch.no_grad() - def forward_decoder( - self, decoder_out, encoder_outs, eos_penalty=0.0, max_ratio=None, **kwargs - ): - # LevT ensembling - # A pipeline of three steps: deletion, placeholder, and word insertion. - # We need to average scores in each step in a pipeline way because of dependence. 
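- # Each stage below (deletion, placeholder insertion, word insertion) queries every
- # model in the ensemble, combines the per-model log-probabilities with
- # logsumexp minus log(num_models) (an average in probability space), and applies
- # the averaged prediction before the next stage runs on the updated tokens.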
- # deletion - output_tokens = decoder_out.output_tokens - output_scores = decoder_out.output_scores - attn = decoder_out.attn - - bsz = output_tokens.size(0) - if max_ratio is None: - max_lens = output_tokens.new().fill_(255) - else: - if not encoder_outs[0]["encoder_padding_mask"]: - src_lens = ( - encoder_outs[0]["encoder_out"][0].new(bsz) - .fill_(encoder_outs[0]["encoder_out"][0].size(1)) - ) - else: - src_lens = (~encoder_outs[0]["encoder_padding_mask"][0]).sum(1) - max_lens = (src_lens * max_ratio).clamp(min=10).long() - - # delete words - # do not delete tokens if it is <s> </s> - can_del_word = output_tokens.ne(self.pad).sum(1) > 2 - if can_del_word.sum() != 0: # we cannot delete, skip - output_tokens, output_scores, attn = self.forward_word_del( - encoder_outs, - output_tokens, - output_scores, - attn, - can_del_word, - ) - - # insert placeholders - can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens - if can_ins_mask.sum() != 0: - output_tokens, output_scores = self.forward_mask_ins( - encoder_outs, - output_tokens, - output_scores, - can_ins_mask, - eos_penalty, - max_lens, - ) - - # insert words - can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 - if can_ins_word.sum() != 0: - output_tokens, output_scores, attn = self.forward_word_ins( - encoder_outs, - output_tokens, - output_scores, - attn, - can_ins_word, - ) - - # delete some unnecessary paddings - cut_off = output_tokens.ne(self.pad).sum(1).max() - output_tokens = output_tokens[:, :cut_off] - output_scores = output_scores[:, :cut_off] - attn = None if attn is None else attn[:, :cut_off, :] - return decoder_out._replace( - output_tokens=output_tokens, - output_scores=output_scores, - attn=attn, - history=None, - ) - - def forward_word_del( - self, encoder_outs, output_tokens, output_scores, attn, can_del_word - ): - word_del_score_avg = [] - word_del_attn_avg = [] - for model, encoder_out in zip(self.models, encoder_outs): - word_del_out, word_del_attn = model.decoder.forward_word_del( - _skip(output_tokens, can_del_word), - _skip_encoder_out(model.encoder, encoder_out, can_del_word), - ) - word_del_score = F.log_softmax(word_del_out, 2) - word_del_score_avg.append(word_del_score) - word_del_attn_avg.append(word_del_attn) - word_del_score_avg = torch.logsumexp( - torch.stack(word_del_score_avg, dim=0), dim=0 - ) - math.log(len(self.models)) - word_del_pred = word_del_score_avg.max(-1)[1].bool() - if word_del_attn_avg[0] is not None: - word_del_attn_avg = torch.stack(word_del_attn_avg, dim=0) / len(self.models) - else: - word_del_attn_avg = None - - _tokens, _scores, _attn = _apply_del_words( - output_tokens[can_del_word], - output_scores[can_del_word], - word_del_attn_avg, - word_del_pred, - self.pad, - self.bos, - self.eos, - ) - output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) - output_scores = _fill(output_scores, can_del_word, _scores, 0) - attn = _fill(attn, can_del_word, _attn, 0.0) - return output_tokens, output_scores, attn - - def forward_mask_ins( - self, - encoder_outs, - output_tokens, - output_scores, - can_ins_mask, - eos_penalty, - max_lens, - ): - mask_ins_score_avg = [] - for model, encoder_out in zip(self.models, encoder_outs): - mask_ins_out, _ = model.decoder.forward_mask_ins( - _skip(output_tokens, can_ins_mask), - _skip_encoder_out(model.encoder, encoder_out, can_ins_mask), - ) - mask_ins_score = F.log_softmax(mask_ins_out, 2) - if eos_penalty > 0.0: - mask_ins_score[:, :, 0] -= eos_penalty - mask_ins_score_avg.append(mask_ins_score) - mask_ins_score_avg = 
torch.logsumexp( - torch.stack(mask_ins_score_avg, dim=0), dim=0 - ) - math.log(len(self.models)) - mask_ins_pred = mask_ins_score_avg.max(-1)[1] - mask_ins_pred = torch.min( - mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred) - ) - _tokens, _scores = _apply_ins_masks( - output_tokens[can_ins_mask], - output_scores[can_ins_mask], - mask_ins_pred, - self.pad, - self.unk, - self.eos, - ) - output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) - output_scores = _fill(output_scores, can_ins_mask, _scores, 0) - return output_tokens, output_scores - - def forward_word_ins( - self, encoder_outs, output_tokens, output_scores, attn, can_ins_word - ): - word_ins_score_avg = [] - word_ins_attn_avg = [] - for model, encoder_out in zip(self.models, encoder_outs): - word_ins_out, word_ins_attn = model.decoder.forward_word_ins( - _skip(output_tokens, can_ins_word), - _skip_encoder_out(model.encoder, encoder_out, can_ins_word), - ) - word_ins_score = F.log_softmax(word_ins_out, 2) - word_ins_score_avg.append(word_ins_score) - word_ins_attn_avg.append(word_ins_attn) - word_ins_score_avg = torch.logsumexp( - torch.stack(word_ins_score_avg, dim=0), dim=0 - ) - math.log(len(self.models)) - if word_ins_attn_avg[0] is not None: - word_ins_attn_avg = torch.stack(word_ins_attn_avg, dim=0) / len(self.models) - else: - word_ins_attn_avg = None - word_ins_score_max, word_ins_pred = word_ins_score_avg.max(-1) - - _tokens, _scores = _apply_ins_words( - output_tokens[can_ins_word], - output_scores[can_ins_word], - word_ins_pred, - word_ins_score_max, - self.unk, - ) - - output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) - output_scores = _fill(output_scores, can_ins_word, _scores, 0) - attn = _fill(attn, can_ins_word, word_ins_attn, 0.0) - return output_tokens, output_scores, attn - - def initialize_output_tokens(self, encoder_outs, src_tokens): - # LevT doesn't do length prediction. - return self.models[0].initialize_output_tokens(encoder_outs[0], src_tokens) diff --git a/spaces/gradio/HuBERT/fairseq/modules/cross_entropy.py b/spaces/gradio/HuBERT/fairseq/modules/cross_entropy.py deleted file mode 100644 index 6f33c24cb56e25f91595009af38e63784c2263a0..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/modules/cross_entropy.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging - -import torch -import torch.nn.functional as F - - -logger = logging.getLogger(__name__) - - -def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction="mean"): - lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32) - return F.nll_loss( - lprobs, - target, - ignore_index=ignore_index, - reduction=reduction, - ) - - -try: - import xentropy_cuda - from apex.contrib import xentropy - - def cross_entropy(logits, target, ignore_index=-100, reduction="mean"): - if logits.device == torch.device("cpu"): - return _cross_entropy_pytorch(logits, target, ignore_index, reduction) - else: - if not getattr(cross_entropy, "_has_logged_once", False): - logger.info("using fused cross entropy") - cross_entropy._has_logged_once = True - - half_to_float = logits.dtype == torch.half - losses = xentropy.SoftmaxCrossEntropyLoss.apply( - logits, - target, - 0.0, - ignore_index, - half_to_float, - ) - if reduction == "sum": - return losses.sum() - elif reduction == "mean": - if ignore_index >= 0: - return losses.sum() / target.ne(ignore_index).sum() - else: - return losses.mean() - elif reduction == "none": - return losses - else: - raise NotImplementedError - - -except ImportError: - - def cross_entropy(logits, target, ignore_index=-100, reduction="mean"): - return _cross_entropy_pytorch(logits, target, ignore_index, reduction) diff --git a/spaces/gradio/streaming_stt/setup.sh b/spaces/gradio/streaming_stt/setup.sh deleted file mode 100644 index bf8358848e843de3d76bb38278ca1fa30c134506..0000000000000000000000000000000000000000 --- a/spaces/gradio/streaming_stt/setup.sh +++ /dev/null @@ -1,3 +0,0 @@ -wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm -wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer -apt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg \ No newline at end of file diff --git a/spaces/gstaff/MagicGen/colab-data-test/css/mana.min.css b/spaces/gstaff/MagicGen/colab-data-test/css/mana.min.css deleted file mode 100644 index 08b82049561671dccb070ce22a2b059c2b18a871..0000000000000000000000000000000000000000 --- a/spaces/gstaff/MagicGen/colab-data-test/css/mana.min.css +++ /dev/null @@ -1 +0,0 @@ -@font-face{font-family:Mana;src:url(../fonts/mana.eot?v=0.6);src:url(../fonts/mana.eot?#iefix&v=0.6) format('embedded-opentype'),url(../fonts/mana.woff?v=0.6) format('woff'),url(../fonts/mana.ttf?v=0.6) format('truetype'),url(../fonts/mana.svg?v=0.6#mana) format('svg');font-weight:400;font-style:normal}@font-face{font-family:MPlantin;src:url(../fonts/mplantin.eot?v=0.6);src:url(../fonts/mplantin.eot?#iefix&v=0.6) format('embedded-opentype'),url(../fonts/mplantin.woff?v=0.6) format('woff'),url(../fonts/mplantin.ttf?v=0.6) format('truetype'),url(../fonts/mplantin.svg?v=0.6#mplantin) format('svg');font-weight:400;font-style:normal}.ms{display:inline-block;font:normal normal normal 14px/1 
Mana;font-size:inherit;line-height:1em;text-rendering:auto;transform:translate(0,0);speak:none;text-transform:none;vertical-align:middle;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.ms-cost{background-color:#BEB9B2;border-radius:1em;color:#111;font-size:.95em;width:1.3em;height:1.3em;line-height:1.35em;text-align:center}.ms-cost.ms-w,.ms-cost.ms-wp{background-color:#F0F2C0}.ms-cost.ms-u,.ms-cost.ms-up{background-color:#B5CDE3}.ms-cost.ms-b,.ms-cost.ms-bp{background-color:#ACA29A}.ms-cost.ms-r,.ms-cost.ms-rp{background-color:#DB8664}.ms-cost.ms-g,.ms-cost.ms-gp{background-color:#93B483}.ms-cost.ms-wu{background:#edf2b0;background:-moz-linear-gradient(-45deg,#edf2b0 0,#edf2b0 50%,#a6c1dd 50%,#a6c1dd 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#edf2b0),color-stop(50%,#edf2b0),color-stop(50%,#a6c1dd),color-stop(100%,#a6c1dd));background:-webkit-linear-gradient(-45deg,#edf2b0 0,#edf2b0 50%,#a6c1dd 50%,#a6c1dd 100%);background:-o-linear-gradient(-45deg,#edf2b0 0,#edf2b0 50%,#a6c1dd 50%,#a6c1dd 100%);background:-ms-linear-gradient(-45deg,#edf2b0 0,#edf2b0 50%,#a6c1dd 50%,#a6c1dd 100%);background:linear-gradient(135deg,#edf2b0 0,#edf2b0 50%,#a6c1dd 50%,#a6c1dd 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#edf2b0', endColorstr='#a6c1dd', GradientType=1)}.ms-cost.ms-wb{background:#edf2b0;background:-moz-linear-gradient(-45deg,#edf2b0 0,#edf2b0 50%,#9c9188 50%,#9c9188 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#edf2b0),color-stop(50%,#edf2b0),color-stop(50%,#9c9188),color-stop(100%,#9c9188));background:-webkit-linear-gradient(-45deg,#edf2b0 0,#edf2b0 50%,#9c9188 50%,#9c9188 100%);background:-o-linear-gradient(-45deg,#edf2b0 0,#edf2b0 50%,#9c9188 50%,#9c9188 100%);background:-ms-linear-gradient(-45deg,#edf2b0 0,#edf2b0 50%,#9c9188 50%,#9c9188 100%);background:linear-gradient(135deg,#edf2b0 0,#edf2b0 50%,#9c9188 50%,#9c9188 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#edf2b0', endColorstr='#9c9188', GradientType=1)}.ms-cost.ms-ub{background:#a6c1dd;background:-moz-linear-gradient(-45deg,#a6c1dd 0,#a6c1dd 50%,#9c9188 50%,#9c9188 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#a6c1dd),color-stop(50%,#a6c1dd),color-stop(50%,#9c9188),color-stop(100%,#9c9188));background:-webkit-linear-gradient(-45deg,#a6c1dd 0,#a6c1dd 50%,#9c9188 50%,#9c9188 100%);background:-o-linear-gradient(-45deg,#a6c1dd 0,#a6c1dd 50%,#9c9188 50%,#9c9188 100%);background:-ms-linear-gradient(-45deg,#a6c1dd 0,#a6c1dd 50%,#9c9188 50%,#9c9188 100%);background:linear-gradient(135deg,#a6c1dd 0,#a6c1dd 50%,#9c9188 50%,#9c9188 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#a6c1dd', endColorstr='#9c9188', GradientType=1)}.ms-cost.ms-ur{background:#a6c1dd;background:-moz-linear-gradient(-45deg,#a6c1dd 0,#a6c1dd 50%,#db8664 50%,#db8664 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#a6c1dd),color-stop(50%,#a6c1dd),color-stop(50%,#db8664),color-stop(100%,#db8664));background:-webkit-linear-gradient(-45deg,#a6c1dd 0,#a6c1dd 50%,#db8664 50%,#db8664 100%);background:-o-linear-gradient(-45deg,#a6c1dd 0,#a6c1dd 50%,#db8664 50%,#db8664 100%);background:-ms-linear-gradient(-45deg,#a6c1dd 0,#a6c1dd 50%,#db8664 50%,#db8664 100%);background:linear-gradient(135deg,#a6c1dd 0,#a6c1dd 50%,#db8664 50%,#db8664 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#a6c1dd', endColorstr='#db8664', 
GradientType=1)}.ms-cost.ms-br{background:#aca29a;background:-moz-linear-gradient(-45deg,#aca29a 0,#aca29a 50%,#db8664 50%,#db8664 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#aca29a),color-stop(50%,#aca29a),color-stop(50%,#db8664),color-stop(100%,#db8664));background:-webkit-linear-gradient(-45deg,#aca29a 0,#aca29a 50%,#db8664 50%,#db8664 100%);background:-o-linear-gradient(-45deg,#aca29a 0,#aca29a 50%,#db8664 50%,#db8664 100%);background:-ms-linear-gradient(-45deg,#aca29a 0,#aca29a 50%,#db8664 50%,#db8664 100%);background:linear-gradient(135deg,#aca29a 0,#aca29a 50%,#db8664 50%,#db8664 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#aca29a', endColorstr='#db8664', GradientType=1)}.ms-cost.ms-bg{background:#aca29a;background:-moz-linear-gradient(-45deg,#aca29a 0,#aca29a 50%,#93b483 50%,#93b483 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#aca29a),color-stop(50%,#aca29a),color-stop(50%,#93b483),color-stop(100%,#93b483));background:-webkit-linear-gradient(-45deg,#aca29a 0,#aca29a 50%,#93b483 50%,#93b483 100%);background:-o-linear-gradient(-45deg,#aca29a 0,#aca29a 50%,#93b483 50%,#93b483 100%);background:-ms-linear-gradient(-45deg,#aca29a 0,#aca29a 50%,#93b483 50%,#93b483 100%);background:linear-gradient(135deg,#aca29a 0,#aca29a 50%,#93b483 50%,#93b483 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#aca29a', endColorstr='#93b483', GradientType=1)}.ms-cost.ms-rw{background:#db8664;background:-moz-linear-gradient(-45deg,#db8664 0,#db8664 50%,#edf2b0 50%,#edf2b0 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#db8664),color-stop(50%,#db8664),color-stop(50%,#edf2b0),color-stop(100%,#edf2b0));background:-webkit-linear-gradient(-45deg,#db8664 0,#db8664 50%,#edf2b0 50%,#edf2b0 100%);background:-o-linear-gradient(-45deg,#db8664 0,#db8664 50%,#edf2b0 50%,#edf2b0 100%);background:-ms-linear-gradient(-45deg,#db8664 0,#db8664 50%,#edf2b0 50%,#edf2b0 100%);background:linear-gradient(135deg,#db8664 0,#db8664 50%,#edf2b0 50%,#edf2b0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#db8664', endColorstr='#edf2b0', GradientType=1)}.ms-cost.ms-rg{background:#db8664;background:-moz-linear-gradient(-45deg,#db8664 0,#db8664 50%,#93b483 50%,#93b483 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#db8664),color-stop(50%,#db8664),color-stop(50%,#93b483),color-stop(100%,#93b483));background:-webkit-linear-gradient(-45deg,#db8664 0,#db8664 50%,#93b483 50%,#93b483 100%);background:-o-linear-gradient(-45deg,#db8664 0,#db8664 50%,#93b483 50%,#93b483 100%);background:-ms-linear-gradient(-45deg,#db8664 0,#db8664 50%,#93b483 50%,#93b483 100%);background:linear-gradient(135deg,#db8664 0,#db8664 50%,#93b483 50%,#93b483 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#db8664', endColorstr='#93b483', GradientType=1)}.ms-cost.ms-gw{background:#93b483;background:-moz-linear-gradient(-45deg,#93b483 0,#93b483 50%,#edf2b0 50%,#edf2b0 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#93b483),color-stop(50%,#93b483),color-stop(50%,#edf2b0),color-stop(100%,#edf2b0));background:-webkit-linear-gradient(-45deg,#93b483 0,#93b483 50%,#edf2b0 50%,#edf2b0 100%);background:-o-linear-gradient(-45deg,#93b483 0,#93b483 50%,#edf2b0 50%,#edf2b0 100%);background:-ms-linear-gradient(-45deg,#93b483 0,#93b483 50%,#edf2b0 50%,#edf2b0 100%);background:linear-gradient(135deg,#93b483 0,#93b483 50%,#edf2b0 50%,#edf2b0 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#93b483', endColorstr='#edf2b0', GradientType=1)}.ms-cost.ms-gu{background:#93b483;background:-moz-linear-gradient(-45deg,#93b483 0,#93b483 50%,#b5cde3 50%,#b5cde3 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#93b483),color-stop(50%,#93b483),color-stop(50%,#b5cde3),color-stop(100%,#b5cde3));background:-webkit-linear-gradient(-45deg,#93b483 0,#93b483 50%,#b5cde3 50%,#b5cde3 100%);background:-o-linear-gradient(-45deg,#93b483 0,#93b483 50%,#b5cde3 50%,#b5cde3 100%);background:-ms-linear-gradient(-45deg,#93b483 0,#93b483 50%,#b5cde3 50%,#b5cde3 100%);background:linear-gradient(135deg,#93b483 0,#93b483 50%,#b5cde3 50%,#b5cde3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#93b483', endColorstr='#b5cde3', GradientType=1)}.ms-cost.ms-2w{background:#beb9b2;background:-moz-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#edf2b0 50%,#edf2b0 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#beb9b2),color-stop(50%,#beb9b2),color-stop(50%,#edf2b0),color-stop(100%,#edf2b0));background:-webkit-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#edf2b0 50%,#edf2b0 100%);background:-o-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#edf2b0 50%,#edf2b0 100%);background:-ms-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#edf2b0 50%,#edf2b0 100%);background:linear-gradient(135deg,#beb9b2 0,#beb9b2 50%,#edf2b0 50%,#edf2b0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#beb9b2', endColorstr='#edf2b0', GradientType=1)}.ms-cost.ms-2u{background:#beb9b2;background:-moz-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#b5cde3 50%,#b5cde3 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#beb9b2),color-stop(50%,#beb9b2),color-stop(50%,#b5cde3),color-stop(100%,#b5cde3));background:-webkit-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#b5cde3 50%,#b5cde3 100%);background:-o-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#b5cde3 50%,#b5cde3 100%);background:-ms-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#b5cde3 50%,#b5cde3 100%);background:linear-gradient(135deg,#beb9b2 0,#beb9b2 50%,#b5cde3 50%,#b5cde3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#beb9b2', endColorstr='#b5cde3', GradientType=1)}.ms-cost.ms-2b{background:#beb9b2;background:-moz-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#9c9188 50%,#9c9188 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#beb9b2),color-stop(50%,#beb9b2),color-stop(50%,#9c9188),color-stop(100%,#9c9188));background:-webkit-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#9c9188 50%,#9c9188 100%);background:-o-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#9c9188 50%,#9c9188 100%);background:-ms-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#9c9188 50%,#9c9188 100%);background:linear-gradient(135deg,#beb9b2 0,#beb9b2 50%,#9c9188 50%,#9c9188 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#beb9b2', endColorstr='#9c9188', GradientType=1)}.ms-cost.ms-2r{background:#beb9b2;background:-moz-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#db8664 50%,#db8664 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#beb9b2),color-stop(50%,#beb9b2),color-stop(50%,#db8664),color-stop(100%,#db8664));background:-webkit-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#db8664 50%,#db8664 100%);background:-o-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#db8664 50%,#db8664 100%);background:-ms-linear-gradient(-45deg,#beb9b2 0,#beb9b2 
50%,#db8664 50%,#db8664 100%);background:linear-gradient(135deg,#beb9b2 0,#beb9b2 50%,#db8664 50%,#db8664 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#beb9b2', endColorstr='#db8664', GradientType=1)}.ms-cost.ms-2g{background:#beb9b2;background:-moz-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#93b483 50%,#93b483 100%);background:-webkit-gradient(linear,left top,right bottom,color-stop(0,#beb9b2),color-stop(50%,#beb9b2),color-stop(50%,#93b483),color-stop(100%,#93b483));background:-webkit-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#93b483 50%,#93b483 100%);background:-o-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#93b483 50%,#93b483 100%);background:-ms-linear-gradient(-45deg,#beb9b2 0,#beb9b2 50%,#93b483 50%,#93b483 100%);background:linear-gradient(135deg,#beb9b2 0,#beb9b2 50%,#93b483 50%,#93b483 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#beb9b2', endColorstr='#93b483', GradientType=1)}.ms-cost.ms-bp:before,.ms-cost.ms-gp:before,.ms-cost.ms-p:before,.ms-cost.ms-rp:before,.ms-cost.ms-up:before,.ms-cost.ms-wp:before{display:inline-block;-moz-transform:scale(1.2,1.2);-webkit-transform:scale(1.2,1.2);transform:scale(1.2,1.2)}.ms-cost.ms-tap-alt:before{display:inline-block;-moz-transform:scale(1.2,1.2);-webkit-transform:scale(1.2,1.2);transform:scale(1.2,1.2);padding-left:.06em;padding-bottom:.1em}.ms-cost.ms-s:before{color:#fff;-webkit-text-stroke:2px #fff;font-size:.85em;top:-.05em;position:relative;display:inline-block}.ms-cost.ms-s:after{content:"\e619";position:absolute;color:#333;margin-left:-.9em;font-size:1.1em}.ms-cost.ms-untap{background-color:#111;color:#fff}.ms-cost.ms-shadow{box-shadow:-.06em .07em 0 #111,0 .06em 0 #111}.ms-cost.ms-shadow.ms-untap{box-shadow:-.06em .07em 0 #fff,0 .06em 0 #fff}.ms-split{position:relative;width:1.3em;height:1.3em}.ms-split:after,.ms-split:before{font-size:.55em!important;position:absolute}.ms-split:before{top:-.38em;left:.28em}.ms-split:after{top:.5em;left:1em}.ms-half{width:.675em;overflow:hidden;display:inline-block;margin-left:.675em}.ms-half>.ms-cost{margin-left:-.675em}.ms-100{width:2.4em}.ms-100000{width:5.4em}.ms-loyalty-down,.ms-loyalty-start,.ms-loyalty-up,.ms-loyalty-zero{color:#111;font-size:1.5em;position:relative;text-align:center}.ms-loyalty-start{font-size:2em}.ms-loyalty-0:after,.ms-loyalty-10:after,.ms-loyalty-1:after,.ms-loyalty-2:after,.ms-loyalty-3:after,.ms-loyalty-4:after,.ms-loyalty-5:after,.ms-loyalty-6:after,.ms-loyalty-7:after,.ms-loyalty-8:after,.ms-loyalty-9:after,.ms-loyalty-x:after{color:#fff;display:inline-block;font-size:.5em;font-family:'MPlantin, Garamond, Palatino, ' Times New Roman ', Times, 
serif';position:absolute;left:0;line-height:1.75em;width:100%;text-align:center;-webkit-padding-before:.15em}.ms-loyalty-0:after{content:"0"}.ms-loyalty-up.ms-loyalty-1:after{content:"+1"}.ms-loyalty-up.ms-loyalty-2:after{content:"+2"}.ms-loyalty-up.ms-loyalty-3:after{content:"+3"}.ms-loyalty-up.ms-loyalty-4:after{content:"+4"}.ms-loyalty-up.ms-loyalty-5:after{content:"+5"}.ms-loyalty-up.ms-loyalty-6:after{content:"+6"}.ms-loyalty-up.ms-loyalty-7:after{content:"+7"}.ms-loyalty-up.ms-loyalty-8:after{content:"+8"}.ms-loyalty-up.ms-loyalty-9:after{content:"+9"}.ms-loyalty-up.ms-loyalty-10:after{content:"+10"}.ms-loyalty-up.ms-loyalty-x:after{content:"+X"}.ms-loyalty-start.ms-loyalty-1:after{content:"1"}.ms-loyalty-start.ms-loyalty-2:after{content:"2"}.ms-loyalty-start.ms-loyalty-3:after{content:"3"}.ms-loyalty-start.ms-loyalty-4:after{content:"4"}.ms-loyalty-start.ms-loyalty-5:after{content:"5"}.ms-loyalty-start.ms-loyalty-6:after{content:"6"}.ms-loyalty-start.ms-loyalty-7:after{content:"7"}.ms-loyalty-start.ms-loyalty-8:after{content:"8"}.ms-loyalty-start.ms-loyalty-9:after{content:"9"}.ms-loyalty-start.ms-loyalty-10:after{content:"10"}.ms-loyalty-start.ms-loyalty-x:after{content:"X"}.ms-loyalty-down:after{line-height:1.6em}.ms-loyalty-down.ms-loyalty-1:after{content:"-1"}.ms-loyalty-down.ms-loyalty-2:after{content:"-2"}.ms-loyalty-down.ms-loyalty-3:after{content:"-3"}.ms-loyalty-down.ms-loyalty-4:after{content:"-4"}.ms-loyalty-down.ms-loyalty-5:after{content:"-5"}.ms-loyalty-down.ms-loyalty-6:after{content:"-6"}.ms-loyalty-down.ms-loyalty-7:after{content:"-7"}.ms-loyalty-down.ms-loyalty-8:after{content:"-8"}.ms-loyalty-down.ms-loyalty-9:after{content:"-9"}.ms-loyalty-down.ms-loyalty-10:after{content:"-10"}.ms-loyalty-down.ms-loyalty-x:after{content:"-X"}.ms-dfc{color:#111;border:.05em solid 
#111;border-radius:2em;padding:.025em}.ms-2x{font-size:1.75em}.ms-3x{font-size:2.25em}.ms-4x{font-size:3em}.ms-5x{font-size:3.75em}.ms-6x{font-size:4.5em}.ms-w:before{content:"\e600"}.ms-u:before{content:"\e601"}.ms-b:before{content:"\e602"}.ms-r:before{content:"\e603"}.ms-g:before{content:"\e604"}.ms-0:before{content:"\e605"}.ms-1:before{content:"\e606"}.ms-2:before{content:"\e607"}.ms-3:before{content:"\e608"}.ms-4:before{content:"\e609"}.ms-5:before{content:"\e60a"}.ms-6:before{content:"\e60b"}.ms-7:before{content:"\e60c"}.ms-8:before{content:"\e60d"}.ms-9:before{content:"\e60e"}.ms-10:before{content:"\e60f"}.ms-11:before{content:"\e610"}.ms-12:before{content:"\e611"}.ms-13:before{content:"\e612"}.ms-14:before{content:"\e613"}.ms-15:before{content:"\e614"}.ms-16:before{content:"\e62a"}.ms-17:before{content:"\e62b"}.ms-18:before{content:"\e62c"}.ms-19:before{content:"\e62d"}.ms-20:before{content:"\e62e"}.ms-x:before{content:"\e615"}.ms-y:before{content:"\e616"}.ms-z:before{content:"\e617"}.ms-bp:before,.ms-gp:before,.ms-p:before,.ms-rp:before,.ms-up:before,.ms-wp:before{content:"\e618"}.ms-s:before{content:"\e619"}.ms-c:before{content:"\e904"}.ms-tap:before{content:"\e61a"}.ms-untap:before{content:"\e61b"}.ms-tap-alt:before{content:"\e61c"}.ms-chaos:before{content:"\e61d"}.ms-1-2:before{content:"\e902"}.ms-infinity:before{content:"\e903"}.ms-artifact:before{content:"\e61e"}.ms-creature:before{content:"\e61f"}.ms-enchantment:before{content:"\e620"}.ms-instant:before{content:"\e621"}.ms-land:before{content:"\e622"}.ms-planeswalker:before{content:"\e623"}.ms-sorcery:before{content:"\e624"}.ms-2w:after,.ms-gw:after,.ms-rw:after,.ms-wb:before,.ms-wu:before{content:"\e600"}.ms-2u:after,.ms-gu:after,.ms-ub:before,.ms-ur:before,.ms-wu:after{content:"\e601"}.ms-2b:after,.ms-bg:before,.ms-br:before,.ms-ub:after,.ms-wb:after{content:"\e602"}.ms-2r:after,.ms-br:after,.ms-rg:before,.ms-rw:before,.ms-ur:after{content:"\e603"}.ms-2g:after,.ms-bg:after,.ms-gu:before,.ms-gw:before,.ms-rg:after{content:"\e604"}.ms-2b:before,.ms-2g:before,.ms-2r:before,.ms-2u:before,.ms-2w:before{content:"\e607"}.ms-100:before{content:"\e900"}.ms-100000:before{content:"\e901"}.ms-loyalty-up:before{content:"\e627"}.ms-loyalty-down:before{content:"\e625"}.ms-loyalty-zero:before{content:"\e626"}.ms-loyalty-start:before{content:"\e628"}.ms-flashback:before{content:"\e629"}.ms-dfc-night:before{content:"\e905"}.ms-dfc-day:before{content:"\e906"} \ No newline at end of file diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/ranger.py b/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/ranger.py deleted file mode 100644 index 9442fd10d42fcc19f4e0dd798d1573b31ed2c0a0..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/ranger.py +++ /dev/null @@ -1,164 +0,0 @@ -# Ranger deep learning optimizer - RAdam + Lookahead + Gradient Centralization, combined into one optimizer. - -# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer -# and/or -# https://github.com/lessw2020/Best-Deep-Learning-Optimizers - -# Ranger has now been used to capture 12 records on the FastAI leaderboard. 
- -# This version = 20.4.11 - -# Credits: -# Gradient Centralization --> https://arxiv.org/abs/2004.01461v2 (a new optimization technique for DNNs), github: https://github.com/Yonghongwei/Gradient-Centralization -# RAdam --> https://github.com/LiyuanLucasLiu/RAdam -# Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code. -# Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610 - -# summary of changes: -# 4/11/20 - add gradient centralization option. Set new testing benchmark for accuracy with it, toggle with use_gc flag at init. -# full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights), -# supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues. -# changes 8/31/19 - fix references to *self*.N_sma_threshold; -# changed eps to 1e-5 as better default than 1e-8. - -import math -import torch -from torch.optim.optimizer import Optimizer - - -class Ranger(Optimizer): - - def __init__(self, params, lr=1e-3, # lr - alpha=0.5, k=6, N_sma_threshhold=5, # Ranger configs - betas=(.95, 0.999), eps=1e-5, weight_decay=0, # Adam configs - use_gc=True, gc_conv_only=False - # Gradient centralization on or off, applied to conv layers only or conv + fc layers - ): - - # parameter checks - if not 0.0 <= alpha <= 1.0: - raise ValueError(f'Invalid slow update rate: {alpha}') - if not 1 <= k: - raise ValueError(f'Invalid lookahead steps: {k}') - if not lr > 0: - raise ValueError(f'Invalid Learning Rate: {lr}') - if not eps > 0: - raise ValueError(f'Invalid eps: {eps}') - - # parameter comments: - # beta1 (momentum) of .95 seems to work better than .90... - # N_sma_threshold of 5 seems better in testing than 4. - # In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you. 
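A minimal usage sketch for the optimizer defined in this file, assuming the deleted `ranger.py` is importable as `ranger` and an older PyTorch build where its deprecated in-place call signatures (`add_(scalar, tensor)`, `addcmul_(scalar, t1, t2)`) still run; the toy model and data are arbitrary:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

from ranger import Ranger  # the module removed in this diff

model = nn.Linear(10, 2)
opt = Ranger(model.parameters(), lr=1e-3, alpha=0.5, k=6,
             betas=(0.95, 0.999), weight_decay=0, use_gc=True)

x, y = torch.randn(8, 10), torch.randn(8, 2)
for _ in range(12):
    opt.zero_grad()
    F.mse_loss(model(x), y).backward()
    opt.step()  # RAdam update every step, lookahead sync every k=6 steps
```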
- - # prep defaults and init torch.optim base - defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold, - eps=eps, weight_decay=weight_decay) - super().__init__(params, defaults) - - # adjustable threshold - self.N_sma_threshhold = N_sma_threshhold - - # look ahead params - - self.alpha = alpha - self.k = k - - # radam buffer for state - self.radam_buffer = [[None, None, None] for ind in range(10)] - - # gc on or off - self.use_gc = use_gc - - # level of gradient centralization - self.gc_gradient_threshold = 3 if gc_conv_only else 1 - - def __setstate__(self, state): - super(Ranger, self).__setstate__(state) - - def step(self, closure=None): - loss = None - - # Evaluate averages and grad, update param tensors - for group in self.param_groups: - - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data.float() - - if grad.is_sparse: - raise RuntimeError('Ranger optimizer does not support sparse gradients') - - p_data_fp32 = p.data.float() - - state = self.state[p] # get state dict for this param - - if len(state) == 0: # if first time to run...init dictionary with our desired entries - # if self.first_run_check==0: - # self.first_run_check=1 - # print("Initializing slow buffer...should not see this at load from saved model!") - state['step'] = 0 - state['exp_avg'] = torch.zeros_like(p_data_fp32) - state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) - - # look ahead weight storage now in state dict - state['slow_buffer'] = torch.empty_like(p.data) - state['slow_buffer'].copy_(p.data) - - else: - state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) - state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) - - # begin computations - exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] - beta1, beta2 = group['betas'] - - # GC operation for Conv layers and FC layers - if grad.dim() > self.gc_gradient_threshold: - grad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True)) - - state['step'] += 1 - - # compute variance mov avg - exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) - # compute mean moving avg - exp_avg.mul_(beta1).add_(1 - beta1, grad) - - buffered = self.radam_buffer[int(state['step'] % 10)] - - if state['step'] == buffered[0]: - N_sma, step_size = buffered[1], buffered[2] - else: - buffered[0] = state['step'] - beta2_t = beta2 ** state['step'] - N_sma_max = 2 / (1 - beta2) - 1 - N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) - buffered[1] = N_sma - if N_sma > self.N_sma_threshhold: - step_size = math.sqrt( - (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / ( - N_sma_max - 2)) / (1 - beta1 ** state['step']) - else: - step_size = 1.0 / (1 - beta1 ** state['step']) - buffered[2] = step_size - - if group['weight_decay'] != 0: - p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) - - # apply lr - if N_sma > self.N_sma_threshhold: - denom = exp_avg_sq.sqrt().add_(group['eps']) - p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom) - else: - p_data_fp32.add_(-step_size * group['lr'], exp_avg) - - p.data.copy_(p_data_fp32) - - # integrated look ahead... 
- # we do it at the param level instead of group level - if state['step'] % group['k'] == 0: - slow_p = state['slow_buffer'] # get access to slow param tensor - slow_p.add_(self.alpha, p.data - slow_p) # (fast weights - slow weights) * alpha - p.data.copy_(slow_p) # copy interpolated weights to RAdam param tensor - - return loss \ No newline at end of file diff --git a/spaces/haakohu/deep_privacy2/gradio_demos/body_cse.py b/spaces/haakohu/deep_privacy2/gradio_demos/body_cse.py deleted file mode 100644 index 2ddf1859352841539fdf5c4b6e9d0f098264208d..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2/gradio_demos/body_cse.py +++ /dev/null @@ -1,23 +0,0 @@ -import gradio -from dp2 import utils -from tops.config import instantiate -import gradio.inputs -from gradio_demos.modules import ExampleDemo, WebcamDemo - - -cfg_body = utils.load_config("configs/anonymizers/FB_cse.py") -anonymizer_body = instantiate(cfg_body.anonymizer, load_cache=False) -anonymizer_body.initialize_tracker(fps=1) - - -with gradio.Blocks() as demo: - gradio.Markdown("# <center> DeepPrivacy2 - Realistic Image Anonymization </center>") - gradio.Markdown("### <center> Håkon Hukkelås, Rudolf Mester, Frank Lindseth </center>") - with gradio.Tab("Full-Body CSE Anonymization"): - ExampleDemo(anonymizer_body) - with gradio.Tab("Full-body CSE Webcam"): - WebcamDemo(anonymizer_body) - - -demo.launch() - diff --git a/spaces/haakohu/deep_privacy2/sg3_torch_utils/ops/bias_act.py b/spaces/haakohu/deep_privacy2/sg3_torch_utils/ops/bias_act.py deleted file mode 100644 index 7c39717268055fafe737419486cf96f1f93f4fb5..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2/sg3_torch_utils/ops/bias_act.py +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom PyTorch ops for efficient bias and activation.""" - -import os -import warnings -import numpy as np -import torch -import traceback - -from .. 
import custom_ops -from easydict import EasyDict -from torch.cuda.amp import custom_bwd, custom_fwd -#---------------------------------------------------------------------------- - -activation_funcs = { - 'linear': EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False), - 'relu': EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False), - 'lrelu': EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False), - 'tanh': EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True), - 'sigmoid': EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True), - 'elu': EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True), - 'selu': EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True), - 'softplus': EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True), - 'swish': EasyDict(func=lambda x, **_: torch.nn.functional.silu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True), -} - -#---------------------------------------------------------------------------- - -_inited = False -_plugin = None -enabled = False -_null_tensor = torch.empty([0]) - -def _init(): - global _inited, _plugin - if not _inited: - _inited = True - sources = ['bias_act.cpp', 'bias_act.cu'] - sources = [os.path.join(os.path.dirname(__file__), s) for s in sources] - try: - _plugin = custom_ops.get_plugin('bias_act_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math']) - except: - warnings.warn('Failed to build CUDA kernels for bias_act. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc()) - return _plugin is not None - -#---------------------------------------------------------------------------- - -def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'): - r"""Fused bias and activation function. - - Adds bias `b` to activation tensor `x`, evaluates activation function `act`, - and scales the result by `gain`. Each of the steps is optional. In most cases, - the fused op is considerably more efficient than performing the same calculation - using standard PyTorch ops. It supports first and second order gradients, - but not third order gradients. - - Args: - x: Input activation tensor. Can be of any shape. - b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type - as `x`. The shape must be known, and it must match the dimension of `x` - corresponding to `dim`. - dim: The dimension in `x` corresponding to the elements of `b`. - The value of `dim` is ignored if `b` is not specified. - act: Name of the activation function to evaluate, or `"linear"` to disable. - Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc. - See `activation_funcs` for a full list. `None` is not allowed. - alpha: Shape parameter for the activation function, or `None` to use the default. - gain: Scaling factor for the output tensor, or `None` to use default. - See `activation_funcs` for the default scaling of each activation function. - If unsure, consider specifying 1. 
- clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable - the clamping (default). - impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). - - Returns: - Tensor of the same shape and datatype as `x`. - """ - assert isinstance(x, torch.Tensor) - assert impl in ['ref', 'cuda'] - if impl == 'cuda' and x.device.type == 'cuda' and enabled and _init(): - return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b) - return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp) - -#---------------------------------------------------------------------------- - -def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None): - """Slow reference implementation of `bias_act()` using standard TensorFlow ops. - """ - assert isinstance(x, torch.Tensor) - assert clamp is None or clamp >= 0 - spec = activation_funcs[act] - alpha = float(alpha if alpha is not None else spec.def_alpha) - gain = float(gain if gain is not None else spec.def_gain) - clamp = float(clamp if clamp is not None else -1) - - # Add bias. - if b is not None: - assert isinstance(b, torch.Tensor) and b.ndim == 1 - assert 0 <= dim < x.ndim - assert b.shape[0] == x.shape[dim] - x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)]) - - # Evaluate activation function. - alpha = float(alpha) - x = spec.func(x, alpha=alpha) - - # Scale by gain. - gain = float(gain) - if gain != 1: - x = x * gain - - # Clamp. - if clamp >= 0: - x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type - return x - -#---------------------------------------------------------------------------- - -_bias_act_cuda_cache = dict() - -def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None): - """Fast CUDA implementation of `bias_act()` using custom ops. - """ - # Parse arguments. - assert clamp is None or clamp >= 0 - spec = activation_funcs[act] - alpha = float(alpha if alpha is not None else spec.def_alpha) - gain = float(gain if gain is not None else spec.def_gain) - clamp = float(clamp if clamp is not None else -1) - - # Lookup from cache. - key = (dim, act, alpha, gain, clamp) - if key in _bias_act_cuda_cache: - return _bias_act_cuda_cache[key] - - # Forward op. - class BiasActCuda(torch.autograd.Function): - @staticmethod - @custom_fwd(cast_inputs=torch.float16) - def forward(ctx, x, b): # pylint: disable=arguments-differ - ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format - x = x.contiguous(memory_format=ctx.memory_format) - b = b.contiguous() if b is not None else _null_tensor - y = x - if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor: - y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp) - ctx.save_for_backward( - x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, - b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, - y if 'y' in spec.ref else _null_tensor) - return y - - @staticmethod - @custom_bwd - def backward(ctx, dy): # pylint: disable=arguments-differ - dy = dy.contiguous(memory_format=ctx.memory_format) - x, b, y = ctx.saved_tensors - dx = None - db = None - - if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: - dx = dy - if act != 'linear' or gain != 1 or clamp >= 0: - dx = BiasActCudaGrad.apply(dy, x, b, y) - - if ctx.needs_input_grad[1]: - db = dx.sum([i for i in range(dx.ndim) if i != dim]) - - return dx, db - - # Backward op. 
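The docstring above describes the fused bias-and-activation entry point; the sketch below exercises only the slow reference path (`impl='ref'`), so no CUDA plugin build is needed. It assumes the DeepPrivacy2 repository root is on `sys.path`; the tensor shapes are arbitrary.

```python
import torch

from sg3_torch_utils.ops.bias_act import bias_act

x = torch.randn(4, 8)  # activations, channels along dim=1
b = torch.zeros(8)     # per-channel bias, must match x.shape[1]

# leaky ReLU with the default alpha=0.2, no extra gain, no clamping
y = bias_act(x, b, dim=1, act='lrelu', gain=1.0, clamp=None, impl='ref')
print(y.shape)         # torch.Size([4, 8])
```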
- class BiasActCudaGrad(torch.autograd.Function): - @staticmethod - @custom_fwd(cast_inputs=torch.float16) - def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ - ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format - dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp) - ctx.save_for_backward( - dy if spec.has_2nd_grad else _null_tensor, - x, b, y) - return dx - - @staticmethod - @custom_bwd - def backward(ctx, d_dx): # pylint: disable=arguments-differ - d_dx = d_dx.contiguous(memory_format=ctx.memory_format) - dy, x, b, y = ctx.saved_tensors - d_dy = None - d_x = None - d_b = None - d_y = None - - if ctx.needs_input_grad[0]: - d_dy = BiasActCudaGrad.apply(d_dx, x, b, y) - - if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]): - d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp) - - if spec.has_2nd_grad and ctx.needs_input_grad[2]: - d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim]) - - return d_dy, d_x, d_b, d_y - - # Add to cache. - _bias_act_cuda_cache[key] = BiasActCuda - return BiasActCuda - -#---------------------------------------------------------------------------- diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/dev/packaging/build_wheel.sh b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/dev/packaging/build_wheel.sh deleted file mode 100644 index bc80b56c6717edca7f11fe51de261beed7902514..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/dev/packaging/build_wheel.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -set -ex - -ldconfig # https://github.com/NVIDIA/nvidia-docker/issues/854 - -script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -. "$script_dir/pkg_helpers.bash" - -echo "Build Settings:" -echo "CU_VERSION: $CU_VERSION" # e.g. cu101 -echo "D2_VERSION_SUFFIX: $D2_VERSION_SUFFIX" # e.g. +cu101 or "" -echo "PYTHON_VERSION: $PYTHON_VERSION" # e.g. 3.6 -echo "PYTORCH_VERSION: $PYTORCH_VERSION" # e.g. 
1.4 - -setup_cuda -setup_wheel_python -yum install ninja-build -y && ln -sv /usr/bin/ninja-build /usr/bin/ninja - -export TORCH_VERSION_SUFFIX="+$CU_VERSION" -if [[ "$CU_VERSION" == "cu102" ]]; then - export TORCH_VERSION_SUFFIX="" -fi -pip_install pip numpy -U -pip_install "torch==$PYTORCH_VERSION$TORCH_VERSION_SUFFIX" \ - -f https://download.pytorch.org/whl/$CU_VERSION/torch_stable.html - -# use separate directories to allow parallel build -BASE_BUILD_DIR=build/$CU_VERSION/$PYTHON_VERSION -python setup.py \ - build -b $BASE_BUILD_DIR \ - bdist_wheel -b $BASE_BUILD_DIR/build_dist -d wheels/$CU_VERSION diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/global_local_parsing/global_local_evaluate.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/global_local_parsing/global_local_evaluate.py deleted file mode 100644 index 288e3c8214f945d5a4f5fc6824b74b3d42e037b2..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/global_local_parsing/global_local_evaluate.py +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/env python -# -*- encoding: utf-8 -*- - -""" -@Author : Peike Li -@Contact : peike.li@yahoo.com -@File : evaluate.py -@Time : 8/4/19 3:36 PM -@Desc : -@License : This source code is licensed under the license found in the - LICENSE file in the root directory of this source tree. -""" - -import os -import argparse -import numpy as np -import torch - -from torch.utils import data -from tqdm import tqdm -from PIL import Image as PILImage -import torchvision.transforms as transforms -import torch.backends.cudnn as cudnn - -import networks -from utils.miou import compute_mean_ioU -from utils.transforms import BGR2RGB_transform -from utils.transforms import transform_parsing, transform_logits -from mhp_extension.global_local_parsing.global_local_datasets import CropDataValSet - - -def get_arguments(): - """Parse all the arguments provided from the CLI. - - Returns: - A list of parsed arguments. - """ - parser = argparse.ArgumentParser(description="Self Correction for Human Parsing") - - # Network Structure - parser.add_argument("--arch", type=str, default='resnet101') - # Data Preference - parser.add_argument("--data-dir", type=str, default='./data/LIP') - parser.add_argument("--batch-size", type=int, default=1) - parser.add_argument("--split-name", type=str, default='crop_pic') - parser.add_argument("--input-size", type=str, default='473,473') - parser.add_argument("--num-classes", type=int, default=20) - parser.add_argument("--ignore-label", type=int, default=255) - parser.add_argument("--random-mirror", action="store_true") - parser.add_argument("--random-scale", action="store_true") - # Evaluation Preference - parser.add_argument("--log-dir", type=str, default='./log') - parser.add_argument("--model-restore", type=str, default='./log/checkpoint.pth.tar') - parser.add_argument("--gpu", type=str, default='0', help="choose gpu device.") - parser.add_argument("--save-results", action="store_true", help="whether to save the results.") - parser.add_argument("--flip", action="store_true", help="random flip during the test.") - parser.add_argument("--multi-scales", type=str, default='1', help="multiple scales during the test") - return parser.parse_args() - - -def get_palette(num_cls): - """ Returns the color map for visualizing the segmentation mask. 
- Args: - num_cls: Number of classes - Returns: - The color map - """ - n = num_cls - palette = [0] * (n * 3) - for j in range(0, n): - lab = j - palette[j * 3 + 0] = 0 - palette[j * 3 + 1] = 0 - palette[j * 3 + 2] = 0 - i = 0 - while lab: - palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) - palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) - palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) - i += 1 - lab >>= 3 - return palette - - -def multi_scale_testing(model, batch_input_im, crop_size=[473, 473], flip=True, multi_scales=[1]): - flipped_idx = (15, 14, 17, 16, 19, 18) - if len(batch_input_im.shape) > 4: - batch_input_im = batch_input_im.squeeze() - if len(batch_input_im.shape) == 3: - batch_input_im = batch_input_im.unsqueeze(0) - - interp = torch.nn.Upsample(size=crop_size, mode='bilinear', align_corners=True) - ms_outputs = [] - for s in multi_scales: - interp_im = torch.nn.Upsample(scale_factor=s, mode='bilinear', align_corners=True) - scaled_im = interp_im(batch_input_im) - parsing_output = model(scaled_im) - parsing_output = parsing_output[0][-1] - output = parsing_output[0] - if flip: - flipped_output = parsing_output[1] - flipped_output[14:20, :, :] = flipped_output[flipped_idx, :, :] - output += flipped_output.flip(dims=[-1]) - output *= 0.5 - output = interp(output.unsqueeze(0)) - ms_outputs.append(output[0]) - ms_fused_parsing_output = torch.stack(ms_outputs) - ms_fused_parsing_output = ms_fused_parsing_output.mean(0) - ms_fused_parsing_output = ms_fused_parsing_output.permute(1, 2, 0) # HWC - parsing = torch.argmax(ms_fused_parsing_output, dim=2) - parsing = parsing.data.cpu().numpy() - ms_fused_parsing_output = ms_fused_parsing_output.data.cpu().numpy() - return parsing, ms_fused_parsing_output - - -def main(): - """Create the model and start the evaluation process.""" - args = get_arguments() - multi_scales = [float(i) for i in args.multi_scales.split(',')] - gpus = [int(i) for i in args.gpu.split(',')] - assert len(gpus) == 1 - if not args.gpu == 'None': - os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu - - cudnn.benchmark = True - cudnn.enabled = True - - h, w = map(int, args.input_size.split(',')) - input_size = [h, w] - - model = networks.init_model(args.arch, num_classes=args.num_classes, pretrained=None) - - IMAGE_MEAN = model.mean - IMAGE_STD = model.std - INPUT_SPACE = model.input_space - print('image mean: {}'.format(IMAGE_MEAN)) - print('image std: {}'.format(IMAGE_STD)) - print('input space:{}'.format(INPUT_SPACE)) - if INPUT_SPACE == 'BGR': - print('BGR Transformation') - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=IMAGE_MEAN, - std=IMAGE_STD), - - ]) - if INPUT_SPACE == 'RGB': - print('RGB Transformation') - transform = transforms.Compose([ - transforms.ToTensor(), - BGR2RGB_transform(), - transforms.Normalize(mean=IMAGE_MEAN, - std=IMAGE_STD), - ]) - - # Data loader - lip_test_dataset = CropDataValSet(args.data_dir, args.split_name, crop_size=input_size, transform=transform, - flip=args.flip) - num_samples = len(lip_test_dataset) - print('Totoal testing sample numbers: {}'.format(num_samples)) - testloader = data.DataLoader(lip_test_dataset, batch_size=args.batch_size, shuffle=False, pin_memory=True) - - # Load model weight - state_dict = torch.load(args.model_restore) - from collections import OrderedDict - new_state_dict = OrderedDict() - for k, v in state_dict.items(): - name = k[7:] # remove `module.` - new_state_dict[name] = v - model.load_state_dict(new_state_dict) - model.cuda() - model.eval() - - 
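The bit-reversal palette builder (`get_palette`) earlier in this file is easier to trust with a small worked check. This snippet simply calls the function defined above; the expected values were computed by hand and correspond to the usual VOC-style class colors:

```python
palette = get_palette(20)
print(palette[:9])
# [0, 0, 0, 128, 0, 0, 0, 128, 0]
#  class 0 = black, class 1 = dark red, class 2 = dark green
```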
sp_results_dir = os.path.join(args.log_dir, args.split_name + '_parsing') - if not os.path.exists(sp_results_dir): - os.makedirs(sp_results_dir) - - palette = get_palette(20) - parsing_preds = [] - scales = np.zeros((num_samples, 2), dtype=np.float32) - centers = np.zeros((num_samples, 2), dtype=np.int32) - with torch.no_grad(): - for idx, batch in enumerate(tqdm(testloader)): - image, meta = batch - if (len(image.shape) > 4): - image = image.squeeze() - im_name = meta['name'][0] - c = meta['center'].numpy()[0] - s = meta['scale'].numpy()[0] - w = meta['width'].numpy()[0] - h = meta['height'].numpy()[0] - scales[idx, :] = s - centers[idx, :] = c - parsing, logits = multi_scale_testing(model, image.cuda(), crop_size=input_size, flip=args.flip, - multi_scales=multi_scales) - if args.save_results: - parsing_result = transform_parsing(parsing, c, s, w, h, input_size) - parsing_result_path = os.path.join(sp_results_dir, im_name + '.png') - output_im = PILImage.fromarray(np.asarray(parsing_result, dtype=np.uint8)) - output_im.putpalette(palette) - output_im.save(parsing_result_path) - # save logits - logits_result = transform_logits(logits, c, s, w, h, input_size) - logits_result_path = os.path.join(sp_results_dir, im_name + '.npy') - np.save(logits_result_path, logits_result) - return - - -if __name__ == '__main__': - main() diff --git a/spaces/hdhzk/bingo/src/components/ui/sheet.tsx b/spaces/hdhzk/bingo/src/components/ui/sheet.tsx deleted file mode 100644 index c9f5ce0f81a91067bb013e988a07eb1e6bf6953b..0000000000000000000000000000000000000000 --- a/spaces/hdhzk/bingo/src/components/ui/sheet.tsx +++ /dev/null @@ -1,122 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SheetPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Sheet = SheetPrimitive.Root - -const SheetTrigger = SheetPrimitive.Trigger - -const SheetClose = SheetPrimitive.Close - -const SheetPortal = ({ - className, - children, - ...props -}: SheetPrimitive.DialogPortalProps) => ( - <SheetPrimitive.Portal - className={cn('fixed inset-0 z-50 flex', className)} - {...props} - > - {children} - </SheetPrimitive.Portal> -) -SheetPortal.displayName = SheetPrimitive.Portal.displayName - -const SheetOverlay = React.forwardRef< - React.ElementRef<typeof SheetPrimitive.Overlay>, - React.ComponentPropsWithoutRef<typeof SheetPrimitive.Overlay> ->(({ className, children, ...props }, ref) => ( - <SheetPrimitive.Overlay - className={cn( - 'fixed inset-0 z-50 transition-all duration-100 data-[state=closed]:animate-out data-[state=closed]:fade-out data-[state=open]:fade-in', - className - )} - {...props} - ref={ref} - /> -)) -SheetOverlay.displayName = SheetPrimitive.Overlay.displayName - -const SheetContent = React.forwardRef< - React.ElementRef<typeof SheetPrimitive.Content>, - React.ComponentPropsWithoutRef<typeof SheetPrimitive.Content> ->(({ className, children, ...props }, ref) => ( - <SheetPortal> - <SheetPrimitive.Content - ref={ref} - className={cn( - 'fixed inset-y-0 left-0 z-50 h-full border-r bg-background p-6 shadow-lg transition ease-in-out data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:slide-out-to-left data-[state=open]:slide-in-from-left data-[state=closed]:duration-300 data-[state=open]:duration-500 sm:max-w-sm', - className - )} - {...props} - > - {children} - <SheetPrimitive.Close className="absolute right-4 top-4 rounded-sm opacity-70 ring-offset-background transition-opacity hover:opacity-100 
focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:pointer-events-none data-[state=open]:bg-secondary"> - <IconClose /> - <span className="sr-only">Close</span> - </SheetPrimitive.Close> - </SheetPrimitive.Content> - </SheetPortal> -)) -SheetContent.displayName = SheetPrimitive.Content.displayName - -const SheetHeader = ({ - className, - ...props -}: React.HTMLAttributes<HTMLDivElement>) => ( - <div className={cn('flex flex-col space-y-2', className)} {...props} /> -) -SheetHeader.displayName = 'SheetHeader' - -const SheetFooter = ({ - className, - ...props -}: React.HTMLAttributes<HTMLDivElement>) => ( - <div - className={cn( - 'flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2', - className - )} - {...props} - /> -) -SheetFooter.displayName = 'SheetFooter' - -const SheetTitle = React.forwardRef< - React.ElementRef<typeof SheetPrimitive.Title>, - React.ComponentPropsWithoutRef<typeof SheetPrimitive.Title> ->(({ className, ...props }, ref) => ( - <SheetPrimitive.Title - ref={ref} - className={cn('text-lg font-semibold text-foreground', className)} - {...props} - /> -)) -SheetTitle.displayName = SheetPrimitive.Title.displayName - -const SheetDescription = React.forwardRef< - React.ElementRef<typeof SheetPrimitive.Description>, - React.ComponentPropsWithoutRef<typeof SheetPrimitive.Description> ->(({ className, ...props }, ref) => ( - <SheetPrimitive.Description - ref={ref} - className={cn('text-sm text-muted-foreground', className)} - {...props} - /> -)) -SheetDescription.displayName = SheetPrimitive.Description.displayName - -export { - Sheet, - SheetTrigger, - SheetClose, - SheetContent, - SheetHeader, - SheetFooter, - SheetTitle, - SheetDescription -} diff --git a/spaces/hilmyblaze/WebUI-Counterfeit-V2.5/LINK.md b/spaces/hilmyblaze/WebUI-Counterfeit-V2.5/LINK.md deleted file mode 100644 index 97e11e59339d5a69bb6a248c8787baba7f328c9f..0000000000000000000000000000000000000000 --- a/spaces/hilmyblaze/WebUI-Counterfeit-V2.5/LINK.md +++ /dev/null @@ -1,103 +0,0 @@ -## Psp Crack 6.61 Download - - - - - - ![Psp Crack 6.61 Download - - - - - -<p><b>LINK ---> \[https://conttooperting.blogspot.com/?l=2txRTC\](https://conttooperting.blogspot.com/?l=2txRTC)</b></p> - - - - - - - - - - - - - - - - - - - -<h1>How to Download and Install PSP Software Version 6.61</h1> -<p>If you have an old PSP 1000 and want to play games through the memory card, you will need to update your firmware to version 6.61 and install a custom firmware (CFW). This will allow you to run homebrew applications and games, as well as emulators for other consoles. In this article, we will show you how to download and install PSP software version 6.61 safely and easily.</p> -<h2>Step 1: Download PSP Software Version 6.61</h2> -<p>The first step is to download the official PSP software version 6.61 from Sony's website. You can find the link here\[^1^\]. Make sure you choose the right region for your PSP model. The file size is about 31 MB and it is a ZIP file.</p> -<h2>Step 2: Extract and Copy the Update File</h2> -<p>After downloading the ZIP file, you need to extract it using a program like WinRAR or 7-Zip. You will get a folder called ](https://wololo.net/wagic/wp-content/uploads/2016/02/IMG_0479.jpg) - -To copy the update file, you need to connect your PSP to your computer using a USB cable. Then, go to Settings > USB Connection on your PSP and press X to enter USB mode. Your computer should recognize your PSP as a removable drive. - - - -Open the drive and look for a folder called "PSP". 
If you don't have one, create it. Inside the "PSP" folder, look for another folder called "GAME". If you don't have one, create it too. Inside the "GAME" folder, paste the "UPDATE" folder that you extracted earlier. Make sure the path is like this: PSP/GAME/UPDATE/EBOOT.PBP. - - - -## Step 3: Run the Update File - - - -Now that you have copied the update file to your PSP memory card, you can disconnect your PSP from your computer and exit USB mode. Then, go to Game > Memory Stick on your PSP and look for an icon that says "PSP Update ver 6.61". Press X to run it. - - - -The update process will start and you will see a progress bar on your screen. Do not turn off your PSP or remove the memory card while the update is running. Wait until it finishes and your PSP will restart automatically. - - - -Congratulations! You have successfully updated your PSP firmware to version 6.61. You can check it by going to Settings > System Settings > System Information on your PSP. - - - -## Step 4: Install Custom Firmware - - - -The next step is to install a custom firmware (CFW) on your PSP. This will allow you to run homebrew applications and games, as well as emulators for other consoles. There are many CFWs available for PSP, but we recommend using Infinity PRO CFW, which is compatible with all PSP models and versions. - - - -To install Infinity PRO CFW, you will need to download two files: Infinity Firmware Builder and Infinity PRO CFW Installer. You can find the links here . The files are ZIP files that contain folders with files inside them. - - - -You need to extract both ZIP files and copy their folders to your PSP memory card, just like you did with the update file in step 2. Make sure the paths are like this: PSP/GAME/INFINITY/EBOOT.PBP and PSP/GAME/PROUPDATE/EBOOT.PBP. - - - -## Step 5: Run Infinity Firmware Builder - - - -After copying both folders to your PSP memory card, you can disconnect your PSP from your computer and exit USB mode. Then, go to Game > Memory Stick on your PSP and look for an icon that says "Infinity Firmware Builder". Press X to run it. - - - -The program will ask you to press X to start building the custom firmware. Do so and wait until it finishes. It will create a file called "660.PBP" in your memory card root directory. - - - -## Step 6: Run Infinity PRO CFW Installer - - - -The next step is to run Infinity PRO CFW Installer on your PSP. Go back to Game > Memory Stick on your PSP and look for an - - dfd1c89656 - - - - - diff --git a/spaces/hirol/controlnetOverMask/script.js b/spaces/hirol/controlnetOverMask/script.js deleted file mode 100644 index 1b9a443f9d75d5cbd474735fcf15544f253f68a0..0000000000000000000000000000000000000000 --- a/spaces/hirol/controlnetOverMask/script.js +++ /dev/null @@ -1,104 +0,0 @@ -function gradioApp() { - const elems = document.getElementsByTagName('gradio-app') - const elem = elems.length == 0 ? document : elems[0] - - if (elem !== document) elem.getElementById = function(id){ return document.getElementById(id) } - return elem.shadowRoot ? 
elem.shadowRoot : elem -} - -function get_uiCurrentTab() { - return gradioApp().querySelector('#tabs button:not(.border-transparent)') -} - -function get_uiCurrentTabContent() { - return gradioApp().querySelector('.tabitem[id^=tab_]:not([style*="display: none"])') -} - -uiUpdateCallbacks = [] -uiLoadedCallbacks = [] -uiTabChangeCallbacks = [] -optionsChangedCallbacks = [] -let uiCurrentTab = null - -function onUiUpdate(callback){ - uiUpdateCallbacks.push(callback) -} -function onUiLoaded(callback){ - uiLoadedCallbacks.push(callback) -} -function onUiTabChange(callback){ - uiTabChangeCallbacks.push(callback) -} -function onOptionsChanged(callback){ - optionsChangedCallbacks.push(callback) -} - -function runCallback(x, m){ - try { - x(m) - } catch (e) { - (console.error || console.log).call(console, e.message, e); - } -} -function executeCallbacks(queue, m) { - queue.forEach(function(x){runCallback(x, m)}) -} - -var executedOnLoaded = false; - -document.addEventListener("DOMContentLoaded", function() { - var mutationObserver = new MutationObserver(function(m){ - if(!executedOnLoaded && gradioApp().querySelector('#txt2img_prompt')){ - executedOnLoaded = true; - executeCallbacks(uiLoadedCallbacks); - } - - executeCallbacks(uiUpdateCallbacks, m); - const newTab = get_uiCurrentTab(); - if ( newTab && ( newTab !== uiCurrentTab ) ) { - uiCurrentTab = newTab; - executeCallbacks(uiTabChangeCallbacks); - } - }); - mutationObserver.observe( gradioApp(), { childList:true, subtree:true }) -}); - -/** - * Add a ctrl+enter as a shortcut to start a generation - */ -document.addEventListener('keydown', function(e) { - var handled = false; - if (e.key !== undefined) { - if((e.key == "Enter" && (e.metaKey || e.ctrlKey || e.altKey))) handled = true; - } else if (e.keyCode !== undefined) { - if((e.keyCode == 13 && (e.metaKey || e.ctrlKey || e.altKey))) handled = true; - } - if (handled) { - button = get_uiCurrentTabContent().querySelector('button[id$=_generate]'); - if (button) { - button.click(); - } - e.preventDefault(); - } -}) - -/** - * checks that a UI element is not in another hidden element or tab content - */ -function uiElementIsVisible(el) { - let isVisible = !el.closest('.\\!hidden'); - if ( ! isVisible ) { - return false; - } - - while( isVisible = el.closest('.tabitem')?.style.display !== 'none' ) { - if ( ! 
isVisible ) { - return false; - } else if ( el.parentElement ) { - el = el.parentElement - } else { - break; - } - } - return isVisible; -} diff --git a/spaces/hlydecker/RA-document-QAchat/streamlit_langchain_chat/customized_langchain/indexes/__init__.py b/spaces/hlydecker/RA-document-QAchat/streamlit_langchain_chat/customized_langchain/indexes/__init__.py deleted file mode 100644 index 462ceedd7df4aadc27a2527f9b6613077f17f331..0000000000000000000000000000000000000000 --- a/spaces/hlydecker/RA-document-QAchat/streamlit_langchain_chat/customized_langchain/indexes/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from streamlit_langchain_chat.customized_langchain.indexes.graph import GraphIndexCreator -# from streamlit_langchain_chat.customized_langchain.vectorstore import VectorstoreIndexCreator - -__all__ = [ - "GraphIndexCreator", - # "VectorstoreIndexCreator" -] \ No newline at end of file diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/summarize_plans.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/summarize_plans.py deleted file mode 100644 index 1c58c39f42a070f26bb8ee651a6c664c6d73553e..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/summarize_plans.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from batchgenerators.utilities.file_and_folder_operations import * -from nnunet.paths import preprocessing_output_dir - - -# This file is intended to double check nnUNets design choices. 
It is intended to be used for developent purposes only -def summarize_plans(file): - plans = load_pickle(file) - print("num_classes: ", plans['num_classes']) - print("modalities: ", plans['modalities']) - print("use_mask_for_norm", plans['use_mask_for_norm']) - print("keep_only_largest_region", plans['keep_only_largest_region']) - print("min_region_size_per_class", plans['min_region_size_per_class']) - print("min_size_per_class", plans['min_size_per_class']) - print("normalization_schemes", plans['normalization_schemes']) - print("stages...\n") - - for i in range(len(plans['plans_per_stage'])): - print("stage: ", i) - print(plans['plans_per_stage'][i]) - print("") - - -def write_plans_to_file(f, plans_file): - print(plans_file) - a = load_pickle(plans_file) - stages = list(a['plans_per_stage'].keys()) - stages.sort() - for stage in stages: - patch_size_in_mm = [i * j for i, j in zip(a['plans_per_stage'][stages[stage]]['patch_size'], - a['plans_per_stage'][stages[stage]]['current_spacing'])] - median_patient_size_in_mm = [i * j for i, j in zip(a['plans_per_stage'][stages[stage]]['median_patient_size_in_voxels'], - a['plans_per_stage'][stages[stage]]['current_spacing'])] - f.write(plans_file.split("/")[-2]) - f.write(";%s" % plans_file.split("/")[-1]) - f.write(";%d" % stage) - f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['batch_size'])) - f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['num_pool_per_axis'])) - f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['patch_size'])) - f.write(";%s" % str([str("%03.2f" % i) for i in patch_size_in_mm])) - f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['median_patient_size_in_voxels'])) - f.write(";%s" % str([str("%03.2f" % i) for i in median_patient_size_in_mm])) - f.write(";%s" % str([str("%03.2f" % i) for i in a['plans_per_stage'][stages[stage]]['current_spacing']])) - f.write(";%s" % str([str("%03.2f" % i) for i in a['plans_per_stage'][stages[stage]]['original_spacing']])) - f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['pool_op_kernel_sizes'])) - f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['conv_kernel_sizes'])) - f.write(";%s" % str(a['data_identifier'])) - f.write("\n") - - -if __name__ == "__main__": - base_dir = './'#preprocessing_output_dir'' - task_dirs = [i for i in subdirs(base_dir, join=False, prefix="Task") if i.find("BrainTumor") == -1 and i.find("MSSeg") == -1] - print("found %d tasks" % len(task_dirs)) - - with open("2019_02_06_plans_summary.csv", 'w') as f: - f.write("task;plans_file;stage;batch_size;num_pool_per_axis;patch_size;patch_size(mm);median_patient_size_in_voxels;median_patient_size_in_mm;current_spacing;original_spacing;pool_op_kernel_sizes;conv_kernel_sizes\n") - for t in task_dirs: - print(t) - tmp = join(base_dir, t) - plans_files = [i for i in subfiles(tmp, suffix=".pkl", join=False) if i.find("_plans_") != -1 and i.find("Dgx2") == -1] - for p in plans_files: - write_plans_to_file(f, join(tmp, p)) - f.write("\n") - - diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/utils_new/generate_zone_boundaries.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/utils_new/generate_zone_boundaries.py deleted file mode 100644 index 99b907c6284e41f001b3689f8fe8652b7e7f0305..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/utils_new/generate_zone_boundaries.py +++ /dev/null @@ -1,58 +0,0 @@ -from skimage import io -import numpy as np -from batchgenerators.utilities.file_and_folder_operations import * -import argparse - -def 
extract_boundaries(sample_path): - zone_orig = io.imread(sample_path) - - zone_pad = np.pad(zone_orig, ((1,1),(1,1)), 'reflect') - zone_b = np.pad(zone_orig, ((2,0),(1,1)), "reflect") - zone_t = np.pad(zone_orig, ((0,2),(1,1)), "reflect") - zone_r = np.pad(zone_orig, ((1,1),(0,2)), "reflect") - zone_l = np.pad(zone_orig, ((1,1),(2,0)), "reflect") - - boundaries = np.zeros_like(zone_pad) - isboundary = np.logical_or.reduce((zone_pad != zone_b, zone_pad != zone_t, zone_pad != zone_r, zone_pad != zone_l)) - boundaries[np.logical_and(zone_pad == 127, isboundary)] = 255 - - return boundaries[1:-1, 1:-1] - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("-data_path", - help="percentage of the dataset used for training validation and test") - args = parser.parse_args() - data_path = args.data_path - output_path = join(data_path, 'boundaries') - - train_data_path = join(data_path, 'zones', 'train') - test_data_path = join(data_path,'zones', 'test') - - train_output_path = join(output_path, 'train') - test_output_path = join(output_path, 'test') - - maybe_mkdir_p(output_path) - maybe_mkdir_p(train_output_path) - maybe_mkdir_p(test_output_path) - - # Train - for train_file in os.listdir(train_data_path): - print(train_file) - # load image - sample_path = join(train_data_path, train_file) - boundary = extract_boundaries(sample_path) - output_file = train_file[:-len('zones.png')] + 'boundary.png' - output_path = join(train_output_path, output_file) - io.imsave(output_path, boundary) - - # Test - for test_file in os.listdir(test_data_path): - print(test_file) - # load image - sample_path = join(test_data_path, test_file) - boundary = extract_boundaries(sample_path) - output_file = test_file[:-len('zones.png')]+ 'boundary.png' - output_path = join(test_output_path, output_file) - io.imsave(output_path, boundary) \ No newline at end of file diff --git a/spaces/huggingface-timeseries/time-series-score/src/score.py b/spaces/huggingface-timeseries/time-series-score/src/score.py deleted file mode 100644 index 624dd0f7bd29f65916e182eb13b3ba29c2dd2a2f..0000000000000000000000000000000000000000 --- a/spaces/huggingface-timeseries/time-series-score/src/score.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import List - -from gluonts.dataset.split import split -from gluonts.dataset.common import Dataset -from gluonts.model.forecast import Forecast -from gluonts.evaluation.backtest import _to_dataframe, Evaluator - - -def score_predictions( - dataset: Dataset, - predictions: List[Forecast], - prediction_length: int, - seasonality: int, -): - _, test_template = split(dataset, offset=-prediction_length) - test_data = test_template.generate_instances(prediction_length) - ts_iterator = map(_to_dataframe, test_data) - evaluator = Evaluator( - quantiles=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], seasonality=seasonality - ) - metrics, _ = evaluator(ts_iterator=ts_iterator, fcst_iterator=predictions) - return metrics diff --git a/spaces/hysts/Shap-E/app_text_to_3d.py b/spaces/hysts/Shap-E/app_text_to_3d.py deleted file mode 100644 index b45adfb68e60d53b652c7369dc4631f70bfc70f3..0000000000000000000000000000000000000000 --- a/spaces/hysts/Shap-E/app_text_to_3d.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python - -import gradio as gr -import spaces - -from model import Model -from settings import CACHE_EXAMPLES, MAX_SEED -from utils import randomize_seed_fn - - -def create_demo(model: Model) -> gr.Blocks: - examples = [ - "A chair that looks like an avocado", - "An airplane 
that looks like a banana", - "A spaceship", - "A birthday cupcake", - "A chair that looks like a tree", - "A green boot", - "A penguin", - "Ube ice cream cone", - "A bowl of vegetables", - ] - - @spaces.GPU - def process_example_fn(prompt: str) -> str: - return model.run_text(prompt) - - @spaces.GPU - def run(prompt: str, seed: int, guidance_scale: float, num_inference_steps: int) -> str: - return model.run_text(prompt, seed, guidance_scale, num_inference_steps) - - with gr.Blocks() as demo: - with gr.Group(): - with gr.Row(elem_id="prompt-container"): - prompt = gr.Text( - label="Prompt", - show_label=False, - max_lines=1, - placeholder="Enter your prompt", - container=False, - ) - run_button = gr.Button("Run", scale=0) - result = gr.Model3D(label="Result", show_label=False) - with gr.Accordion("Advanced options", open=False): - seed = gr.Slider( - label="Seed", - minimum=0, - maximum=MAX_SEED, - step=1, - value=0, - ) - randomize_seed = gr.Checkbox(label="Randomize seed", value=True) - guidance_scale = gr.Slider( - label="Guidance scale", - minimum=1, - maximum=20, - step=0.1, - value=15.0, - ) - num_inference_steps = gr.Slider( - label="Number of inference steps", - minimum=2, - maximum=100, - step=1, - value=64, - ) - - gr.Examples( - examples=examples, - inputs=prompt, - outputs=result, - fn=process_example_fn, - cache_examples=CACHE_EXAMPLES, - ) - - gr.on( - triggers=[prompt.submit, run_button.click], - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - api_name=False, - concurrency_limit=None, - ).then( - fn=run, - inputs=[ - prompt, - seed, - guidance_scale, - num_inference_steps, - ], - outputs=result, - api_name="text-to-3d", - concurrency_id="gpu", - concurrency_limit=1, - ) - return demo diff --git a/spaces/hzy123/bingo/src/components/providers.tsx b/spaces/hzy123/bingo/src/components/providers.tsx deleted file mode 100644 index 892226412d80fe0b05211911b9e245cd22876460..0000000000000000000000000000000000000000 --- a/spaces/hzy123/bingo/src/components/providers.tsx +++ /dev/null @@ -1,15 +0,0 @@ -'use client' - -import * as React from 'react' -import { ThemeProvider as NextThemesProvider } from 'next-themes' -import { ThemeProviderProps } from 'next-themes/dist/types' - -import { TooltipProvider } from '@/components/ui/tooltip' - -export function Providers({ children, ...props }: ThemeProviderProps) { - return ( - <NextThemesProvider {...props}> - <TooltipProvider>{children}</TooltipProvider> - </NextThemesProvider> - ) -} diff --git a/spaces/iSky/Speech-audio-to-text-with-grammar-correction/app.py b/spaces/iSky/Speech-audio-to-text-with-grammar-correction/app.py deleted file mode 100644 index 69d566ae24663a52db8aad34470a1a4d0534d8ca..0000000000000000000000000000000000000000 --- a/spaces/iSky/Speech-audio-to-text-with-grammar-correction/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import gradio as gr - -from transformers import pipeline - -s2t=gr.Interface.load('huggingface/facebook/s2t-medium-librispeech-asr') - -grammar = gr.Interface.load('huggingface/prithivida/grammar_error_correcter_v1') - - -def out(audio1,audio2): - if (audio1==None) and (audio2==None): - return "no audio","no audio" - elif audio1==None: - - x=s2t(audio2) - return x, grammar(x) - else: - x=s2t(audio1) - return x, grammar(x) - -iface = gr.Interface( - fn=out, - title="Speech Audio to text (with corrected grammar)", - description="2 possible input methods of audio file, transformed to text (as an output) and corrected grammar after(another output)!", - 
inputs=[gr.inputs.Audio(source="upload", type="filepath", label=None, optional=True), - gr.inputs.Audio(source="microphone", type="filepath", label=None, optional=True)], - examples=[["Grammar-Correct-Sample.mp3"], ["Grammar-Wrong-Sample.mp3"],], - outputs=['text','text'] -) - -iface.launch(enable_queue=True,show_error=True) \ No newline at end of file diff --git a/spaces/imseldrith/BookTODataset/README.md b/spaces/imseldrith/BookTODataset/README.md deleted file mode 100644 index 12882d21ac53d170d2719c5e9f61dddf5fa717cd..0000000000000000000000000000000000000000 --- a/spaces/imseldrith/BookTODataset/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: BookTODataset -emoji: 🐠 -colorFrom: green -colorTo: red -sdk: streamlit -#sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/imseldrith/DeepFakeAI/DeepFakeAI/uis/components/benchmark.py b/spaces/imseldrith/DeepFakeAI/DeepFakeAI/uis/components/benchmark.py deleted file mode 100644 index 450cdd0dc82cf74fa203698b66b8860d913917a8..0000000000000000000000000000000000000000 --- a/spaces/imseldrith/DeepFakeAI/DeepFakeAI/uis/components/benchmark.py +++ /dev/null @@ -1,116 +0,0 @@ -from typing import Any, Optional, List -import time -import tempfile -import statistics -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.capturer import get_video_frame_total -from DeepFakeAI.core import conditional_process -from DeepFakeAI.uis.typing import Update -from DeepFakeAI.utilities import normalize_output_path, clear_temp - -BENCHMARK_RESULT_DATAFRAME : Optional[gradio.Dataframe] = None -BENCHMARK_CYCLES_SLIDER : Optional[gradio.Button] = None -BENCHMARK_START_BUTTON : Optional[gradio.Button] = None -BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None - - -def render() -> None: - global BENCHMARK_RESULT_DATAFRAME - global BENCHMARK_CYCLES_SLIDER - global BENCHMARK_START_BUTTON - global BENCHMARK_CLEAR_BUTTON - - with gradio.Box(): - BENCHMARK_RESULT_DATAFRAME = gradio.Dataframe( - label = wording.get('benchmark_result_dataframe_label'), - headers = - [ - 'target_path', - 'benchmark_cycles', - 'average_run', - 'fastest_run', - 'slowest_run', - 'relative_fps' - ], - col_count = (6, 'fixed'), - row_count = (7, 'fixed'), - datatype = - [ - 'str', - 'number', - 'number', - 'number', - 'number', - 'number' - ] - ) - BENCHMARK_CYCLES_SLIDER = gradio.Slider( - label = wording.get('benchmark_cycles_slider_label'), - minimum = 1, - step = 1, - value = 3, - maximum = 10 - ) - with gradio.Row(): - BENCHMARK_START_BUTTON = gradio.Button(wording.get('start_button_label')) - BENCHMARK_CLEAR_BUTTON = gradio.Button(wording.get('clear_button_label')) - - -def listen() -> None: - BENCHMARK_START_BUTTON.click(update, inputs = BENCHMARK_CYCLES_SLIDER, outputs = BENCHMARK_RESULT_DATAFRAME) - BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULT_DATAFRAME) - - -def update(benchmark_cycles : int) -> Update: - DeepFakeAI.globals.source_path = '.assets/examples/source.jpg' - target_paths =\ - [ - '.assets/examples/target-240p.mp4', - '.assets/examples/target-360p.mp4', - '.assets/examples/target-540p.mp4', - '.assets/examples/target-720p.mp4', - '.assets/examples/target-1080p.mp4', - '.assets/examples/target-1440p.mp4', - '.assets/examples/target-2160p.mp4' - ] - value = [ benchmark(target_path, benchmark_cycles) for target_path in target_paths ] - return 
gradio.update(value = value) - - -def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]: - process_times = [] - total_fps = 0.0 - for i in range(benchmark_cycles + 1): - DeepFakeAI.globals.target_path = target_path - DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, tempfile.gettempdir()) - video_frame_total = get_video_frame_total(DeepFakeAI.globals.target_path) - start_time = time.perf_counter() - conditional_process() - end_time = time.perf_counter() - process_time = end_time - start_time - fps = video_frame_total / process_time - if i > 0: - process_times.append(process_time) - total_fps += fps - average_run = round(statistics.mean(process_times), 2) - fastest_run = round(min(process_times), 2) - slowest_run = round(max(process_times), 2) - relative_fps = round(total_fps / benchmark_cycles, 2) - return\ - [ - DeepFakeAI.globals.target_path, - benchmark_cycles, - average_run, - fastest_run, - slowest_run, - relative_fps - ] - - -def clear() -> Update: - if DeepFakeAI.globals.target_path: - clear_temp(DeepFakeAI.globals.target_path) - return gradio.update(value = None) diff --git a/spaces/inamXcontru/PoeticTTS/Autocad 2015 activation code free download crack for 81 Tips and tricks to bypass Autodesk 2015 activation process.md b/spaces/inamXcontru/PoeticTTS/Autocad 2015 activation code free download crack for 81 Tips and tricks to bypass Autodesk 2015 activation process.md deleted file mode 100644 index 2c6935afef5553b0657434b4257c41688f23c74b..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Autocad 2015 activation code free download crack for 81 Tips and tricks to bypass Autodesk 2015 activation process.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>autocad 2015 activation code free download crack for 81</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash; <a href="https://gohhs.com/2uz2NT">https://gohhs.com/2uz2NT</a></b></p><br /><br /> -<br /> - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Envisioneer8torrent.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Envisioneer8torrent.md deleted file mode 100644 index b7b851da6172a8a6a84d67a354a2960316985ddb..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Envisioneer8torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>envisioneer8torrent</h2><br /><p><b><b>DOWNLOAD</b> &#187;&#187;&#187; <a href="https://urlin.us/2uEwLG">https://urlin.us/2uEwLG</a></b></p><br /><br /> - -Envisioneer 8 Torrent mirror 1 mirror 2 mirror 3 &. Envisioneer Express 7 7.0.. Please visit the main page of Envisioneer Express 7 on Software ... 
1fdad05405<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/inreVtussa/clothingai/Examples/Autodata340frtorrent.md b/spaces/inreVtussa/clothingai/Examples/Autodata340frtorrent.md deleted file mode 100644 index 079041fbe1e60622b044cfab34093b5c82590143..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Autodata340frtorrent.md +++ /dev/null @@ -1,8 +0,0 @@ - -<p>XToolBox 2012 Keygen <br> PSX Video Game Download Site <br> Seagate Backup Plus Saver Wi-Fi Desktop Drive MP3 Player Portable HD Screen 7-in-1 <br> DS 4 Online <br> Oculus Rift Core Open Source Driver for Ubuntu Linux <br> Creamy Dynamics Voice of Romance VST Plugin Collection for.8 <br> 5CrackAir 3-14 <br> How to Transfer Between Android and iPhone Software Download <br> Bakugan Battle Planet Rebirth v.0.01.0 FULL Crack <br> Autodata340frtorrent <br> FULL AcmeBarGig Guitar FX VST Plugin Collection <br> Sachin Tendulkar 200 Runs Full Video Free Download <br></p> -<p>MyNewNetwork.zip <br> xBox360LiveAndPhone.rar <br> Seagate Backup Plus Saver Wi-Fi Desktop Drive MP3 Player Portable HD Screen 7-in-1 <br> DS 4 Online <br> Oculus Rift Core Open Source Driver for Ubuntu Linux <br> Creamy Dynamics Voice of Romance VST Plugin Collection for.8 <br> 5CrackAir 3-14 <br> How to Transfer Between Android and iPhone Software Download <br> Bakugan Battle Planet Rebirth v.0.01.0 FULL Crack <br> Autodata340frtorrent <br> FULL AcmeBarGig Guitar FX VST Plugin Collection <br> Sachin Tendulkar 200 Runs Full Video Free Download <br></p> -<h2>Autodata340frtorrent</h2><br /><p><b><b>Download Zip</b> &#10031;&#10031;&#10031; <a href="https://tiurll.com/2uCkSD">https://tiurll.com/2uCkSD</a></b></p><br /><br /> -<p> Why should I download Autodata340frtorrent? Autodata340frtorrent is a website that allows you to download vehicle data for. Autodata340frtorrent also offers Autodata software for various vehicles. Autodata340frtorrent FULL AcmeBarGig Guitar FX VST Plugin Collection Sachin Tendulkar 200 Runs Full Video Free Download. </p> -<p> progmatic 7 full indir <br> Biomedical Instrumentation By Dr M Arumugam Torrent <br> SimCity.2013.NO.DRM.CRACKED.1.5-VULPESZEDRA[rarbg] Generator Online <br> symantec ghost solution suite 2.5.1 license keygen <br> dj models arah 62lkjh <br> Crack CarryMap V 2 3 123 <br> Install-google-play-allwinner.rarl <br> Autodata340frtorrent <br> FULL AcmeBarGig Guitar FX VST Plugin Collection <br> Sachin Tendulkar 200 Runs Full Video Free Download <br></p> 899543212b<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Borderlands2dlcTOP Downloadxbox360.md b/spaces/inreVtussa/clothingai/Examples/Borderlands2dlcTOP Downloadxbox360.md deleted file mode 100644 index 478cc38bfdd34cb4171af978ad4e985baa4eef0b..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Borderlands2dlcTOP Downloadxbox360.md +++ /dev/null @@ -1,11 +0,0 @@ -<br /> -<p>borderlands2dlcdownloadxbox360 bd86983c93 tamicha. at 7:50 pm Reply. raqucutt says: Essilor Kappa Ctd Manual mararoz 7b17bfd26b https://trello.com/c/Spdp20ml/60-verified-borderlands2dlcdownloadxbox360. 
bernleve </p> -<h2>borderlands2dlcdownloadxbox360</h2><br /><p><b><b>Download File</b> &#8250;&#8250;&#8250;&#8250;&#8250; <a href="https://tiurll.com/2uCj8r">https://tiurll.com/2uCj8r</a></b></p><br /><br /> -<p>https://coub.com/stories/2938409-install-borderlands2dlcdownloadxbox360 https://coub.com/stories/2938407-dilber-ay-buyuk-kumar-direk-izle-new ..thingiverse.com/assets/3d/52/78/df/24/emmaquan876.htmlhttps://cdn.thingiverse.com/assets/71/eb/a9/79/f4/borderlands2dlcdownloadxbox360.htmlhttps://cdn. </p> -<p>https://coub.com/stories/2938409-install-borderlands2dlcdownloadxbox360 https://coub.com/stories/2938407-dilber-ay-buyuk-kumar-direk-izle-. https://trello.com/c/YiQqEnzL/32-2013-pthc-3gb-lolita-torrent-link https://trello.com/c/Spdp20ml/60-verified-borderlands2dlcdownloadxbox360 </p> -<p>it starts to download as it normally does, but nothing happens after https://coub.com/stories/2938409-install-borderlands2dlcdownloadxbox360 https://coub.com/stories/2938407-dilber-ay-buyuk-kumar-direk-izle-new .designer.io/borderlands2dlcdownloadxbox360 </p> -<p></p> -<p>https://coub.com/stories/2938409-install-borderlands2dlcdownloadxbox360 https://coub.com/stories/2938407-dilber-ay-buyuk-kumar-direk-izle-new <b>Links</b> tremesoft.net/tremorvideos/24/porta. It starts to download as it normally does, but nothing happens after https://coub.com/stories/2938409-install-borderlands2dlcdownloadxbox360 https://coub.com/stories/2938407-dilber-ay-buyuk-kumar-direk-izle-new .designer.io/borderlands2dlcdownloadxbox360 </p> -<p>https://coub.com/stories/2938407-dilber-ay-buyuk-kumar-direk-izle-new https://trello.com/c/YiQqEnzL/32-2013-pthc-3gb-lolita-torrent-link https://trello.com/c/Spdp20ml/60-verified-borderlands2dlcdownloadxbox360 </p> 899543212b<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Diljale Movie In Hindi Download 720p.md b/spaces/inreVtussa/clothingai/Examples/Diljale Movie In Hindi Download 720p.md deleted file mode 100644 index 2e7ceb1c8ca69238d9b72cbdf9ded67f894f0aa7..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Diljale Movie In Hindi Download 720p.md +++ /dev/null @@ -1,20 +0,0 @@ - -<h1>Diljale Movie in Hindi Download 720p: A Romantic Action Film with Ajay Devgn and Sonali Bendre</h1> -<p>If you are looking for a movie that combines romance, action and drama, then you should watch Diljale (1996), a Hindi film directed by Harry Baweja and starring Ajay Devgn, Sonali Bendre and Madhoo. Diljale is a story of love, betrayal and patriotism set in the backdrop of the Indian independence movement.</p> -<h2>Diljale movie in hindi download 720p</h2><br /><p><b><b>Download</b> &#9733;&#9733;&#9733; <a href="https://tiurll.com/2uCkVR">https://tiurll.com/2uCkVR</a></b></p><br /><br /> -<p>In this article, we will tell you more about the plot, the cast and the songs of Diljale, and how you can download Diljale movie in Hindi 720p quality from a reliable source.</p> -<h2>What is Diljale Movie About?</h2> -<p>Diljale is a movie that revolves around Shaka (Ajay Devgn), a young man who falls in love with Radhika (Sonali Bendre), the daughter of a police officer. However, their love faces many obstacles as Shaka is falsely accused of being a terrorist and is imprisoned by Radhika's father. 
Shaka escapes from jail and joins a group of freedom fighters led by Dara (Amrish Puri), who are fighting against the British rule in India.</p> -<p>Meanwhile, Radhika is forced to marry Rajeev (Parmeet Sethi), a wealthy businessman who is secretly working for the British. Radhika still loves Shaka and hopes to reunite with him someday. But fate has other plans for them as Shaka becomes a ruthless rebel who is ready to sacrifice everything for his country, while Radhika becomes a victim of Rajeev's cruelty and violence.</p> -<p>Will Shaka and Radhika ever find their happiness together? Will Shaka succeed in his mission to free India from the British? Will Rajeev get his deserved punishment? Watch Diljale movie to find out!</p> -<p></p> -<h2>Who are the Cast Members of Diljale Movie?</h2> -<p>Diljale movie features some of the most popular actors of Bollywood in the 90s. Here are some of them:</p> -<ul> -<li>Ajay Devgn as Shaka: Ajay Devgn is one of the most versatile and successful actors in Hindi cinema. He has won four Filmfare Awards and two National Film Awards for his performances in various genres. Some of his notable films include Phool Aur Kaante (1991), Zakhm (1998), The Legend of Bhagat Singh (2002), Singham (2011) and Tanhaji (2020).</li> -<li>Sonali Bendre as Radhika: Sonali Bendre is a former model and actress who has appeared in many Hindi, Telugu, Tamil and Kannada films. She is known for her roles in films like Sarfarosh (1999), Hum Saath-Saath Hain (1999), Kal Ho Naa Ho (2003) and Once Upon a Time in Mumbai Dobaara! (2013). She has also been a judge on reality shows like India's Got Talent and India's Best Dramebaaz.</li> -<li>Madhoo as Shabnam: Madhoo is an actress who has worked in Hindi, Tamil, Telugu, Malayalam and Kannada films. She made her debut with Phool Aur Kaante (1991) opposite Ajay Devgn. She is also known for her roles in films like Roja (1992), Diljale (1996), Pehchaan (1993) and Ziddi (1997).</li> -<li>Amrish Puri as Dara: Amrish Puri was one of the most iconic villains of Indian cinema. He played memorable roles in films like Mr. India (1987), Nayak (2001), Karan Arjun (1995) and Indiana Jones and the Temple of Doom (1984). He also played positive roles in films like Dilwale Dulhania Le Jayenge (1995), Ghatak (1996) and Virasat (1997).</li> -<li>Parmeet Sethi as Rajeev: Parmeet Sethi is an actor, director and writer who has worked in Hindi, Punjabi and English films. 
He made his debut with Dilwale Dulhania Le Jay</p> d5da3c52bf<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/israelgonzalezb/stable-diffusion/app.py b/spaces/israelgonzalezb/stable-diffusion/app.py deleted file mode 100644 index 8a58a8118555867086cf9c75536243af9d0992f7..0000000000000000000000000000000000000000 --- a/spaces/israelgonzalezb/stable-diffusion/app.py +++ /dev/null @@ -1,366 +0,0 @@ -import gradio as gr -import torch -from torch import autocast -from diffusers import StableDiffusionPipeline -from datasets import load_dataset -from PIL import Image -from io import BytesIO -import base64 -import re -import os -import requests - - -from share_btn import community_icon_html, loading_icon_html, share_js - -model_id = "CompVis/stable-diffusion-v1-4" -device = "cuda" - -word_list_dataset = load_dataset("stabilityai/word-list", data_files="list.txt", use_auth_token=True) -word_list = word_list_dataset["train"]['text'] - -is_gpu_busy = False -def infer(prompt): - global is_gpu_busy - samples = 4 - steps = 50 - scale = 7.5 - #When running locally you can also remove this filter - for filter in word_list: - if re.search(rf"\b{filter}\b", prompt): - raise gr.Error("Unsafe content found. Please try again with different prompts.") - - generator = torch.Generator(device=device).manual_seed(seed) - print("Is GPU busy? ", is_gpu_busy) - images = [] - if(not is_gpu_busy): - is_gpu_busy = True - images_list = pipe( - [prompt] * samples, - num_inference_steps=steps, - guidance_scale=scale, - #generator=generator, - ) - is_gpu_busy = False - safe_image = Image.open(r"unsafe.png") - for i, image in enumerate(images_list["sample"]): - if(images_list["nsfw_content_detected"][i]): - images.append(safe_image) - else: - images.append(image) - else: - url = os.getenv('JAX_BACKEND_URL') - payload = {'prompt': prompt} - images_request = requests.post(url, json = payload) - for image in images_request.json()["images"]: - image_b64 = (f"data:image/jpeg;base64,{image}") - images.append(image_b64) - - return images - - -css = """ - .gradio-container { - font-family: 'IBM Plex Sans', sans-serif; - } - .gr-button { - color: white; - border-color: black; - background: black; - } - input[type='range'] { - accent-color: black; - } - .dark input[type='range'] { - accent-color: #dfdfdf; - } - .container { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; - } - #gallery { - min-height: 22rem; - margin-bottom: 15px; - margin-left: auto; - margin-right: auto; - border-bottom-right-radius: .5rem !important; - border-bottom-left-radius: .5rem !important; - } - #gallery>div>.h-full { - min-height: 20rem; - } - .details:hover { - text-decoration: underline; - } - .gr-button { - white-space: nowrap; - } - .gr-button:focus { - border-color: rgb(147 197 253 / var(--tw-border-opacity)); - outline: none; - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-border-opacity: 1; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); - --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); - --tw-ring-opacity: .5; - } - #advanced-btn { - font-size: .7rem !important; - line-height: 19px; - margin-top: 12px; - margin-bottom: 12px; - padding: 2px 8px; - border-radius: 14px !important; - } - #advanced-options { - display: none; - margin-bottom: 20px; - } - .footer { - margin-bottom: 45px; - 
margin-top: 35px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } - .acknowledgments h4{ - margin: 1.25em 0 .25em 0; - font-weight: bold; - font-size: 115%; - } - #container-advanced-btns{ - display: flex; - flex-wrap: wrap; - justify-content: space-between; - align-items: center; - } - .animate-spin { - animation: spin 1s linear infinite; - } - @keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } - } - #share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; - } - #share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; - } - #share-btn * { - all: unset; - } - .gr-form{ - flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0; - } - #prompt-container{ - gap: 0; - } -""" - -block = gr.Blocks(css=css) - -examples = [ - [ - 'A high tech solarpunk utopia in the Amazon rainforest', -# 4, -# 45, -# 7.5, -# 1024, - ], - [ - 'A pikachu fine dining with a view to the Eiffel Tower', -# 4, -# 45, -# 7, -# 1024, - ], - [ - 'A mecha robot in a favela in expressionist style', -# 4, -# 45, -# 7, -# 1024, - ], - [ - 'an insect robot preparing a delicious meal', -# 4, -# 45, -# 7, -# 1024, - ], - [ - "A small cabin on top of a snowy mountain in the style of Disney, artstation", -# 4, -# 45, -# 7, -# 1024, - ], -] - - -with block: - gr.HTML( - """ - <div style="text-align: center; max-width: 650px; margin: 0 auto;"> - <div - style=" - display: inline-flex; - align-items: center; - gap: 0.8rem; - font-size: 1.75rem; - " - > - <svg - width="0.65em" - height="0.65em" - viewBox="0 0 115 115" - fill="none" - xmlns="http://www.w3.org/2000/svg" - > - <rect width="23" height="23" fill="white"></rect> - <rect y="69" width="23" height="23" fill="white"></rect> - <rect x="23" width="23" height="23" fill="#AEAEAE"></rect> - <rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect> - <rect x="46" width="23" height="23" fill="white"></rect> - <rect x="46" y="69" width="23" height="23" fill="white"></rect> - <rect x="69" width="23" height="23" fill="black"></rect> - <rect x="69" y="69" width="23" height="23" fill="black"></rect> - <rect x="92" width="23" height="23" fill="#D9D9D9"></rect> - <rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect> - <rect x="115" y="46" width="23" height="23" fill="white"></rect> - <rect x="115" y="115" width="23" height="23" fill="white"></rect> - <rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect> - <rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect> - <rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect> - <rect x="92" y="69" width="23" height="23" fill="white"></rect> - <rect x="69" y="46" width="23" height="23" fill="white"></rect> - <rect x="69" y="115" width="23" height="23" fill="white"></rect> - <rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect> - <rect x="46" y="46" width="23" height="23" fill="black"></rect> - <rect x="46" y="115" width="23" height="23" fill="black"></rect> - 
<rect x="46" y="69" width="23" height="23" fill="black"></rect> - <rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect> - <rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect> - <rect x="23" y="69" width="23" height="23" fill="black"></rect> - </svg> - <h1 style="font-weight: 900; margin-bottom: 7px;"> - Stable Diffusion Demo - </h1> - </div> - <p style="margin-bottom: 10px; font-size: 94%"> - Stable Diffusion is a state of the art text-to-image model that generates - images from text.<br>For faster generation and API - access you can try - <a - href="http://beta.dreamstudio.ai/" - style="text-decoration: underline;" - target="_blank" - >DreamStudio Beta</a - > - </p> - </div> - """ - ) - with gr.Group(): - with gr.Box(): - with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True): - text = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=1, - placeholder="Enter your prompt", - elem_id="prompt-text-input", - ).style( - border=(True, False, True, True), - rounded=(True, False, False, True), - container=False, - ) - btn = gr.Button("Generate image").style( - margin=False, - rounded=(False, True, True, False), - full_width=False, - ) - - gallery = gr.Gallery( - label="Generated images", show_label=False, elem_id="gallery" - ).style(grid=[2], height="auto") - - with gr.Group(elem_id="container-advanced-btns"): - advanced_button = gr.Button("Advanced options", elem_id="advanced-btn") - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html) - loading_icon = gr.HTML(loading_icon_html) - share_button = gr.Button("Share to community", elem_id="share-btn") - - with gr.Row(elem_id="advanced-options"): - gr.Markdown("Advanced settings are temporarily unavailable") - samples = gr.Slider(label="Images", minimum=1, maximum=4, value=4, step=1) - steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=45, step=1) - scale = gr.Slider( - label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1 - ) - seed = gr.Slider( - label="Seed", - minimum=0, - maximum=2147483647, - step=1, - randomize=True, - ) - - ex = gr.Examples(examples=examples, fn=infer, inputs=text, outputs=[gallery, community_icon, loading_icon, share_button], cache_examples=False) - ex.dataset.headers = [""] - - text.submit(infer, inputs=text, outputs=[gallery], postprocess=False) - btn.click(infer, inputs=text, outputs=[gallery], postprocess=False) - - advanced_button.click( - None, - [], - text, - _js=""" - () => { - const options = document.querySelector("body > gradio-app").querySelector("#advanced-options"); - options.style.display = ["none", ""].includes(options.style.display) ? "flex" : "none"; - }""", - ) - share_button.click( - None, - [], - [], - _js=share_js, - ) - gr.HTML( - """ - <div class="footer"> - <p>Model by <a href="https://huggingface.co/CompVis" style="text-decoration: underline;" target="_blank">CompVis</a> and <a href="https://huggingface.co/stabilityai" style="text-decoration: underline;" target="_blank">Stability AI</a> - backend running JAX on TPUs due to generous support of <a href="https://sites.research.google/trc/about/" style="text-decoration: underline;" target="_blank">Google TRC program</a> - Gradio Demo by 🤗 Hugging Face - </p> - </div> - <div class="acknowledgments"> - <p><h4>LICENSE</h4> -The model is licensed with a <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" style="text-decoration: underline;" target="_blank">CreativeML Open RAIL-M</a> license. 
The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank" style="text-decoration: underline;" target="_blank">read the license</a></p> - <p><h4>Biases and content acknowledgment</h4> -Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the <a href="https://laion.ai/blog/laion-5b/" style="text-decoration: underline;" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" style="text-decoration: underline;" target="_blank">model card</a></p> - </div> - """ - ) - -block.queue(concurrency_count=40, max_size=20).launch(max_threads=150) \ No newline at end of file diff --git a/spaces/jackycedar/pdfs/chat.py b/spaces/jackycedar/pdfs/chat.py deleted file mode 100644 index cef9169c8cb66fc2fed5770464be426a117e1cd4..0000000000000000000000000000000000000000 --- a/spaces/jackycedar/pdfs/chat.py +++ /dev/null @@ -1,186 +0,0 @@ -import gradio as gr -import os -import sys -import json -import requests - -MODEL = "gpt-3.5-turbo" -API_URL = os.getenv("API_URL") -DISABLED = os.getenv("DISABLED") == 'True' -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") - -def exception_handler(exception_type, exception, traceback): - print("%s: %s" % (exception_type.__name__, exception)) -sys.excepthook = exception_handler -sys.tracebacklimit = 0 - -#https://github.com/gradio-app/gradio/issues/3531#issuecomment-1484029099 -def parse_codeblock(text): - lines = text.split("\n") - for i, line in enumerate(lines): - if "```" in line: - if line != "```": - lines[i] = f'<pre><code class="{lines[i][3:]}">' - else: - lines[i] = '</code></pre>' - else: - if i > 0: - lines[i] = "<br/>" + line.replace("<", "&lt;").replace(">", "&gt;") - return "".join(lines) - -def predict(inputs, top_p, temperature, chat_counter, chatbot=[], history=[]): - payload = { - "model": MODEL, - "messages": [{"role": "user", "content": f"{inputs}"}], - "temperature" : 1.0, - "top_p":1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {OPENAI_API_KEY}" - } - - # print(f"chat_counter - {chat_counter}") - if chat_counter != 0 : - messages = [] - for i, data in enumerate(history): - if i % 2 == 0: - role = 'user' - else: - role = 'assistant' - message = {} - message["role"] = role - message["content"] = data - messages.append(message) - - message = {} - message["role"] = "user" - message["content"] = inputs - messages.append(message) - payload = { - "model": MODEL, - "messages": messages, - "temperature" : temperature, - "top_p": top_p, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - - chat_counter+=1 - history.append(inputs) - token_counter = 0 - 
partial_words = "" - counter = 0 - - try: - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - response = requests.post(API_URL, headers=headers, json=payload, stream=True) - response_code = f"{response}" - #if response_code.strip() != "<Response [200]>": - # #print(f"response code - {response}") - # raise Exception(f"Sorry, hitting rate limit. Please try again later. {response}") - - for chunk in response.iter_lines(): - #Skipping first chunk - if counter == 0: - counter += 1 - continue - #counter+=1 - # check whether each line is non-empty - if chunk.decode() : - chunk = chunk.decode() - # decode each line as response data is in bytes - if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']: - partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"] - if token_counter == 0: - history.append(" " + partial_words) - else: - history[-1] = partial_words - token_counter += 1 - yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=False), gr.update(interactive=False) # resembles {chatbot: chat, state: history} - except Exception as e: - print (f'error found: {e}') - yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=True), gr.update(interactive=True) - print(json.dumps({"chat_counter": chat_counter, "payload": payload, "partial_words": partial_words, "token_counter": token_counter, "counter": counter})) - - -def reset_textbox(): - return gr.update(value='', interactive=False), gr.update(interactive=False) - -title = """<h1 align="center">GPT-3.5 Chatbot</h1>""" -if DISABLED: - title = """<h1 align="center" style="color:red">This app has reached OpenAI's usage limit. We are currently requesting an increase in our quota. Please check back in a few days.</h1>""" -description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form: -``` -User: <utterance> -Assistant: <utterance> -User: <utterance> -Assistant: <utterance> -... -``` -In this app, you can explore the outputs of a gpt-3.5 LLM. -""" - -theme = gr.themes.Default(primary_hue="green") - -with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} - #chatbot {height: 520px; overflow: auto;}""", - theme=theme) as demo: - gr.HTML(title) - gr.HTML("""<h3 align="center">This app provides you full access to GPT-3.5 (4096 token limit). 
You don't need any OPENAI API key.</h1>""") - #gr.HTML('''<center><a href="https://huggingface.co/spaces/yuntian-deng/ChatGPT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''') - with gr.Column(elem_id = "col_container", visible=False) as main_block: - #API Key is provided by OpenAI - #openai_api_key = gr.Textbox(type='password', label="Enter only your OpenAI API key here") - chatbot = gr.Chatbot(elem_id='chatbot') #c - inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") #t - state = gr.State([]) #s - with gr.Row(): - with gr.Column(scale=7): - b1 = gr.Button(visible=not DISABLED).style(full_width=True) - with gr.Column(scale=3): - server_status_code = gr.Textbox(label="Status code from OpenAI server", ) - - #inputs, top_p, temperature, top_k, repetition_penalty - with gr.Accordion("Parameters", open=False): - top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",) - #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",) - #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", ) - chat_counter = gr.Number(value=0, visible=False, precision=0) - - with gr.Column(elem_id = "user_consent_container") as user_consent_block: - # Get user consent - with gr.Accordion("User Consent for Data Collection, Use, and Sharing", open=True): - gr.HTML(""" - <div> - <p>By using our app, which is powered by OpenAI's API, you acknowledge and agree to the following terms regarding the data you provide:</p> - <ol> - <li><strong>Collection:</strong> We may collect information, including the inputs you type into our app and the outputs generated by OpenAI's API.</li> - <li><strong>Use:</strong> We may use the collected data for research purposes, to improve our services, and to develop new products or services, including commercial applications.</li> - <li><strong>Sharing and Publication:</strong> Your data may be published, shared with third parties, or used for analysis and reporting purposes.</li> - <li><strong>Data Retention:</strong> We may retain your data for as long as necessary.</li> - </ol> - <p>By continuing to use our app, you provide your explicit consent to the collection, use, and potential sharing of your data as described above. 
If you do not agree with our data collection, use, and sharing practices, please do not use our app.</p> - </div> - """) - accept_button = gr.Button("I Agree") - - def enable_inputs(): - return user_consent_block.update(visible=False), main_block.update(visible=True) - - accept_button.click(fn=enable_inputs, inputs=[], outputs=[user_consent_block, main_block], queue=False) - - inputs.submit(reset_textbox, [], [inputs, b1], queue=False) - inputs.submit(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key - b1.click(reset_textbox, [], [inputs, b1], queue=False) - b1.click(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key - - demo.queue(max_size=20, concurrency_count=10, api_open=False).launch() \ No newline at end of file diff --git a/spaces/jax-diffusers-event/canny_coyo1m/app.py b/spaces/jax-diffusers-event/canny_coyo1m/app.py deleted file mode 100644 index 1d443658fa36c38c223cf36e1299765275231315..0000000000000000000000000000000000000000 --- a/spaces/jax-diffusers-event/canny_coyo1m/app.py +++ /dev/null @@ -1,59 +0,0 @@ -import gradio as gr -import jax -import numpy as np -import jax.numpy as jnp -from flax.jax_utils import replicate -from flax.training.common_utils import shard -from PIL import Image -from diffusers import FlaxStableDiffusionControlNetPipeline, FlaxControlNetModel -import cv2 - -def create_key(seed=0): - return jax.random.PRNGKey(seed) - -def canny_filter(image): - gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - blurred_image = cv2.GaussianBlur(gray_image, (5, 5), 0) - edges_image = cv2.Canny(blurred_image, 50, 150) - return edges_image - -# load control net and stable diffusion v1-5 -controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( - "jax-diffusers-event/canny-coyo1m", dtype=jnp.bfloat16 -) -pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.bfloat16 -) - -def infer(prompts, negative_prompts, image): - params["controlnet"] = controlnet_params - - num_samples = 1 #jax.device_count() - rng = create_key(0) - rng = jax.random.split(rng, jax.device_count()) - im = canny_filter(image) - canny_image = Image.fromarray(im) - - prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) - negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples) - processed_image = pipe.prepare_image_inputs([canny_image] * num_samples) - - p_params = replicate(params) - prompt_ids = shard(prompt_ids) - negative_prompt_ids = shard(negative_prompt_ids) - processed_image = shard(processed_image) - - output = pipe( - prompt_ids=prompt_ids, - image=processed_image, - params=p_params, - prng_seed=rng, - num_inference_steps=50, - neg_prompt_ids=negative_prompt_ids, - jit=True, - ).images - - output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:]))) - return output_images - -gr.Interface(infer, inputs=["text", "text", "image"], outputs="gallery").launch() diff --git a/spaces/jbilcke-hf/VideoQuest/next.config.js b/spaces/jbilcke-hf/VideoQuest/next.config.js deleted file mode 100644 index b699464f86c30db1e6786ce8f42e54a208ebad5a..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/VideoQuest/next.config.js +++ /dev/null @@ -1,10 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - 
output: 'standalone', - - experimental: { - serverActions: true, - }, -} - -module.exports = nextConfig diff --git a/spaces/jbilcke-hf/ai-comic-factory/src/app/interface/settings-dialog/label.tsx b/spaces/jbilcke-hf/ai-comic-factory/src/app/interface/settings-dialog/label.tsx deleted file mode 100644 index 772d93b2087d03f44a8d35c426d96b1e1158fe81..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-comic-factory/src/app/interface/settings-dialog/label.tsx +++ /dev/null @@ -1,7 +0,0 @@ -import { ReactNode } from "react" - -export function Label({ children }: { children: ReactNode }) { - return ( - <label className="text-base font-semibold text-zinc-700">{children}</label> - ) -} \ No newline at end of file diff --git a/spaces/jcenaa/Segment-Any-RGBD/datasets/DATASETS.md b/spaces/jcenaa/Segment-Any-RGBD/datasets/DATASETS.md deleted file mode 100644 index 30d30ba314c9842098c5c38d0a47ce780283d9d9..0000000000000000000000000000000000000000 --- a/spaces/jcenaa/Segment-Any-RGBD/datasets/DATASETS.md +++ /dev/null @@ -1,122 +0,0 @@ -## Prepare Datasets for OVSeg - -This doc is a modification/extension of [MaskFormer](https://github.com/facebookresearch/MaskFormer/blob/main/datasets/README.md) following [Detectron2 fromat](https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html). - -A dataset can be used by accessing [DatasetCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.DatasetCatalog) -for its data, or [MetadataCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.MetadataCatalog) for its metadata (class names, etc). -This document explains how to setup the builtin datasets so they can be used by the above APIs. -[Use Custom Datasets](https://detectron2.readthedocs.io/tutorials/datasets.html) gives a deeper dive on how to use `DatasetCatalog` and `MetadataCatalog`, -and how to add new datasets to them. - -OVSeg has builtin support for a few datasets. -The datasets are assumed to exist in a directory specified by the environment variable -`DETECTRON2_DATASETS`. -Under this directory, detectron2 will look for datasets in the structure described below, if needed. -``` -$DETECTRON2_DATASETS/ - coco/ # COCOStuff-171 - ADEChallengeData2016/ # ADE20K-150 - ADE20K_2021_17_01/ # ADE20K-847 - VOCdevkit/ - VOC2012/ # PASCALVOC-20 - VOC2010/ # PASCALContext-59, PASCALContext-459 -``` - -You can set the location for builtin datasets by `export DETECTRON2_DATASETS=/path/to/datasets`. -If left unset, the default is `./datasets` relative to your current working directory. - -Without specific notifications, our model is trained on COCOStuff-171 and evlauted on ADE20K-150, ADE20K-847, PASCALVOC-20, PASCALContext-59 and PASCALContext-459. 
- -| dataset | split | # images | # categories | -|:--------------:|:---------:|:--------:|:------------:| -| COCO Stuff | train2017 | 118K | 171 | -| ADE20K | val | 2K | 150/847 | -| Pascal VOC | val | 1.5K | 20 | -| Pascal Context | val | 5K | 59/459 | - - -### Expected dataset structure for [COCO Stuff](https://github.com/nightrome/cocostuff): -``` -coco/ - train2017/ # http://images.cocodataset.org/zips/train2017.zip - annotations/ # http://images.cocodataset.org/annotations/annotations_trainval2017.zip - stuffthingmaps/ - stuffthingmaps_trainval2017.zip # http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip - train2017/ - # below are generated - stuffthingmaps_detectron2/ - train2017/ -``` - -The directory `stuffthingmaps_detectron2` is generated by running `python datasets/prepare_coco_stuff_sem_seg.py`. - - - -### Expected dataset structure for [ADE20k Scene Parsing (ADE20K-150)](http://sceneparsing.csail.mit.edu/): -``` -ADEChallengeData2016/ - annotations/ - images/ - objectInfo150.txt - # below are generated - annotations_detectron2/ -``` -The directory `annotations_detectron2` is generated by running `python datasets/prepare_ade20k_sem_seg.py`. - - -### Expected dataset structure for [ADE20k-Full (ADE20K-847)](https://github.com/CSAILVision/ADE20K#download): -``` -ADE20K_2021_17_01/ - images/ - index_ade20k.pkl - objects.txt - # below are generated - images_detectron2/ - annotations_detectron2/ -``` -The directories `images_detectron2` and `annotations_detectron2` are generated by running `python datasets/prepare_ade20k_full_sem_seg.py`. - -### Expected dataset structure for [Pascal VOC 2012 (PASCALVOC-20)](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/#devkit): -``` -VOCdevkit/VOC2012/ - Annotations/ - ImageSets/ - JPEGImages/ - SegmentationClass/ - SegmentationObject/ - SegmentationClassAug/ # https://github.com/kazuto1011/deeplab-pytorch/blob/master/data/datasets/voc12/README.md - # below are generated - images_detectron2/ - annotations_detectron2/ -``` - -It starts with a tar file `VOCtrainval_11-May-2012.tar`. - -We use SBD augmentated training data as `SegmentationClassAug` following [Deeplab](https://github.com/kazuto1011/deeplab-pytorch/blob/master/data/datasets/voc12/README.md) - -The directories `images_detectron2` and `annotations_detectron2` are generated by running `python datasets/prepare_voc_sem_seg.py`. - - -### Expected dataset structure for [Pascal Context](https://www.cs.stanford.edu/~roozbeh/pascal-context/): - -``` -VOCdevkit/VOC2010/ - Annotations/ - ImageSets/ - JPEGImages/ - SegmentationClass/ - SegmentationObject/ - # below are from https://www.cs.stanford.edu/~roozbeh/pascal-context/trainval.tar.gz - trainval/ - labels.txt - 59_labels.txt # https://www.cs.stanford.edu/~roozbeh/pascal-context/59_labels.txt - pascalcontext_val.txt # https://drive.google.com/file/d/1BCbiOKtLvozjVnlTJX51koIveUZHCcUh/view?usp=sharing - # below are generated - annotations_detectron2/ - pc459_val - pc59_val -``` -It starts with a tar file `VOCtrainval_03-May-2010.tar`. You may want to download the 5K validation set [here](https://drive.google.com/file/d/1BCbiOKtLvozjVnlTJX51koIveUZHCcUh/view?usp=sharing). - -The directory `annotations_detectron2` is generated by running `python datasets/prepare_pascal_context.py`. 
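Before running the `prepare_*.py` scripts, it can be worth confirming that the raw archives are unpacked where Detectron2 expects them. The snippet below is only an illustrative sanity check based on the directory trees above (it is not part of the repository); adjust the paths if your copies live elsewhere.

```python
import os
from pathlib import Path

# Root that Detectron2 reads; falls back to ./datasets as described above.
root = Path(os.environ.get("DETECTRON2_DATASETS", "./datasets"))

# One representative sub-directory per dataset, taken from the trees above.
expected = [
    "coco/train2017",
    "ADEChallengeData2016/images",
    "ADE20K_2021_17_01/images",
    "VOCdevkit/VOC2012/JPEGImages",
    "VOCdevkit/VOC2010/JPEGImages",
]

for rel in expected:
    status = "ok" if (root / rel).is_dir() else "MISSING"
    print(f"{status:8s} {root / rel}")
```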
- diff --git a/spaces/jeanmidev/marvel_snap_related_items_recsys/app.py b/spaces/jeanmidev/marvel_snap_related_items_recsys/app.py deleted file mode 100644 index ed2d13ce0b32e2ab798d1b134f20760256c02305..0000000000000000000000000000000000000000 --- a/spaces/jeanmidev/marvel_snap_related_items_recsys/app.py +++ /dev/null @@ -1,75 +0,0 @@ -import gradio as gr -import pandas as pd -from PIL import Image -import requests - -dfp_matrice_association = pd.read_csv("./data/association.csv", index_col=0) -dfp_matrice_distance_from_encoding = pd.read_csv("./data/ed_from_encoding.csv", index_col=0) -dfp_matrice_distance_from_association = pd.read_csv("./data/ed_from_association.csv", index_col=0) -dfp_cards = pd.read_csv("./data/cards.csv") -dfp_cards.sort_values("cname", inplace=True) - -mapping_cname_cid = dfp_cards[["cname", "cid"]].reset_index() -mapping_cname_cid = mapping_cname_cid.set_index("cname").to_dict(orient="index") - -dfp_cards.set_index(["cid"], inplace=True) - -def clean_recommendations(cid, recommendations): - if cid in recommendations: - recommendations.remove(cid) - if str(cid) in recommendations: - recommendations.remove(str(cid)) - return recommendations - -def get_recommendations(cid, dfp_, ascending=False, k=5): - recommendations = dfp_.loc[cid].sort_values(ascending=ascending).index.tolist() - clean_recommendations(cid, recommendations) - return recommendations[:k] - -def display_recommendations(cname, use_euclidian_distance, use_description_embedding, k): - - cid = int(mapping_cname_cid[cname]["cid"]) - # Collect the recommendations - dfp_dist = dfp_matrice_distance_from_association if use_euclidian_distance else dfp_matrice_association - - if (use_description_embedding == False): - recommendations = get_recommendations(cid, dfp_dist, use_euclidian_distance, k=25) - else: - closest_card = get_recommendations(cid, dfp_matrice_distance_from_encoding, True, k=1)[0] - recommendations = get_recommendations(int(closest_card), dfp_dist, use_euclidian_distance, k=25) - - recommendations = recommendations[:k] - recommendations_string = [dfp_cards.loc[int(cid_r)]["cname"] for cid_r in recommendations] - recommendations_image = [Image.open(requests.get(dfp_cards.loc[int(cid_r)]["art"], stream=True).raw).resize((240,300)) for cid_r in recommendations] - - block_text = "\n" - for idx, cid_r in enumerate(recommendations): - block_text += f"{idx+1}){dfp_cards.loc[int(cid_r)]['cname']} : {dfp_cards.loc[int(cid_r)]['ability']}\n" -# block_text += f"{idx+1})\n" - - text_output = f""" - Recommended cards:{block_text} - """ - return text_output, recommendations_image - -title = "Marvel Snap deck starter" -description = """Gradio demo for Marvel Snap deck starter \n -To use it, simply select the card in the dropdown (cname) that you want to use to kickstart the deck and after use the type of recommender system:\n -1) use_euclidian_distance: Use the euclidian distance or the normalized association \n -2) use_description_embedding: Use the description (name + stats + ability X transformers encoder) as first filtering before the model in 1)\n -More details in this article https://www.the-odd-dataguy.com/2023/01/15/marvel_snap_cold_start/ -""" -article = "<div style='text-align: center;'>Marvel Snap deck starter by <a href='https://www.linkedin.com/in/jeanmicheldaignan/' target='_blank'>Jean-Michel D</a> | <a href='https://www.the-odd-dataguy.com/2023/01/15/marvel_snap_cold_start/' target='_blank'>Article</a>" - -demo = gr.Interface( - fn=display_recommendations, - 
inputs=[gr.inputs.Dropdown(dfp_cards["cname"].tolist()), - "checkbox", - "checkbox", - gr.Slider(minimum=1, maximum=20, step=1, value=5)], - outputs=["text", gr.Gallery(label="Recommendations")], - title=title, - description=description, - article=article, -) -demo.launch() \ No newline at end of file diff --git a/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/modules/diffusionmodules/util.py b/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/modules/diffusionmodules/util.py deleted file mode 100644 index 753ddfbdd20fdfbf9ce72d960fadf76abfbca6d7..0000000000000000000000000000000000000000 --- a/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/modules/diffusionmodules/util.py +++ /dev/null @@ -1,277 +0,0 @@ -import os -import math -import torch -import torch.nn as nn -import numpy as np -from einops import repeat - -from ldm.util import instantiate_from_config - - - -class FourierEmbedder(): - def __init__(self, num_freqs=64, temperature=100): - - self.num_freqs = num_freqs - self.temperature = temperature - self.freq_bands = temperature ** ( torch.arange(num_freqs) / num_freqs ) - - @ torch.no_grad() - def __call__(self, x, cat_dim=-1): - "x: arbitrary shape of tensor. dim: cat dim" - out = [] - for freq in self.freq_bands: - out.append( torch.sin( freq*x ) ) - out.append( torch.cos( freq*x ) ) - return torch.cat(out, cat_dim) - - - -def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if schedule == "linear": - betas = ( - torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 - ) - - elif schedule == "cosine": - timesteps = ( - torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s - ) - alphas = timesteps / (1 + cosine_s) * np.pi / 2 - alphas = torch.cos(alphas).pow(2) - alphas = alphas / alphas[0] - betas = 1 - alphas[1:] / alphas[:-1] - betas = np.clip(betas, a_min=0, a_max=0.999) - - elif schedule == "sqrt_linear": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) - elif schedule == "sqrt": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 - else: - raise ValueError(f"schedule '{schedule}' unknown.") - return betas.numpy() - - -def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): - if ddim_discr_method == 'uniform': - c = num_ddpm_timesteps // num_ddim_timesteps - ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) - elif ddim_discr_method == 'quad': - ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) - else: - raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') - - # assert ddim_timesteps.shape[0] == num_ddim_timesteps - # add one to get the final alpha values right (the ones from first scale to data during sampling) - steps_out = ddim_timesteps + 1 - if verbose: - print(f'Selected timesteps for ddim sampler: {steps_out}') - return steps_out - - -def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): - # select alphas for computing the variance schedule - alphas = alphacums[ddim_timesteps] - alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) - - # according the the formula provided in https://arxiv.org/abs/2010.02502 - sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) - if verbose: - print(f'Selected alphas for ddim sampler: 
a_t: {alphas}; a_(t-1): {alphas_prev}') - print(f'For the chosen value of eta, which is {eta}, ' - f'this results in the following sigma_t schedule for ddim sampler {sigmas}') - return sigmas, alphas, alphas_prev - - -def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, - which defines the cumulative product of (1-beta) over time from t = [0,1]. - :param num_diffusion_timesteps: the number of betas to produce. - :param alpha_bar: a lambda that takes an argument t from 0 to 1 and - produces the cumulative product of (1-beta) up to that - part of the diffusion process. - :param max_beta: the maximum beta to use; use values lower than 1 to - prevent singularities. - """ - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return np.array(betas) - - -def extract_into_tensor(a, t, x_shape): - b, *_ = t.shape - out = a.gather(-1, t) - return out.reshape(b, *((1,) * (len(x_shape) - 1))) - - -def checkpoint(func, inputs, params, flag): - """ - Evaluate a function without caching intermediate activations, allowing for - reduced memory at the expense of extra compute in the backward pass. - :param func: the function to evaluate. - :param inputs: the argument sequence to pass to `func`. - :param params: a sequence of parameters `func` depends on but does not - explicitly take as arguments. - :param flag: if False, disable gradient checkpointing. - """ - if flag: - args = tuple(inputs) + tuple(params) - return CheckpointFunction.apply(func, len(inputs), *args) - else: - return func(*inputs) - - -class CheckpointFunction(torch.autograd.Function): - @staticmethod - def forward(ctx, run_function, length, *args): - ctx.run_function = run_function - ctx.input_tensors = list(args[:length]) - ctx.input_params = list(args[length:]) - - with torch.no_grad(): - output_tensors = ctx.run_function(*ctx.input_tensors) - return output_tensors - - @staticmethod - def backward(ctx, *output_grads): - ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] - with torch.enable_grad(): - # Fixes a bug where the first op in run_function modifies the - # Tensor storage in place, which is not allowed for detach()'d - # Tensors. - shallow_copies = [x.view_as(x) for x in ctx.input_tensors] - output_tensors = ctx.run_function(*shallow_copies) - input_grads = torch.autograd.grad( - output_tensors, - ctx.input_tensors + ctx.input_params, - output_grads, - allow_unused=True, - ) - del ctx.input_tensors - del ctx.input_params - del output_tensors - return (None, None) + input_grads - - -def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): - """ - Create sinusoidal timestep embeddings. - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param dim: the dimension of the output. - :param max_period: controls the minimum frequency of the embeddings. - :return: an [N x dim] Tensor of positional embeddings. 
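    :param repeat_only: if True, skip the sinusoidal features and simply
        tile the raw timestep value across `dim` (see the `repeat` branch below).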
- """ - if not repeat_only: - half = dim // 2 - freqs = torch.exp( - -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half - ).to(device=timesteps.device) - args = timesteps[:, None].float() * freqs[None] - embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) - if dim % 2: - embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) - else: - embedding = repeat(timesteps, 'b -> b d', d=dim) - return embedding - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def scale_module(module, scale): - """ - Scale the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().mul_(scale) - return module - - -def mean_flat(tensor): - """ - Take the mean over all non-batch dimensions. - """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def normalization(channels): - """ - Make a standard normalization layer. - :param channels: number of input channels. - :return: an nn.Module for normalization. - """ - return GroupNorm32(32, channels) - - -# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. -class SiLU(nn.Module): - def forward(self, x): - return x * torch.sigmoid(x) - - -class GroupNorm32(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - #return super().forward(x).type(x.dtype) - -def conv_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D convolution module. - """ - if dims == 1: - return nn.Conv1d(*args, **kwargs) - elif dims == 2: - return nn.Conv2d(*args, **kwargs) - elif dims == 3: - return nn.Conv3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def linear(*args, **kwargs): - """ - Create a linear module. - """ - return nn.Linear(*args, **kwargs) - - -def avg_pool_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D average pooling module. 
- """ - if dims == 1: - return nn.AvgPool1d(*args, **kwargs) - elif dims == 2: - return nn.AvgPool2d(*args, **kwargs) - elif dims == 3: - return nn.AvgPool3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -class HybridConditioner(nn.Module): - - def __init__(self, c_concat_config, c_crossattn_config): - super().__init__() - self.concat_conditioner = instantiate_from_config(c_concat_config) - self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) - - def forward(self, c_concat, c_crossattn): - c_concat = self.concat_conditioner(c_concat) - c_crossattn = self.crossattn_conditioner(c_crossattn) - return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} - - -def noise_like(shape, device, repeat=False): - repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) - noise = lambda: torch.randn(shape, device=device) - return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/spaces/jesherjoshua/faceai/faceaiapi.py b/spaces/jesherjoshua/faceai/faceaiapi.py deleted file mode 100644 index dc6505ae3e5154204f3e11dec01c91edf4d31fa7..0000000000000000000000000000000000000000 --- a/spaces/jesherjoshua/faceai/faceaiapi.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/python3 -import mtcnn -import numpy as np -from keras import backend as K -from keras.utils.data_utils import get_file -from scipy import spatial -import os -import sys -import matplotlib.pyplot as plt -import cv2 -import numpy as np -import PIL -import shutil -import tensorflow as tf - - - -model=tf.keras.models.load_model("./faceai.h5") - - - -def preprocess_input(x, data_format=None, version=1): - x_temp = np.copy(x) - if data_format is None: - data_format = K.image_data_format() - assert data_format in {'channels_last', 'channels_first'} - - if version == 1: - if data_format == 'channels_first': - x_temp = x_temp[:, ::-1, ...] - x_temp[:, 0, :, :] -= 93.5940 - x_temp[:, 1, :, :] -= 104.7624 - x_temp[:, 2, :, :] -= 129.1863 - else: - x_temp = x_temp[..., ::-1] - x_temp[..., 0] -= 93.5940 - x_temp[..., 1] -= 104.7624 - x_temp[..., 2] -= 129.1863 - - elif version == 2: - if data_format == 'channels_first': - x_temp = x_temp[:, ::-1, ...] 
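            # channel-wise means of the VGGFace2 training set (BGR order),
            # subtracted after the RGB->BGR flip above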
- x_temp[:, 0, :, :] -= 91.4953 - x_temp[:, 1, :, :] -= 103.8827 - x_temp[:, 2, :, :] -= 131.0912 - else: - x_temp = x_temp[..., ::-1] - x_temp[..., 0] -= 91.4953 - x_temp[..., 1] -= 103.8827 - x_temp[..., 2] -= 131.0912 - else: - raise NotImplementedError - - return x_temp - - -def face_extractor(path): - face_arr=[] - detect=mtcnn.MTCNN() - img=cv2.imread(path) - n=len(detect.detect_faces(img)) -# plt.figure(figsize=(10,10)) - for i in range(n): - x,y,w,h=detect.detect_faces(img)[i]['box'] - dis=img[y:y+h,x:x+w] - dis=PIL.Image.fromarray(dis).resize((224,224)) - dis=np.asarray(dis,dtype='float32') - face_arr.append(dis) - return face_arr - -def generate_embeddings(fpath='./encode/'): - files=os.listdir(fpath) - files=[fpath+f for f in files] - faces=[] - for i in files: - faces.extend(face_extractor(i)) - faces=preprocess_input(faces,version=2) - embeddings_known=model.predict(faces) - return embeddings_known - - -def compare(embedding_known,embedding_unknown,limit=0.45): - dist=spatial.distance.cosine(embedding_known.flatten(),embedding_unknown.flatten()) - if dist>limit: - return 0 - else: - return 1 - -def group(embeddings_known,fpath): - files=os.listdir(fpath) - files=[fpath+f for f in files] - faces=[] - for i in files: - l=face_extractor(i) - for j in l: - j=preprocess_input(j,version=2) - j=model.predict(np.expand_dims(j,axis=0)) - for k in embeddings_known: - result=compare(j,k) - if result==1: - if not os.path.exists('grouped'): - os.makedirs('grouped') - shutil.copy(i,'grouped') - break; -#def main(): -# embeddings_known=generate_embeddings('./encode/') -# group(embeddings_known,'./source/') - - -#if __name__=="__main__": -# main() diff --git a/spaces/jgurzoni/image_background_swapper/saicinpainting/evaluation/masks/countless/test.py b/spaces/jgurzoni/image_background_swapper/saicinpainting/evaluation/masks/countless/test.py deleted file mode 100644 index 7809beb7aeeb3bcb10d03093a564917b1f2b4786..0000000000000000000000000000000000000000 --- a/spaces/jgurzoni/image_background_swapper/saicinpainting/evaluation/masks/countless/test.py +++ /dev/null @@ -1,195 +0,0 @@ -from copy import deepcopy - -import numpy as np - -import countless2d -import countless3d - -def test_countless2d(): - def test_all_cases(fn, test_zero): - case1 = np.array([ [ 1, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) # all different - case2 = np.array([ [ 1, 1 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # two are same - case1z = np.array([ [ 0, 1 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # all different - case2z = np.array([ [ 0, 0 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # two are same - case3 = np.array([ [ 1, 1 ], [ 2, 2 ] ]).reshape((2,2,1,1)) # two groups are same - case4 = np.array([ [ 1, 2 ], [ 2, 2 ] ]).reshape((2,2,1,1)) # 3 are the same - case5 = np.array([ [ 5, 5 ], [ 5, 5 ] ]).reshape((2,2,1,1)) # all are the same - - is_255_handled = np.array([ [ 255, 255 ], [ 1, 2 ] ], dtype=np.uint8).reshape((2,2,1,1)) - - test = lambda case: fn(case) - - if test_zero: - assert test(case1z) == [[[[3]]]] # d - assert test(case2z) == [[[[0]]]] # a==b - else: - assert test(case1) == [[[[4]]]] # d - assert test(case2) == [[[[1]]]] # a==b - - assert test(case3) == [[[[1]]]] # a==b - assert test(case4) == [[[[2]]]] # b==c - assert test(case5) == [[[[5]]]] # a==b - - assert test(is_255_handled) == [[[[255]]]] - - assert fn(case1).dtype == case1.dtype - - test_all_cases(countless2d.simplest_countless, False) - test_all_cases(countless2d.quick_countless, False) - test_all_cases(countless2d.quickest_countless, False) - 
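    # COUNTLESS reduces every 2x2 block (a, b, c, d) to its most frequent value:
    # if any two of a, b, c agree that value wins, otherwise d is kept - exactly
    # the behaviour case1..case5 assert above. The variants differ in speed and
    # in how zero labels are treated.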
test_all_cases(countless2d.stippled_countless, False) - - - - methods = [ - countless2d.zero_corrected_countless, - countless2d.countless, - countless2d.countless_if, - # countless2d.counting, # counting doesn't respect order so harder to write a test - ] - - for fn in methods: - print(fn.__name__) - test_all_cases(fn, True) - -def test_stippled_countless2d(): - a = np.array([ [ 1, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - b = np.array([ [ 0, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - c = np.array([ [ 1, 0 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - d = np.array([ [ 1, 2 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - e = np.array([ [ 1, 2 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - f = np.array([ [ 0, 0 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - g = np.array([ [ 0, 2 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - h = np.array([ [ 0, 2 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - i = np.array([ [ 1, 0 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - j = np.array([ [ 1, 2 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - k = np.array([ [ 1, 0 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - l = np.array([ [ 1, 0 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - m = np.array([ [ 0, 2 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - n = np.array([ [ 0, 0 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - o = np.array([ [ 0, 0 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - z = np.array([ [ 0, 0 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - - test = countless2d.stippled_countless - - # Note: We only tested non-matching cases above, - # cases f,g,h,i,j,k prove their duals work as well - # b/c if two pixels are black, either one can be chosen - # if they are different or the same. - - assert test(a) == [[[[4]]]] - assert test(b) == [[[[4]]]] - assert test(c) == [[[[4]]]] - assert test(d) == [[[[4]]]] - assert test(e) == [[[[1]]]] - assert test(f) == [[[[4]]]] - assert test(g) == [[[[4]]]] - assert test(h) == [[[[2]]]] - assert test(i) == [[[[4]]]] - assert test(j) == [[[[1]]]] - assert test(k) == [[[[1]]]] - assert test(l) == [[[[1]]]] - assert test(m) == [[[[2]]]] - assert test(n) == [[[[3]]]] - assert test(o) == [[[[4]]]] - assert test(z) == [[[[0]]]] - - bc = np.array([ [ 0, 2 ], [ 2, 4 ] ]).reshape((2,2,1,1)) - bd = np.array([ [ 0, 2 ], [ 3, 2 ] ]).reshape((2,2,1,1)) - cd = np.array([ [ 0, 2 ], [ 3, 3 ] ]).reshape((2,2,1,1)) - - assert test(bc) == [[[[2]]]] - assert test(bd) == [[[[2]]]] - assert test(cd) == [[[[3]]]] - - ab = np.array([ [ 1, 1 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - ac = np.array([ [ 1, 2 ], [ 1, 0 ] ]).reshape((2,2,1,1)) - ad = np.array([ [ 1, 0 ], [ 3, 1 ] ]).reshape((2,2,1,1)) - - assert test(ab) == [[[[1]]]] - assert test(ac) == [[[[1]]]] - assert test(ad) == [[[[1]]]] - -def test_countless3d(): - def test_all_cases(fn): - alldifferent = [ - [ - [1,2], - [3,4], - ], - [ - [5,6], - [7,8] - ] - ] - allsame = [ - [ - [1,1], - [1,1], - ], - [ - [1,1], - [1,1] - ] - ] - - assert fn(np.array(alldifferent)) == [[[8]]] - assert fn(np.array(allsame)) == [[[1]]] - - twosame = deepcopy(alldifferent) - twosame[1][1][0] = 2 - - assert fn(np.array(twosame)) == [[[2]]] - - threemixed = [ - [ - [3,3], - [1,2], - ], - [ - [2,4], - [4,3] - ] - ] - assert fn(np.array(threemixed)) == [[[3]]] - - foursame = [ - [ - [4,4], - [1,2], - ], - [ - [2,4], - [4,3] - ] - ] - - assert fn(np.array(foursame)) == [[[4]]] - - fivesame = [ - [ - [5,4], - [5,5], - ], - [ - [2,4], - [5,5] - ] - ] - - assert fn(np.array(fivesame)) == [[[5]]] - - def countless3d_generalized(img): - return countless3d.countless_generalized(img, (2,2,2)) - def countless3d_dynamic_generalized(img): - return countless3d.dynamic_countless_generalized(img, (2,2,2)) - - methods = [ - 
countless3d.countless3d, - countless3d.dynamic_countless3d, - countless3d_generalized, - countless3d_dynamic_generalized, - ] - - for fn in methods: - test_all_cases(fn) \ No newline at end of file diff --git a/spaces/jie1/jie_test4/Sort_Scores.py b/spaces/jie1/jie_test4/Sort_Scores.py deleted file mode 100644 index c90648dfc2e9d090879a75d5978d99ced946f903..0000000000000000000000000000000000000000 --- a/spaces/jie1/jie_test4/Sort_Scores.py +++ /dev/null @@ -1,36 +0,0 @@ -import re -from tname import * -from Rfile import * - - -def Sort_Scores(file): - scores = [] - contents = j_reads(file.name) - - for i in range(2, len(contents)): - if i % 2 == 0: - # 使用正则表达式 - content = re.match('.*score=(\d.\d+?),', contents[i]) - if content: - score = content.group(1) - scores.append(float(score)) - - na = Name() - na = na + r"scores_sort.tsv" # 结果文件名称 - - # 按列表scores中元素的值进行排序,并返回元素对应索引序列 - sorted_id = [] - sorted_id = sorted(range(len(scores)), key=lambda k: scores[k], reverse=True) - - # 第一条序列和其他序列格式不一样,且第一条序列不需要排序,单独写入 - with open(na, "a") as f1: - f1.write(contents[0]) - f1.write(contents[1]) - - for i in range(0, len(scores)): - with open(na, "a") as f: - f.write(contents[sorted_id[i] * 2 + 2]) - # 由于文件前两行未参与排序,所以索引要+2 - f.write(contents[sorted_id[i] * 2 + 2 + 1]) - results = j_reads(na) - return str(results) diff --git a/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/analysis/Fig3c_Mutacorrelation.py b/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/analysis/Fig3c_Mutacorrelation.py deleted file mode 100644 index 75d3228f036430ea5e482502bf510a1678b6a4ab..0000000000000000000000000000000000000000 --- a/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/analysis/Fig3c_Mutacorrelation.py +++ /dev/null @@ -1,461 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -# Author: LE YUAN - -import os -import math -import model -import torch -import json -import pickle -import numpy as np -from rdkit import Chem -from Bio import SeqIO -from collections import Counter -from collections import defaultdict -import matplotlib.pyplot as plt -import matplotlib.pyplot as plt -from matplotlib import rc -from scipy import stats -import seaborn as sns -import pandas as pd -from scipy.stats import ranksums -from sklearn.metrics import mean_squared_error,r2_score - - -fingerprint_dict = model.load_pickle('../../Data/input/fingerprint_dict.pickle') -atom_dict = model.load_pickle('../../Data/input/atom_dict.pickle') -bond_dict = model.load_pickle('../../Data/input/bond_dict.pickle') -edge_dict = model.load_pickle('../../Data/input/edge_dict.pickle') -word_dict = model.load_pickle('../../Data/input/sequence_dict.pickle') - -def split_sequence(sequence, ngram): - sequence = '-' + sequence + '=' - # print(sequence) - # words = [word_dict[sequence[i:i+ngram]] for i in range(len(sequence)-ngram+1)] - - words = list() - for i in range(len(sequence)-ngram+1) : - try : - words.append(word_dict[sequence[i:i+ngram]]) - except : - word_dict[sequence[i:i+ngram]] = 0 - words.append(word_dict[sequence[i:i+ngram]]) - - return np.array(words) - # return word_dict - -def create_atoms(mol): - """Create a list of atom (e.g., hydrogen and oxygen) IDs - considering the aromaticity.""" - # atom_dict = defaultdict(lambda: len(atom_dict)) - atoms = [a.GetSymbol() for a in mol.GetAtoms()] - # print(atoms) - for a in mol.GetAromaticAtoms(): - i = a.GetIdx() - atoms[i] = (atoms[i], 'aromatic') - atoms = [atom_dict[a] for a in atoms] - # atoms = list() - # for a in atoms : - # try: - # atoms.append(atom_dict[a]) - # except : - # atom_dict[a] = 0 - 
# atoms.append(atom_dict[a]) - - return np.array(atoms) - -def create_ijbonddict(mol): - """Create a dictionary, which each key is a node ID - and each value is the tuples of its neighboring node - and bond (e.g., single and double) IDs.""" - # bond_dict = defaultdict(lambda: len(bond_dict)) - i_jbond_dict = defaultdict(lambda: []) - for b in mol.GetBonds(): - i, j = b.GetBeginAtomIdx(), b.GetEndAtomIdx() - bond = bond_dict[str(b.GetBondType())] - i_jbond_dict[i].append((j, bond)) - i_jbond_dict[j].append((i, bond)) - return i_jbond_dict - -def extract_fingerprints(atoms, i_jbond_dict, radius): - """Extract the r-radius subgraphs (i.e., fingerprints) - from a molecular graph using Weisfeiler-Lehman algorithm.""" - - # fingerprint_dict = defaultdict(lambda: len(fingerprint_dict)) - # edge_dict = defaultdict(lambda: len(edge_dict)) - - if (len(atoms) == 1) or (radius == 0): - fingerprints = [fingerprint_dict[a] for a in atoms] - - else: - nodes = atoms - i_jedge_dict = i_jbond_dict - - for _ in range(radius): - - """Update each node ID considering its neighboring nodes and edges - (i.e., r-radius subgraphs or fingerprints).""" - fingerprints = [] - for i, j_edge in i_jedge_dict.items(): - neighbors = [(nodes[j], edge) for j, edge in j_edge] - fingerprint = (nodes[i], tuple(sorted(neighbors))) - # fingerprints.append(fingerprint_dict[fingerprint]) - # fingerprints.append(fingerprint_dict.get(fingerprint)) - try : - fingerprints.append(fingerprint_dict[fingerprint]) - except : - fingerprint_dict[fingerprint] = 0 - fingerprints.append(fingerprint_dict[fingerprint]) - - nodes = fingerprints - - """Also update each edge ID considering two nodes - on its both sides.""" - _i_jedge_dict = defaultdict(lambda: []) - for i, j_edge in i_jedge_dict.items(): - for j, edge in j_edge: - both_side = tuple(sorted((nodes[i], nodes[j]))) - # edge = edge_dict[(both_side, edge)] - # edge = edge_dict.get((both_side, edge)) - try : - edge = edge_dict[(both_side, edge)] - except : - edge_dict[(both_side, edge)] = 0 - edge = edge_dict[(both_side, edge)] - - _i_jedge_dict[i].append((j, edge)) - i_jedge_dict = _i_jedge_dict - - return np.array(fingerprints) - -def create_adjacency(mol): - adjacency = Chem.GetAdjacencyMatrix(mol) - return np.array(adjacency) - -def dump_dictionary(dictionary, filename): - with open(filename, 'wb') as file: - pickle.dump(dict(dictionary), file) - -def load_tensor(file_name, dtype): - return [dtype(d).to(device) for d in np.load(file_name + '.npy', allow_pickle=True)] - -class Predictor(object): - def __init__(self, model): - self.model = model - - def predict(self, data): - predicted_value = self.model.forward(data) - - return predicted_value - -def extract_wildtype_mutant() : - with open('../../Data/database/Kcat_combination_0918_wildtype_mutant.json', 'r') as infile : - Kcat_data = json.load(infile) - - entry_keys = list() - for data in Kcat_data : - # print(data['ECNumber']) - # print(data['Substrate']) - # print(data['Organism']) - - substrate = data['Substrate'] - organism = data['Organism'] - EC = data['ECNumber'] - entry_key = substrate + '&' + organism + '&' + EC - # print(entry_key.lower()) - entry_keys.append(entry_key) - - entry_dict = dict(Counter(entry_keys)) - # print(entry_dict) - - duplicated_keys = [key for key, value in entry_dict.items() if value > 1] - # print(duplicated_keys) - - duplicated_dict = {key:value for key, value in entry_dict.items() if value > 1} - # print(duplicated_dict) - # https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value 
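    # keep the 30 substrate/organism/EC combinations with the most measured
    # kcat values (wildtype plus mutants), ordered by how often they occur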
- # print(sorted(duplicated_dict.items(), key=lambda x: x[1], reverse=True)[:30]) - duplicated_list = sorted(duplicated_dict.items(), key=lambda x: x[1], reverse=True)[:30] - - for duplicated in duplicated_list[:1] : - # print('The subtrate name:', duplicated[0]) - for data in Kcat_data : - # duplicated_one_entry = duplicated_list[0].split('&') - substrate = data['Substrate'] - organism = data['Organism'] - EC = data['ECNumber'] - one_entry = substrate + '&' + organism + '&' + EC - if one_entry == duplicated[0] : - enzyme_type = data['Type'] - Kcat_value = data['Value'] - # print('Substrate:', substrate) - # print('%s enzyme: %s' %(enzyme_type, Kcat_value)) - # print('----'*15+'\n') - - return duplicated_list - -def extract_wildtype_kcat(entry) : - with open('../../Data/database/Kcat_combination_0918_wildtype_mutant.json', 'r') as infile : - Kcat_data = json.load(infile) - - for data in Kcat_data : - substrate = data['Substrate'] - organism = data['Organism'] - EC = data['ECNumber'] - one_entry = substrate + '&' + organism + '&' + EC - if one_entry == entry : - enzyme_type = data['Type'] - if enzyme_type == 'wildtype' : - wildtype_kcat = float(data['Value']) - - if wildtype_kcat : - return wildtype_kcat - else : - return None - -def compare_prediction_wildtype_mutant() : - with open('../../Data/database/Kcat_combination_0918_wildtype_mutant.json', 'r') as infile : - Kcat_data = json.load(infile) - - wildtype_mutant_entries = extract_wildtype_mutant() - - # with open('../species/Saccharomyces_cerevisiaeForKcatPrediction2.txt', 'r') as infile : - # lines = infile.readlines()[1:] - - # print(len(lines)) # 6291 - # # print(lines[1]) - - # # proteinSeq = get_refSeq() - - fingerprint_dict = model.load_pickle('../../Data/input/fingerprint_dict.pickle') - atom_dict = model.load_pickle('../../Data/input/atom_dict.pickle') - bond_dict = model.load_pickle('../../Data/input/bond_dict.pickle') - word_dict = model.load_pickle('../../Data/input/sequence_dict.pickle') - n_fingerprint = len(fingerprint_dict) - n_word = len(word_dict) - # print(n_fingerprint) # 3958 - # print(n_word) # 8542 - - radius=2 - ngram=3 - - dim=10 - layer_gnn=3 - side=5 - window=11 - layer_cnn=3 - layer_output=3 - lr=1e-3 - lr_decay=0.5 - decay_interval=10 - weight_decay=1e-6 - iteration=100 - - if torch.cuda.is_available(): - device = torch.device('cuda') - else: - device = torch.device('cpu') - - # torch.manual_seed(1234) - Kcat_model = model.KcatPrediction(device, n_fingerprint, n_word, 2*dim, layer_gnn, window, layer_cnn, layer_output).to(device) - Kcat_model.load_state_dict(torch.load('../../Results/output/all--radius2--ngram3--dim20--layer_gnn3--window11--layer_cnn3--layer_output3--lr1e-3--lr_decay0.5--decay_interval10--weight_decay1e-6--iteration50', map_location=device)) - # print(state_dict.keys()) - # model.eval() - predictor = Predictor(Kcat_model) - - print('It\'s time to start the prediction!') - print('-----------------------------------') - - # prediction = predictor.predict(inputs) - - i = 0 - alldata = dict() - alldata['substrate'] = list() - alldata['experimental'] = list() - alldata['predicted'] = list() - - experimental_values = list() - predicted_values = list() - - substrate_enzymes = { - '7,8-Dihydrofolate': 'DHFR', - 'Glycerate 3-phosphate': 'PGDH', - 'L-Aspartate': 'AKIII', - 'Penicillin G': 'DAOCS', - 'Inosine': 'PNP', - 'Isopentenyl diphosphate': 'GGPPs' - } - - for wildtype_mutant_entry in wildtype_mutant_entries : - entry_names = wildtype_mutant_entry[0].split('&') - # print('This entry is:', 
entry_names) - # print('The total amount of wildtype and variant enzymes in the entry is:', wildtype_mutant_entry[1]) - - # experimental_values = list() - # predicted_values = list() - # wildtype_like = list() - # wildtype_decreased = list() - - if entry_names[0] in ['7,8-Dihydrofolate', 'Glycerate 3-phosphate', 'L-Aspartate', 'Penicillin G', 'Inosine', 'Isopentenyl diphosphate'] : - print('This entry is:', entry_names) - for data in Kcat_data : - # print(data) - # print(data['Substrate']) - substrate = data['Substrate'] - organism = data['Organism'] - EC = data['ECNumber'] - entry = substrate + '&' + organism + '&' + EC - - if entry == wildtype_mutant_entry[0] : - substrate_name = entry_names[0] - # alldata['substrate'].append(entry_names[0]) - alldata['substrate'].append(substrate_enzymes[substrate_name] + ' & ' + substrate_name) - wildtype_kcat = extract_wildtype_kcat(entry) - # print('wildtype kcat:', wildtype_kcat) - # print(data) - # if wildtype_kcat : - i += 1 - # print('This is', i, '---------------------------------------') - smiles = data['Smiles'] - sequence = data['Sequence'] - enzyme_type = data['Type'] - Kcat = data['Value'] - if "." not in smiles and float(Kcat) > 0: - # i += 1 - # print('This is',i) - - mol = Chem.AddHs(Chem.MolFromSmiles(smiles)) - atoms = create_atoms(mol) - # print(atoms) - i_jbond_dict = create_ijbonddict(mol) - # print(i_jbond_dict) - - fingerprints = extract_fingerprints(atoms, i_jbond_dict, radius) - # print(fingerprints) - # compounds.append(fingerprints) - - adjacency = create_adjacency(mol) - # print(adjacency) - # adjacencies.append(adjacency) - - words = split_sequence(sequence,ngram) - # print(words) - # proteins.append(words) - - fingerprints = torch.LongTensor(fingerprints) - adjacency = torch.FloatTensor(adjacency) - words = torch.LongTensor(words) - - inputs = [fingerprints, adjacency, words] - - value = float(data['Value']) - # print('Current kcat value:', value) - normalized_value = value/wildtype_kcat - # print('%.2f' % normalized_value) - # print(type(value)) - # print(type(normalized_value)) - experimental_values.append(math.log10(value)) - alldata['experimental'].append(math.log10(value)) - - prediction = predictor.predict(inputs) - Kcat_log_value = prediction.item() - Kcat_value = math.pow(2,Kcat_log_value) - # print(Kcat_value) - # print('%.2f' % normalized_value) - # print(type(Kcat_value)) - predicted_values.append(math.log10(Kcat_value)) - alldata['predicted'].append(math.log10(Kcat_value)) - - # correlation1, p_value1 = stats.pearsonr(experimental_values, predicted_values) - - # # https://blog.csdn.net/u012735708/article/details/84337262?utm_medium=distribute.pc_relevant.none- - # # task-blog-BlogCommendFromMachineLearnPai2-1.pc_relevant_is_cache&depth_1-utm_source= - # # distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-1.pc_relevant_is_cache - # r2 = r2_score(experimental_values,predicted_values) - # rmse = np.sqrt(mean_squared_error(experimental_values,predicted_values)) - - # print('r is %.4f' % correlation1) - # print('P value is', p_value1) - # # print('R2 is %.4f' % r2) - # # print('RMSE is %.4f' % rmse) - # # print('-----'*10 + '\n') - - - correlation, p_value = stats.pearsonr(experimental_values, predicted_values) - r2 = r2_score(experimental_values,predicted_values) - rmse = np.sqrt(mean_squared_error(experimental_values,predicted_values)) - - print('The overall r is %.4f' % correlation) - print('The overall P value is', p_value) - print('The overall R2 is %.4f' % r2) - print('The overall RMSE is 
%.4f' % rmse) - - # The overall r is 0.9418 - # The overall P value is 7.040747950580316e-92 - # The overall R2 is 0.8830 - # The overall RMSE is 0.4214 - - - # https://blog.csdn.net/weixin_38753213/article/details/109831543 - allData = pd.DataFrame(alldata) - - # fig, ax = plt.subplots(figsize=(4.0,2.8)) - # fig, ax = plt.subplots(figsize=(1.5,1.5)) - # ax = plt.figure(figsize=(1.5,1.5)) - plt.figure(figsize=(1.5,1.5)) - - # To solve the 'Helvetica' font cannot be used in PDF file - # https://stackoverflow.com/questions/59845568/the-pdf-backend-does-not-currently-support-the-selected-font - # rc('text', usetex=True) - rc('font',**{'family':'serif','serif':['Helvetica']}) - plt.rcParams['pdf.fonttype'] = 42 - # plt.rc('text', usetex=True) - - plt.axes([0.12,0.12,0.83,0.83]) - - plt.tick_params(direction='in') - plt.tick_params(which='major',length=1.5) - plt.tick_params(which='major',width=0.4) - - palette = ("#FF8C00", "#A034F0", "#159090", "#1051D6", '#0AB944', '#DF16B7') - - # scatter = sns.scatterplot(data=allData, x='experimental', y='predicted', hue='substrate', - # palette=palette, legend='full', ec='white', sizes=(1.5, 1.5, 1.5, 1.5, 1.5, 1.5), alpha=.7, ax=ax) - - # scatter = sns.scatterplot(data=allData, x='experimental', y='predicted', hue='substrate', - # palette=palette, ec='white', sizes=(1.5, 1.5, 1.5, 1.5, 1.5, 1.5), alpha=.7, ax=ax) - - scatter = sns.scatterplot(data=allData, x='experimental', y='predicted', hue='substrate', - palette=palette, ec='white', s=8, alpha=.7) - - scatter.get_legend().remove() - - plt.rcParams['font.family'] = 'Helvetica' - - scatter.set_xlabel("Experimental $k$$_\mathregular{cat}$ value", fontdict={'weight': 'normal', 'fontname': 'Helvetica', 'size': 7}, fontsize=7) - scatter.set_ylabel('Predicted $k$$_\mathregular{cat}$ value',fontdict={'weight': 'normal', 'fontname': 'Helvetica', 'size': 7},fontsize=7) - - plt.xticks([-5, -3, -1, 1, 3]) - plt.yticks([-5, -3, -1, 1, 3]) - plt.xticks(fontsize=6) - plt.yticks(fontsize=6) - - plt.plot([-5, -3, -1, 1, 3],[-5, -3, -1, 1, 3],color='b',linestyle='dashed',linewidth=1) # linewidth=0.75 - - ax = plt.gca() - ax.spines['bottom'].set_linewidth(0.5) - ax.spines['left'].set_linewidth(0.5) - ax.spines['top'].set_linewidth(0.5) - ax.spines['right'].set_linewidth(0.5) - - # scatter.legend(loc='best') - plt.legend(bbox_to_anchor=(1.01,1), frameon=False, fontsize=6) - plt.tight_layout() - - plt.savefig("../../Results/figures/Fig3c.pdf", dpi=400, bbox_inches = 'tight') - - -if __name__ == '__main__' : - compare_prediction_wildtype_mutant() diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_gcm.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_gcm.py deleted file mode 100644 index 0519510769e6a9e24a3165e770b9c67747509ee2..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_gcm.py +++ /dev/null @@ -1,620 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2014, Legrandin <helderijs@gmail.com> -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. 
Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -""" -Galois/Counter Mode (GCM). -""" - -__all__ = ['GcmMode'] - -from binascii import unhexlify - -from Crypto.Util.py3compat import bord, _copy_bytes - -from Crypto.Util._raw_api import is_buffer - -from Crypto.Util.number import long_to_bytes, bytes_to_long -from Crypto.Hash import BLAKE2s -from Crypto.Random import get_random_bytes - -from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, VoidPointer, - create_string_buffer, get_raw_buffer, - SmartPointer, c_size_t, c_uint8_ptr) - -from Crypto.Util import _cpu_features - - -# C API by module implementing GHASH -_ghash_api_template = """ - int ghash_%imp%(uint8_t y_out[16], - const uint8_t block_data[], - size_t len, - const uint8_t y_in[16], - const void *exp_key); - int ghash_expand_%imp%(const uint8_t h[16], - void **ghash_tables); - int ghash_destroy_%imp%(void *ghash_tables); -""" - -def _build_impl(lib, postfix): - from collections import namedtuple - - funcs = ( "ghash", "ghash_expand", "ghash_destroy" ) - GHASH_Imp = namedtuple('_GHash_Imp', funcs) - try: - imp_funcs = [ getattr(lib, x + "_" + postfix) for x in funcs ] - except AttributeError: # Make sphinx stop complaining with its mocklib - imp_funcs = [ None ] * 3 - params = dict(zip(funcs, imp_funcs)) - return GHASH_Imp(**params) - - -def _get_ghash_portable(): - api = _ghash_api_template.replace("%imp%", "portable") - lib = load_pycryptodome_raw_lib("Crypto.Hash._ghash_portable", api) - result = _build_impl(lib, "portable") - return result -_ghash_portable = _get_ghash_portable() - - -def _get_ghash_clmul(): - """Return None if CLMUL implementation is not available""" - - if not _cpu_features.have_clmul(): - return None - try: - api = _ghash_api_template.replace("%imp%", "clmul") - lib = load_pycryptodome_raw_lib("Crypto.Hash._ghash_clmul", api) - result = _build_impl(lib, "clmul") - except OSError: - result = None - return result -_ghash_clmul = _get_ghash_clmul() - - -class _GHASH(object): - """GHASH function defined in NIST SP 800-38D, Algorithm 2. - - If X_1, X_2, .. X_m are the blocks of input data, the function - computes: - - X_1*H^{m} + X_2*H^{m-1} + ... + X_m*H - - in the Galois field GF(2^256) using the reducing polynomial - (x^128 + x^7 + x^2 + x + 1). 
- """ - - def __init__(self, subkey, ghash_c): - assert len(subkey) == 16 - - self.ghash_c = ghash_c - - self._exp_key = VoidPointer() - result = ghash_c.ghash_expand(c_uint8_ptr(subkey), - self._exp_key.address_of()) - if result: - raise ValueError("Error %d while expanding the GHASH key" % result) - - self._exp_key = SmartPointer(self._exp_key.get(), - ghash_c.ghash_destroy) - - # create_string_buffer always returns a string of zeroes - self._last_y = create_string_buffer(16) - - def update(self, block_data): - assert len(block_data) % 16 == 0 - - result = self.ghash_c.ghash(self._last_y, - c_uint8_ptr(block_data), - c_size_t(len(block_data)), - self._last_y, - self._exp_key.get()) - if result: - raise ValueError("Error %d while updating GHASH" % result) - - return self - - def digest(self): - return get_raw_buffer(self._last_y) - - -def enum(**enums): - return type('Enum', (), enums) - - -MacStatus = enum(PROCESSING_AUTH_DATA=1, PROCESSING_CIPHERTEXT=2) - - -class GcmMode(object): - """Galois Counter Mode (GCM). - - This is an Authenticated Encryption with Associated Data (`AEAD`_) mode. - It provides both confidentiality and authenticity. - - The header of the message may be left in the clear, if needed, and it will - still be subject to authentication. The decryption step tells the receiver - if the message comes from a source that really knowns the secret key. - Additionally, decryption detects if any part of the message - including the - header - has been modified or corrupted. - - This mode requires a *nonce*. - - This mode is only available for ciphers that operate on 128 bits blocks - (e.g. AES but not TDES). - - See `NIST SP800-38D`_. - - .. _`NIST SP800-38D`: http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf - .. _AEAD: http://blog.cryptographyengineering.com/2012/05/how-to-choose-authenticated-encryption.html - - :undocumented: __init__ - """ - - def __init__(self, factory, key, nonce, mac_len, cipher_params, ghash_c): - - self.block_size = factory.block_size - if self.block_size != 16: - raise ValueError("GCM mode is only available for ciphers" - " that operate on 128 bits blocks") - - if len(nonce) == 0: - raise ValueError("Nonce cannot be empty") - - if not is_buffer(nonce): - raise TypeError("Nonce must be bytes, bytearray or memoryview") - - # See NIST SP 800 38D, 5.2.1.1 - if len(nonce) > 2**64 - 1: - raise ValueError("Nonce exceeds maximum length") - - - self.nonce = _copy_bytes(None, None, nonce) - """Nonce""" - - self._factory = factory - self._key = _copy_bytes(None, None, key) - self._tag = None # Cache for MAC tag - - self._mac_len = mac_len - if not (4 <= mac_len <= 16): - raise ValueError("Parameter 'mac_len' must be in the range 4..16") - - # Allowed transitions after initialization - self._next = ["update", "encrypt", "decrypt", - "digest", "verify"] - - self._no_more_assoc_data = False - - # Length of associated data - self._auth_len = 0 - - # Length of the ciphertext or plaintext - self._msg_len = 0 - - # Step 1 in SP800-38D, Algorithm 4 (encryption) - Compute H - # See also Algorithm 5 (decryption) - hash_subkey = factory.new(key, - self._factory.MODE_ECB, - **cipher_params - ).encrypt(b'\x00' * 16) - - # Step 2 - Compute J0 - if len(self.nonce) == 12: - j0 = self.nonce + b"\x00\x00\x00\x01" - else: - fill = (16 - (len(self.nonce) % 16)) % 16 + 8 - ghash_in = (self.nonce + - b'\x00' * fill + - long_to_bytes(8 * len(self.nonce), 8)) - j0 = _GHASH(hash_subkey, ghash_c).update(ghash_in).digest() - - # Step 3 - Prepare GCTR cipher for 
encryption/decryption - nonce_ctr = j0[:12] - iv_ctr = (bytes_to_long(j0) + 1) & 0xFFFFFFFF - self._cipher = factory.new(key, - self._factory.MODE_CTR, - initial_value=iv_ctr, - nonce=nonce_ctr, - **cipher_params) - - # Step 5 - Bootstrat GHASH - self._signer = _GHASH(hash_subkey, ghash_c) - - # Step 6 - Prepare GCTR cipher for GMAC - self._tag_cipher = factory.new(key, - self._factory.MODE_CTR, - initial_value=j0, - nonce=b"", - **cipher_params) - - # Cache for data to authenticate - self._cache = b"" - - self._status = MacStatus.PROCESSING_AUTH_DATA - - def update(self, assoc_data): - """Protect associated data - - If there is any associated data, the caller has to invoke - this function one or more times, before using - ``decrypt`` or ``encrypt``. - - By *associated data* it is meant any data (e.g. packet headers) that - will not be encrypted and will be transmitted in the clear. - However, the receiver is still able to detect any modification to it. - In GCM, the *associated data* is also called - *additional authenticated data* (AAD). - - If there is no associated data, this method must not be called. - - The caller may split associated data in segments of any size, and - invoke this method multiple times, each time with the next segment. - - :Parameters: - assoc_data : bytes/bytearray/memoryview - A piece of associated data. There are no restrictions on its size. - """ - - if "update" not in self._next: - raise TypeError("update() can only be called" - " immediately after initialization") - - self._next = ["update", "encrypt", "decrypt", - "digest", "verify"] - - self._update(assoc_data) - self._auth_len += len(assoc_data) - - # See NIST SP 800 38D, 5.2.1.1 - if self._auth_len > 2**64 - 1: - raise ValueError("Additional Authenticated Data exceeds maximum length") - - return self - - def _update(self, data): - assert(len(self._cache) < 16) - - if len(self._cache) > 0: - filler = min(16 - len(self._cache), len(data)) - self._cache += _copy_bytes(None, filler, data) - data = data[filler:] - - if len(self._cache) < 16: - return - - # The cache is exactly one block - self._signer.update(self._cache) - self._cache = b"" - - update_len = len(data) // 16 * 16 - self._cache = _copy_bytes(update_len, None, data) - if update_len > 0: - self._signer.update(data[:update_len]) - - def _pad_cache_and_update(self): - assert(len(self._cache) < 16) - - # The authenticated data A is concatenated to the minimum - # number of zero bytes (possibly none) such that the - # - ciphertext C is aligned to the 16 byte boundary. - # See step 5 in section 7.1 - # - ciphertext C is aligned to the 16 byte boundary. - # See step 6 in section 7.2 - len_cache = len(self._cache) - if len_cache > 0: - self._update(b'\x00' * (16 - len_cache)) - - def encrypt(self, plaintext, output=None): - """Encrypt data with the key and the parameters set at initialization. - - A cipher object is stateful: once you have encrypted a message - you cannot encrypt (or decrypt) another message using the same - object. - - The data to encrypt can be broken up in two or - more pieces and `encrypt` can be called multiple times. - - That is, the statement: - - >>> c.encrypt(a) + c.encrypt(b) - - is equivalent to: - - >>> c.encrypt(a+b) - - This function does not add any padding to the plaintext. - - :Parameters: - plaintext : bytes/bytearray/memoryview - The piece of data to encrypt. - It can be of any length. - :Keywords: - output : bytearray/memoryview - The location where the ciphertext must be written to. 
- If ``None``, the ciphertext is returned. - :Return: - If ``output`` is ``None``, the ciphertext as ``bytes``. - Otherwise, ``None``. - """ - - if "encrypt" not in self._next: - raise TypeError("encrypt() can only be called after" - " initialization or an update()") - self._next = ["encrypt", "digest"] - - ciphertext = self._cipher.encrypt(plaintext, output=output) - - if self._status == MacStatus.PROCESSING_AUTH_DATA: - self._pad_cache_and_update() - self._status = MacStatus.PROCESSING_CIPHERTEXT - - self._update(ciphertext if output is None else output) - self._msg_len += len(plaintext) - - # See NIST SP 800 38D, 5.2.1.1 - if self._msg_len > 2**39 - 256: - raise ValueError("Plaintext exceeds maximum length") - - return ciphertext - - def decrypt(self, ciphertext, output=None): - """Decrypt data with the key and the parameters set at initialization. - - A cipher object is stateful: once you have decrypted a message - you cannot decrypt (or encrypt) another message with the same - object. - - The data to decrypt can be broken up in two or - more pieces and `decrypt` can be called multiple times. - - That is, the statement: - - >>> c.decrypt(a) + c.decrypt(b) - - is equivalent to: - - >>> c.decrypt(a+b) - - This function does not remove any padding from the plaintext. - - :Parameters: - ciphertext : bytes/bytearray/memoryview - The piece of data to decrypt. - It can be of any length. - :Keywords: - output : bytearray/memoryview - The location where the plaintext must be written to. - If ``None``, the plaintext is returned. - :Return: - If ``output`` is ``None``, the plaintext as ``bytes``. - Otherwise, ``None``. - """ - - if "decrypt" not in self._next: - raise TypeError("decrypt() can only be called" - " after initialization or an update()") - self._next = ["decrypt", "verify"] - - if self._status == MacStatus.PROCESSING_AUTH_DATA: - self._pad_cache_and_update() - self._status = MacStatus.PROCESSING_CIPHERTEXT - - self._update(ciphertext) - self._msg_len += len(ciphertext) - - return self._cipher.decrypt(ciphertext, output=output) - - def digest(self): - """Compute the *binary* MAC tag in an AEAD mode. - - The caller invokes this function at the very end. - - This method returns the MAC that shall be sent to the receiver, - together with the ciphertext. - - :Return: the MAC, as a byte string. - """ - - if "digest" not in self._next: - raise TypeError("digest() cannot be called when decrypting" - " or validating a message") - self._next = ["digest"] - - return self._compute_mac() - - def _compute_mac(self): - """Compute MAC without any FSM checks.""" - - if self._tag: - return self._tag - - # Step 5 in NIST SP 800-38D, Algorithm 4 - Compute S - self._pad_cache_and_update() - self._update(long_to_bytes(8 * self._auth_len, 8)) - self._update(long_to_bytes(8 * self._msg_len, 8)) - s_tag = self._signer.digest() - - # Step 6 - Compute T - self._tag = self._tag_cipher.encrypt(s_tag)[:self._mac_len] - - return self._tag - - def hexdigest(self): - """Compute the *printable* MAC tag. - - This method is like `digest`. - - :Return: the MAC, as a hexadecimal string. - """ - return "".join(["%02x" % bord(x) for x in self.digest()]) - - def verify(self, received_mac_tag): - """Validate the *binary* MAC tag. - - The caller invokes this function at the very end. - - This method checks if the decrypted message is indeed valid - (that is, if the key is correct) and it has not been - tampered with while in transit. 
- - :Parameters: - received_mac_tag : bytes/bytearray/memoryview - This is the *binary* MAC, as received from the sender. - :Raises ValueError: - if the MAC does not match. The message has been tampered with - or the key is incorrect. - """ - - if "verify" not in self._next: - raise TypeError("verify() cannot be called" - " when encrypting a message") - self._next = ["verify"] - - secret = get_random_bytes(16) - - mac1 = BLAKE2s.new(digest_bits=160, key=secret, - data=self._compute_mac()) - mac2 = BLAKE2s.new(digest_bits=160, key=secret, - data=received_mac_tag) - - if mac1.digest() != mac2.digest(): - raise ValueError("MAC check failed") - - def hexverify(self, hex_mac_tag): - """Validate the *printable* MAC tag. - - This method is like `verify`. - - :Parameters: - hex_mac_tag : string - This is the *printable* MAC, as received from the sender. - :Raises ValueError: - if the MAC does not match. The message has been tampered with - or the key is incorrect. - """ - - self.verify(unhexlify(hex_mac_tag)) - - def encrypt_and_digest(self, plaintext, output=None): - """Perform encrypt() and digest() in one step. - - :Parameters: - plaintext : bytes/bytearray/memoryview - The piece of data to encrypt. - :Keywords: - output : bytearray/memoryview - The location where the ciphertext must be written to. - If ``None``, the ciphertext is returned. - :Return: - a tuple with two items: - - - the ciphertext, as ``bytes`` - - the MAC tag, as ``bytes`` - - The first item becomes ``None`` when the ``output`` parameter - specified a location for the result. - """ - - return self.encrypt(plaintext, output=output), self.digest() - - def decrypt_and_verify(self, ciphertext, received_mac_tag, output=None): - """Perform decrypt() and verify() in one step. - - :Parameters: - ciphertext : bytes/bytearray/memoryview - The piece of data to decrypt. - received_mac_tag : byte string - This is the *binary* MAC, as received from the sender. - :Keywords: - output : bytearray/memoryview - The location where the plaintext must be written to. - If ``None``, the plaintext is returned. - :Return: the plaintext as ``bytes`` or ``None`` when the ``output`` - parameter specified a location for the result. - :Raises ValueError: - if the MAC does not match. The message has been tampered with - or the key is incorrect. - """ - - plaintext = self.decrypt(ciphertext, output=output) - self.verify(received_mac_tag) - return plaintext - - -def _create_gcm_cipher(factory, **kwargs): - """Create a new block cipher, configured in Galois Counter Mode (GCM). - - :Parameters: - factory : module - A block cipher module, taken from `Crypto.Cipher`. - The cipher must have block length of 16 bytes. - GCM has been only defined for `Crypto.Cipher.AES`. - - :Keywords: - key : bytes/bytearray/memoryview - The secret key to use in the symmetric cipher. - It must be 16 (e.g. *AES-128*), 24 (e.g. *AES-192*) - or 32 (e.g. *AES-256*) bytes long. - - nonce : bytes/bytearray/memoryview - A value that must never be reused for any other encryption. - - There are no restrictions on its length, - but it is recommended to use at least 16 bytes. - - The nonce shall never repeat for two - different messages encrypted with the same key, - but it does not need to be random. - - If not provided, a 16 byte nonce will be randomly created. - - mac_len : integer - Length of the MAC, in bytes. - It must be no larger than 16 bytes (which is the default). 
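    Example (illustrative use through the public ``Crypto.Cipher.AES`` API,
    which routes ``MODE_GCM`` to this factory):

        >>> from Crypto.Cipher import AES
        >>> from Crypto.Random import get_random_bytes
        >>> key = get_random_bytes(16)
        >>> enc = AES.new(key, AES.MODE_GCM)
        >>> ct, tag = enc.encrypt_and_digest(b"attack at dawn")
        >>> dec = AES.new(key, AES.MODE_GCM, nonce=enc.nonce)
        >>> dec.decrypt_and_verify(ct, tag)
        b'attack at dawn'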
- """ - - try: - key = kwargs.pop("key") - except KeyError as e: - raise TypeError("Missing parameter:" + str(e)) - - nonce = kwargs.pop("nonce", None) - if nonce is None: - nonce = get_random_bytes(16) - mac_len = kwargs.pop("mac_len", 16) - - # Not documented - only used for testing - use_clmul = kwargs.pop("use_clmul", True) - if use_clmul and _ghash_clmul: - ghash_c = _ghash_clmul - else: - ghash_c = _ghash_portable - - return GcmMode(factory, key, nonce, mac_len, kwargs, ghash_c) diff --git a/spaces/joheras/glove-relations/README.md b/spaces/joheras/glove-relations/README.md deleted file mode 100644 index 9dd5ad358beaa8a84e1e0e93cb028bc85c1a3f59..0000000000000000000000000000000000000000 --- a/spaces/joheras/glove-relations/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Glove Relations -emoji: 💻 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.8.2 -app_file: app.py -pinned: false -license: cc ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jordonpeter01/MusicGen2/audiocraft/modules/lstm.py b/spaces/jordonpeter01/MusicGen2/audiocraft/modules/lstm.py deleted file mode 100644 index c0866175950c1ca4f6cca98649525e6481853bba..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen2/audiocraft/modules/lstm.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from torch import nn - - -class StreamableLSTM(nn.Module): - """LSTM without worrying about the hidden state, nor the layout of the data. - Expects input as convolutional layout. - """ - def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True): - super().__init__() - self.skip = skip - self.lstm = nn.LSTM(dimension, dimension, num_layers) - - def forward(self, x): - x = x.permute(2, 0, 1) - y, _ = self.lstm(x) - if self.skip: - y = y + x - y = y.permute(1, 2, 0) - return y diff --git a/spaces/jotap12/enso/README.md b/spaces/jotap12/enso/README.md deleted file mode 100644 index 61361dc66cce66a5ca2530219be9edcf18a0f212..0000000000000000000000000000000000000000 --- a/spaces/jotap12/enso/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Enso -emoji: 📉 -colorFrom: red -colorTo: yellow -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/julien-c/sveltekit-demo/build/_app/assets/pages/index.svelte-c63fe1c6.css b/spaces/julien-c/sveltekit-demo/build/_app/assets/pages/index.svelte-c63fe1c6.css deleted file mode 100644 index 1b45f8a7fb99f4f657a11c0ea30eac9ee8ba1cf1..0000000000000000000000000000000000000000 --- a/spaces/julien-c/sveltekit-demo/build/_app/assets/pages/index.svelte-c63fe1c6.css +++ /dev/null @@ -1 +0,0 @@ -.counter.svelte-139m1ow.svelte-139m1ow{display:flex;border-top:1px solid rgba(0,0,0,.1);border-bottom:1px solid rgba(0,0,0,.1);margin:1rem 0}.counter.svelte-139m1ow button.svelte-139m1ow{width:2em;padding:0;display:flex;align-items:center;justify-content:center;border:0;background-color:transparent;color:var(--text-color);font-size:2rem}.counter.svelte-139m1ow 
button.svelte-139m1ow:hover{background-color:var(--secondary-color)}svg.svelte-139m1ow.svelte-139m1ow{width:25%;height:25%}path.svelte-139m1ow.svelte-139m1ow{vector-effect:non-scaling-stroke;stroke-width:2px;stroke:var(--text-color)}.counter-viewport.svelte-139m1ow.svelte-139m1ow{width:8em;height:4em;overflow:hidden;text-align:center;position:relative}.counter-viewport.svelte-139m1ow strong.svelte-139m1ow{position:absolute;display:flex;width:100%;height:100%;font-weight:400;color:var(--accent-color);font-size:4rem;align-items:center;justify-content:center}.counter-digits.svelte-139m1ow.svelte-139m1ow{position:absolute;width:100%;height:100%}section.svelte-mjk9ig.svelte-mjk9ig{display:flex;flex-direction:column;justify-content:center;align-items:center;flex:1}h1.svelte-mjk9ig.svelte-mjk9ig{width:100%}.welcome.svelte-mjk9ig.svelte-mjk9ig{position:relative;width:100%;height:0;padding:0 0 24.16992%}.welcome.svelte-mjk9ig img.svelte-mjk9ig{position:absolute;width:100%;height:100%;top:0;display:block} diff --git a/spaces/ka1kuk/fastapi/g4f/__init__.py b/spaces/ka1kuk/fastapi/g4f/__init__.py deleted file mode 100644 index a0b4bac6aa4de9c0449095a3874c2cb9716169d7..0000000000000000000000000000000000000000 --- a/spaces/ka1kuk/fastapi/g4f/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -import sys -from . import Provider -from g4f.models import Model, ModelUtils - - -class ChatCompletion: - @staticmethod - def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs): - kwargs['auth'] = auth - - if provider and provider.needs_auth and not auth: - print( - f'ValueError: {provider.__name__} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr) - sys.exit(1) - - try: - if isinstance(model, str): - try: - model = ModelUtils.convert[model] - except KeyError: - raise Exception(f'The model: {model} does not exist') - - engine = model.best_provider if not provider else provider - - if not engine.supports_stream and stream == True: - print( - f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr) - sys.exit(1) - - print(f'Using {engine.__name__} provider') - - return (engine._create_completion(model.name, messages, stream, **kwargs) - if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs))) - except TypeError as e: - print(e) - arg: str = str(e).split("'")[1] - print( - f"ValueError: {engine.__name__} does not support '{arg}' argument", file=sys.stderr) - sys.exit(1) diff --git a/spaces/keras-io/WGAN-GP/app.py b/spaces/keras-io/WGAN-GP/app.py deleted file mode 100644 index 7fd92960d7c67ca1c505e0f87c249b23c1b502c3..0000000000000000000000000000000000000000 --- a/spaces/keras-io/WGAN-GP/app.py +++ /dev/null @@ -1,45 +0,0 @@ -from huggingface_hub import from_pretrained_keras -import matplotlib.pyplot as plt -from math import sqrt, ceil -import tensorflow as tf -import gradio as gr -import numpy as np - -model1 = tf.keras.models.load_model("mnist.h5", compile=False) -model2 = from_pretrained_keras("keras-io/WGAN-GP") - -title = "WGAN-GP" -description = "Image Generation(Fashion Mnist and Handwritten Digits) Using WGAN" -article = """ -<p style='text-align: center'> - <a href='https://keras.io/examples/generative/wgan_gp/' target='_blank'>Keras Example given by A_K_Nain</a> - <br> - Space by Gitesh Chawda - </p> - """ - -def Predict(model, num_images): - random_latent_vectors = tf.random.normal(shape=(int(num_images), 128)) - predictions = 
model(random_latent_vectors) - num = ceil(sqrt(num_images)) - images = np.zeros((28*num, 28*num), dtype=float) - n = 0 - for i in range(num): - for j in range(num): - if n == num_images: - break - images[i* 28 : (i+1)*28, j*28 : (j+1)*28] = predictions[n, :, :, 0] - n += 1 - return images - -def inference(num_images, Choose: str): - if Choose == 'Fashion_mnist': - result = Predict(model2, num_images) - else: - result = Predict(model1, num_images) - return result - -inputs = [gr.inputs.Number(label="number of images"), gr.inputs.Radio(['Fashion_mnist', 'Handwritten_digits_mnist'])] -outputs = gr.outputs.Image(label="Output Image") -examples = [[4,"Handwritten_digits_mnist"], [6,"Handwritten_digits_mnist"],[10,"Fashion_mnist"]] -gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples).launch() \ No newline at end of file diff --git a/spaces/kevinwang676/Bark-with-Voice-Cloning/app.py b/spaces/kevinwang676/Bark-with-Voice-Cloning/app.py deleted file mode 100644 index 4c9401f32554685297dcd8a22aff248418d65060..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/Bark-with-Voice-Cloning/app.py +++ /dev/null @@ -1,674 +0,0 @@ -from cProfile import label -import dataclasses -from distutils.command.check import check -from doctest import Example -import gradio as gr -import os -import sys -import numpy as np -import logging -import torch -import pytorch_seed -import time - - -import math -import tempfile -from typing import Optional, Tuple, Union - -import matplotlib.pyplot as plt -from loguru import logger -from PIL import Image -from torch import Tensor -from torchaudio.backend.common import AudioMetaData - -from df import config -from df.enhance import enhance, init_df, load_audio, save_audio -from df.io import resample - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -model, df, _ = init_df("./DeepFilterNet2", config_allow_defaults=True) -model = model.to(device=device).eval() - -fig_noisy: plt.Figure -fig_enh: plt.Figure -ax_noisy: plt.Axes -ax_enh: plt.Axes -fig_noisy, ax_noisy = plt.subplots(figsize=(15.2, 4)) -fig_noisy.set_tight_layout(True) -fig_enh, ax_enh = plt.subplots(figsize=(15.2, 4)) -fig_enh.set_tight_layout(True) - -NOISES = { - "None": None, - "Kitchen": "samples/dkitchen.wav", - "Living Room": "samples/dliving.wav", - "River": "samples/nriver.wav", - "Cafe": "samples/scafe.wav", -} - - -from xml.sax import saxutils -from bark.api import generate_with_settings -from bark.api import save_as_prompt -from util.settings import Settings -#import nltk - -from bark import SAMPLE_RATE -from cloning.clonevoice import clone_voice -from bark.generation import SAMPLE_RATE, preload_models, _load_history_prompt, codec_decode -from scipy.io.wavfile import write as write_wav -from util.parseinput import split_and_recombine_text, build_ssml, is_ssml, create_clips_from_ssml -from datetime import datetime -from tqdm.auto import tqdm -from util.helper import create_filename, add_id3_tag -from swap_voice import swap_voice_from_audio -from training.training_prepare import prepare_semantics_from_text, prepare_wavs_from_semantics -from training.train import training_prepare_files, train - - -# Denoise - -def mix_at_snr(clean, noise, snr, eps=1e-10): - """Mix clean and noise signal at a given SNR. - Args: - clean: 1D Tensor with the clean signal to mix. - noise: 1D Tensor of shape. - snr: Signal to noise ratio. - Returns: - clean: 1D Tensor with gain changed according to the snr. 
-        noise: 1D Tensor with the combined noise channels.
-        mix: 1D Tensor with added clean and noise signals.
-    """
-    clean = torch.as_tensor(clean).mean(0, keepdim=True)
-    noise = torch.as_tensor(noise).mean(0, keepdim=True)
-    if noise.shape[1] < clean.shape[1]:
-        noise = noise.repeat((1, int(math.ceil(clean.shape[1] / noise.shape[1]))))
-    max_start = int(noise.shape[1] - clean.shape[1])
-    start = torch.randint(0, max_start, ()).item() if max_start > 0 else 0
-    logger.debug(f"start: {start}, {clean.shape}")
-    noise = noise[:, start : start + clean.shape[1]]
-    E_speech = torch.mean(clean.pow(2)) + eps
-    E_noise = torch.mean(noise.pow(2))
-    K = torch.sqrt((E_noise / E_speech) * 10 ** (snr / 10) + eps)
-    noise = noise / K
-    mixture = clean + noise
-    logger.debug(f"mixture: {mixture.shape}")
-    assert torch.isfinite(mixture).all()
-    max_m = mixture.abs().max()
-    if max_m > 1:
-        logger.warning(f"Clipping detected during mixing. Reducing gain by {1/max_m}")
-        clean, noise, mixture = clean / max_m, noise / max_m, mixture / max_m
-    return clean, noise, mixture
-
-
-def load_audio_gradio(
-    audio_or_file: Union[None, str, Tuple[int, np.ndarray]], sr: int
-) -> Optional[Tuple[Tensor, AudioMetaData]]:
-    if audio_or_file is None:
-        return None
-    if isinstance(audio_or_file, str):
-        if audio_or_file.lower() == "none":
-            return None
-        # First try default format
-        audio, meta = load_audio(audio_or_file, sr)
-    else:
-        meta = AudioMetaData(-1, -1, -1, -1, "")
-        assert isinstance(audio_or_file, (tuple, list))
-        meta.sample_rate, audio_np = audio_or_file
-        # Gradio documentation says the shape is [samples, 2], but apparently sometimes it's not.
-        audio_np = audio_np.reshape(audio_np.shape[0], -1).T
-        if audio_np.dtype == np.int16:
-            audio_np = (audio_np / (1 << 15)).astype(np.float32)
-        elif audio_np.dtype == np.int32:
-            audio_np = (audio_np / (1 << 31)).astype(np.float32)
-        audio = resample(torch.from_numpy(audio_np), meta.sample_rate, sr)
-    return audio, meta
-
-
-def demo_fn(speech_upl: str, noise_type: str, snr: int, mic_input: str):
-    if mic_input:
-        speech_upl = mic_input
-    sr = config("sr", 48000, int, section="df")
-    logger.info(f"Got parameters speech_upl: {speech_upl}, noise: {noise_type}, snr: {snr}")
-    snr = int(snr)
-    noise_fn = NOISES[noise_type]
-    meta = AudioMetaData(-1, -1, -1, -1, "")
-    max_s = 1000  # limit to 1000 seconds
-    if speech_upl is not None:
-        sample, meta = load_audio(speech_upl, sr)
-        max_len = max_s * sr
-        if sample.shape[-1] > max_len:
-            start = torch.randint(0, sample.shape[-1] - max_len, ()).item()
-            sample = sample[..., start : start + max_len]
-    else:
-        sample, meta = load_audio("samples/p232_013_clean.wav", sr)
-        sample = sample[..., : max_s * sr]
-    if sample.dim() > 1 and sample.shape[0] > 1:
-        assert (
-            sample.shape[1] > sample.shape[0]
-        ), f"Expecting channels first, but got {sample.shape}"
-        sample = sample.mean(dim=0, keepdim=True)
-    logger.info(f"Loaded sample with shape {sample.shape}")
-    if noise_fn is not None:
-        noise, _ = load_audio(noise_fn, sr)  # type: ignore
-        logger.info(f"Loaded noise with shape {noise.shape}")
-        _, _, sample = mix_at_snr(sample, noise, snr)
-    logger.info("Start denoising audio")
-    enhanced = enhance(model, df, sample)
-    logger.info("Denoising finished")
-    lim = torch.linspace(0.0, 1.0, int(sr * 0.15)).unsqueeze(0)
-    lim = torch.cat((lim, torch.ones(1, enhanced.shape[1] - lim.shape[1])), dim=1)
-    enhanced = enhanced * lim
-    if meta.sample_rate != sr:
-        enhanced = resample(enhanced, sr, meta.sample_rate)
-        sample = resample(sample, sr,
meta.sample_rate) - sr = meta.sample_rate - enhanced_wav = tempfile.NamedTemporaryFile(suffix="enhanced.wav", delete=False).name - save_audio(enhanced_wav, enhanced, sr) - logger.info(f"saved audios: {enhanced_wav}") - ax_noisy.clear() - ax_enh.clear() - # noisy_wav = gr.make_waveform(noisy_fn, bar_count=200) - # enh_wav = gr.make_waveform(enhanced_fn, bar_count=200) - return enhanced_wav - - -def specshow( - spec, - ax=None, - title=None, - xlabel=None, - ylabel=None, - sr=48000, - n_fft=None, - hop=None, - t=None, - f=None, - vmin=-100, - vmax=0, - xlim=None, - ylim=None, - cmap="inferno", -): - """Plots a spectrogram of shape [F, T]""" - spec_np = spec.cpu().numpy() if isinstance(spec, torch.Tensor) else spec - if ax is not None: - set_title = ax.set_title - set_xlabel = ax.set_xlabel - set_ylabel = ax.set_ylabel - set_xlim = ax.set_xlim - set_ylim = ax.set_ylim - else: - ax = plt - set_title = plt.title - set_xlabel = plt.xlabel - set_ylabel = plt.ylabel - set_xlim = plt.xlim - set_ylim = plt.ylim - if n_fft is None: - if spec.shape[0] % 2 == 0: - n_fft = spec.shape[0] * 2 - else: - n_fft = (spec.shape[0] - 1) * 2 - hop = hop or n_fft // 4 - if t is None: - t = np.arange(0, spec_np.shape[-1]) * hop / sr - if f is None: - f = np.arange(0, spec_np.shape[0]) * sr // 2 / (n_fft // 2) / 1000 - im = ax.pcolormesh( - t, f, spec_np, rasterized=True, shading="auto", vmin=vmin, vmax=vmax, cmap=cmap - ) - if title is not None: - set_title(title) - if xlabel is not None: - set_xlabel(xlabel) - if ylabel is not None: - set_ylabel(ylabel) - if xlim is not None: - set_xlim(xlim) - if ylim is not None: - set_ylim(ylim) - return im - - -def spec_im( - audio: torch.Tensor, - figsize=(15, 5), - colorbar=False, - colorbar_format=None, - figure=None, - labels=True, - **kwargs, -) -> Image: - audio = torch.as_tensor(audio) - if labels: - kwargs.setdefault("xlabel", "Time [s]") - kwargs.setdefault("ylabel", "Frequency [Hz]") - n_fft = kwargs.setdefault("n_fft", 1024) - hop = kwargs.setdefault("hop", 512) - w = torch.hann_window(n_fft, device=audio.device) - spec = torch.stft(audio, n_fft, hop, window=w, return_complex=False) - spec = spec.div_(w.pow(2).sum()) - spec = torch.view_as_complex(spec).abs().clamp_min(1e-12).log10().mul(10) - kwargs.setdefault("vmax", max(0.0, spec.max().item())) - - if figure is None: - figure = plt.figure(figsize=figsize) - figure.set_tight_layout(True) - if spec.dim() > 2: - spec = spec.squeeze(0) - im = specshow(spec, **kwargs) - if colorbar: - ckwargs = {} - if "ax" in kwargs: - if colorbar_format is None: - if kwargs.get("vmin", None) is not None or kwargs.get("vmax", None) is not None: - colorbar_format = "%+2.0f dB" - ckwargs = {"ax": kwargs["ax"]} - plt.colorbar(im, format=colorbar_format, **ckwargs) - figure.canvas.draw() - return Image.frombytes("RGB", figure.canvas.get_width_height(), figure.canvas.tostring_rgb()) - - -def toggle(choice): - if choice == "mic": - return gr.update(visible=True, value=None), gr.update(visible=False, value=None) - else: - return gr.update(visible=False, value=None), gr.update(visible=True, value=None) - -# Bark - -settings = Settings('config.yaml') - -def generate_text_to_speech(text, selected_speaker, text_temp, waveform_temp, eos_prob, quick_generation, complete_settings, seed, batchcount, progress=gr.Progress(track_tqdm=True)): - # Chunk the text into smaller pieces then combine the generated audio - - # generation settings - if selected_speaker == 'None': - selected_speaker = None - - voice_name = selected_speaker - - if text == None or 
len(text) < 1: - if selected_speaker == None: - raise gr.Error('No text entered!') - - # Extract audio data from speaker if no text and speaker selected - voicedata = _load_history_prompt(voice_name) - audio_arr = codec_decode(voicedata["fine_prompt"]) - result = create_filename(settings.output_folder_path, "None", "extract",".wav") - save_wav(audio_arr, result) - return result - - if batchcount < 1: - batchcount = 1 - - - silenceshort = np.zeros(int((float(settings.silence_sentence) / 1000.0) * SAMPLE_RATE), dtype=np.int16) # quarter second of silence - silencelong = np.zeros(int((float(settings.silence_speakers) / 1000.0) * SAMPLE_RATE), dtype=np.float32) # half a second of silence - use_last_generation_as_history = "Use last generation as history" in complete_settings - save_last_generation = "Save generation as Voice" in complete_settings - for l in range(batchcount): - currentseed = seed - if seed != None and seed > 2**32 - 1: - logger.warning(f"Seed {seed} > 2**32 - 1 (max), setting to random") - currentseed = None - if currentseed == None or currentseed <= 0: - currentseed = np.random.default_rng().integers(1, 2**32 - 1) - assert(0 < currentseed and currentseed < 2**32) - - progress(0, desc="Generating") - - full_generation = None - - all_parts = [] - complete_text = "" - text = text.lstrip() - if is_ssml(text): - list_speak = create_clips_from_ssml(text) - prev_speaker = None - for i, clip in tqdm(enumerate(list_speak), total=len(list_speak)): - selected_speaker = clip[0] - # Add pause break between speakers - if i > 0 and selected_speaker != prev_speaker: - all_parts += [silencelong.copy()] - prev_speaker = selected_speaker - text = clip[1] - text = saxutils.unescape(text) - if selected_speaker == "None": - selected_speaker = None - - print(f"\nGenerating Text ({i+1}/{len(list_speak)}) -> {selected_speaker} (Seed {currentseed}):`{text}`") - complete_text += text - with pytorch_seed.SavedRNG(currentseed): - audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob) - currentseed = torch.random.initial_seed() - if len(list_speak) > 1: - filename = create_filename(settings.output_folder_path, currentseed, "audioclip",".wav") - save_wav(audio_array, filename) - add_id3_tag(filename, text, selected_speaker, currentseed) - - all_parts += [audio_array] - else: - texts = split_and_recombine_text(text, settings.input_text_desired_length, settings.input_text_max_length) - for i, text in tqdm(enumerate(texts), total=len(texts)): - print(f"\nGenerating Text ({i+1}/{len(texts)}) -> {selected_speaker} (Seed {currentseed}):`{text}`") - complete_text += text - if quick_generation == True: - with pytorch_seed.SavedRNG(currentseed): - audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob) - currentseed = torch.random.initial_seed() - else: - full_output = use_last_generation_as_history or save_last_generation - if full_output: - full_generation, audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob, output_full=True) - else: - audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob) - - # Noticed this in the HF Demo - convert to 16bit int -32767/32767 - most used audio format - # audio_array = (audio_array * 32767).astype(np.int16) - - 
if len(texts) > 1: - filename = create_filename(settings.output_folder_path, currentseed, "audioclip",".wav") - save_wav(audio_array, filename) - add_id3_tag(filename, text, selected_speaker, currentseed) - - if quick_generation == False and (save_last_generation == True or use_last_generation_as_history == True): - # save to npz - voice_name = create_filename(settings.output_folder_path, seed, "audioclip", ".npz") - save_as_prompt(voice_name, full_generation) - if use_last_generation_as_history: - selected_speaker = voice_name - - all_parts += [audio_array] - # Add short pause between sentences - if text[-1] in "!?.\n" and i > 1: - all_parts += [silenceshort.copy()] - - # save & play audio - result = create_filename(settings.output_folder_path, currentseed, "final",".wav") - save_wav(np.concatenate(all_parts), result) - # write id3 tag with text truncated to 60 chars, as a precaution... - add_id3_tag(result, complete_text, selected_speaker, currentseed) - - return result - - - -def save_wav(audio_array, filename): - write_wav(filename, SAMPLE_RATE, audio_array) - -def save_voice(filename, semantic_prompt, coarse_prompt, fine_prompt): - np.savez_compressed( - filename, - semantic_prompt=semantic_prompt, - coarse_prompt=coarse_prompt, - fine_prompt=fine_prompt - ) - - -def on_quick_gen_changed(checkbox): - if checkbox == False: - return gr.CheckboxGroup.update(visible=True) - return gr.CheckboxGroup.update(visible=False) - -def delete_output_files(checkbox_state): - if checkbox_state: - outputs_folder = os.path.join(os.getcwd(), settings.output_folder_path) - if os.path.exists(outputs_folder): - purgedir(outputs_folder) - return False - - -# https://stackoverflow.com/a/54494779 -def purgedir(parent): - for root, dirs, files in os.walk(parent): - for item in files: - # Delete subordinate files - filespec = os.path.join(root, item) - os.unlink(filespec) - for item in dirs: - # Recursively perform this operation for subordinate directories - purgedir(os.path.join(root, item)) - -def convert_text_to_ssml(text, selected_speaker): - return build_ssml(text, selected_speaker) - - -def training_prepare(selected_step, num_text_generations, progress=gr.Progress(track_tqdm=True)): - if selected_step == prepare_training_list[0]: - prepare_semantics_from_text() - else: - prepare_wavs_from_semantics() - return None - - -def start_training(save_model_epoch, max_epochs, progress=gr.Progress(track_tqdm=True)): - training_prepare_files("./training/data/", "./training/data/checkpoint/hubert_base_ls960.pt") - train("./training/data/", save_model_epoch, max_epochs) - return None - - - -def apply_settings(themes, input_server_name, input_server_port, input_server_public, input_desired_len, input_max_len, input_silence_break, input_silence_speaker): - settings.selected_theme = themes - settings.server_name = input_server_name - settings.server_port = input_server_port - settings.server_share = input_server_public - settings.input_text_desired_length = input_desired_len - settings.input_text_max_length = input_max_len - settings.silence_sentence = input_silence_break - settings.silence_speaker = input_silence_speaker - settings.save() - -def restart(): - global restart_server - restart_server = True - - -def create_version_html(): - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - versions_html = f""" -python: <span title="{sys.version}">{python_version}</span> - •  -torch: {getattr(torch, '__long_version__',torch.__version__)} - •  -gradio: {gr.__version__} -""" - return versions_html - - - 
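# A minimal sketch of reading back a voice prompt written by save_voice() above,
# assuming only numpy; load_voice is an illustrative helper, not part of the
# Bark API, and the key names simply mirror the np.savez_compressed call.
import numpy as np

def load_voice(filename):
    """Return the three Bark prompt arrays stored in a .npz voice file."""
    with np.load(filename) as data:
        return (
            data["semantic_prompt"],
            data["coarse_prompt"],
            data["fine_prompt"],
        )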
-logger = logging.getLogger(__name__) -APPTITLE = "Bark Voice Cloning UI" - - -autolaunch = False - -if len(sys.argv) > 1: - autolaunch = "-autolaunch" in sys.argv - -if torch.cuda.is_available() == False: - os.environ['BARK_FORCE_CPU'] = 'True' - logger.warning("No CUDA detected, fallback to CPU!") - -print(f'smallmodels={os.environ.get("SUNO_USE_SMALL_MODELS", False)}') -print(f'enablemps={os.environ.get("SUNO_ENABLE_MPS", False)}') -print(f'offloadcpu={os.environ.get("SUNO_OFFLOAD_CPU", False)}') -print(f'forcecpu={os.environ.get("BARK_FORCE_CPU", False)}') -print(f'autolaunch={autolaunch}\n\n') - -#print("Updating nltk\n") -#nltk.download('punkt') - -print("Preloading Models\n") -preload_models() - -available_themes = ["Default", "gradio/glass", "gradio/monochrome", "gradio/seafoam", "gradio/soft", "gstaff/xkcd", "freddyaboulton/dracula_revamped", "ysharma/steampunk"] -tokenizer_language_list = ["de","en", "pl"] -prepare_training_list = ["Step 1: Semantics from Text","Step 2: WAV from Semantics"] - -seed = -1 -server_name = settings.server_name -if len(server_name) < 1: - server_name = None -server_port = settings.server_port -if server_port <= 0: - server_port = None -global run_server -global restart_server - -run_server = True - -while run_server: - # Collect all existing speakers/voices in dir - speakers_list = [] - - for root, dirs, files in os.walk("./bark/assets/prompts"): - for file in files: - if file.endswith(".npz"): - pathpart = root.replace("./bark/assets/prompts", "") - name = os.path.join(pathpart, file[:-4]) - if name.startswith("/") or name.startswith("\\"): - name = name[1:] - speakers_list.append(name) - - speakers_list = sorted(speakers_list, key=lambda x: x.lower()) - speakers_list.insert(0, 'None') - - print(f'Launching {APPTITLE} Server') - - # Create Gradio Blocks - - with gr.Blocks(title=f"{APPTITLE}", mode=f"{APPTITLE}", theme=settings.selected_theme) as barkgui: - gr.Markdown("# <center>🐶🎶⭐ - Bark Voice Cloning</center>") - gr.Markdown("## <center>🤗 - If you like this space, please star my [github repo](https://github.com/KevinWang676/Bark-Voice-Cloning)</center>") - gr.Markdown("### <center>🎡 - Based on [bark-gui](https://github.com/C0untFloyd/bark-gui)</center>") - gr.Markdown(f""" You can duplicate and use it with a GPU: <a href="https://huggingface.co/spaces/{os.getenv('SPACE_ID')}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a> - or open in [Colab](https://colab.research.google.com/github/KevinWang676/Bark-Voice-Cloning/blob/main/Bark_Voice_Cloning.ipynb) for quick start 🌟 P.S. 
Voice cloning needs a GPU, but TTS doesn't 😄 - """) - - with gr.Tab("🎙️ - Clone Voice"): - with gr.Row(): - input_audio_filename = gr.Audio(label="Input audio.wav", source="upload", type="filepath") - #transcription_text = gr.Textbox(label="Transcription Text", lines=1, placeholder="Enter Text of your Audio Sample here...") - with gr.Row(): - with gr.Column(): - initialname = "/home/user/app/bark/assets/prompts/file" - output_voice = gr.Textbox(label="Filename of trained Voice (do not change the initial name)", lines=1, placeholder=initialname, value=initialname, visible=False) - with gr.Column(): - tokenizerlang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1], visible=False) - with gr.Row(): - clone_voice_button = gr.Button("Create Voice", variant="primary") - with gr.Row(): - dummy = gr.Text(label="Progress") - npz_file = gr.File(label=".npz file") - speakers_list.insert(0, npz_file) # add prompt - - with gr.Tab("🎵 - TTS"): - with gr.Row(): - with gr.Column(): - placeholder = "Enter text here." - input_text = gr.Textbox(label="Input Text", lines=4, placeholder=placeholder) - convert_to_ssml_button = gr.Button("Convert Input Text to SSML") - with gr.Column(): - seedcomponent = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1) - batchcount = gr.Number(label="Batch count", precision=0, value=1) - - with gr.Row(): - with gr.Column(): - gr.Markdown("[Voice Prompt Library](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c)") - speaker = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice (Choose “file” if you wanna use the custom voice)") - - with gr.Column(): - text_temp = gr.Slider(0.1, 1.0, value=0.6, label="Generation Temperature", info="1.0 more diverse, 0.1 more conservative") - waveform_temp = gr.Slider(0.1, 1.0, value=0.7, label="Waveform temperature", info="1.0 more diverse, 0.1 more conservative") - - with gr.Row(): - with gr.Column(): - quick_gen_checkbox = gr.Checkbox(label="Quick Generation", value=True) - settings_checkboxes = ["Use last generation as history", "Save generation as Voice"] - complete_settings = gr.CheckboxGroup(choices=settings_checkboxes, value=settings_checkboxes, label="Detailed Generation Settings", type="value", interactive=True, visible=False) - with gr.Column(): - eos_prob = gr.Slider(0.0, 0.5, value=0.05, label="End of sentence probability") - - with gr.Row(): - with gr.Column(): - tts_create_button = gr.Button("Generate", variant="primary") - with gr.Column(): - hidden_checkbox = gr.Checkbox(visible=False) - button_stop_generation = gr.Button("Stop generation") - with gr.Row(): - output_audio = gr.Audio(label="Generated Audio", type="filepath") - - with gr.Row(): - with gr.Column(): - radio = gr.Radio( - ["mic", "file"], value="file", label="How would you like to upload your audio?", visible=False - ) - mic_input = gr.Mic(label="Input", type="filepath", visible=False) - audio_file = output_audio - inputs = [ - audio_file, - gr.Dropdown( - label="Add background noise", - choices=list(NOISES.keys()), - value="None", visible =False, - ), - gr.Dropdown( - label="Noise Level (SNR)", - choices=["-5", "0", "10", "20"], - value="0", visible =False, - ), - mic_input, - ] - btn_denoise = gr.Button("Denoise", variant="primary") - with gr.Column(): - outputs = [ - gr.Audio(type="filepath", label="Enhanced audio"), - ] - btn_denoise.click(fn=demo_fn, inputs=inputs, outputs=outputs) - radio.change(toggle, radio, [mic_input, audio_file]) 
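            # How the wiring above fits together (descriptive note, assuming the
            # Gradio 3.x callback API used throughout this file):
            #   - radio.change(toggle, ...) flips visibility between the hidden
            #     microphone input and the generated-audio file input.
            #   - btn_denoise.click(...) feeds that audio, the two hidden
            #     noise/SNR dropdowns and the mic recording into demo_fn(),
            #     which runs the DeepFilterNet model and returns the path of
            #     the enhanced file shown in the "Enhanced audio" player.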
- - with gr.Tab("🔮 - Voice Conversion"): - with gr.Row(): - swap_audio_filename = gr.Audio(label="Input audio.wav to swap voice", source="upload", type="filepath") - with gr.Row(): - with gr.Column(): - swap_tokenizer_lang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1]) - swap_seed = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1) - with gr.Column(): - speaker_swap = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice (Choose “file” if you wanna use the custom voice)") - swap_batchcount = gr.Number(label="Batch count", precision=0, value=1) - with gr.Row(): - swap_voice_button = gr.Button("Generate", variant="primary") - with gr.Row(): - output_swap = gr.Audio(label="Generated Audio", type="filepath") - - - quick_gen_checkbox.change(fn=on_quick_gen_changed, inputs=quick_gen_checkbox, outputs=complete_settings) - convert_to_ssml_button.click(convert_text_to_ssml, inputs=[input_text, speaker],outputs=input_text) - gen_click = tts_create_button.click(generate_text_to_speech, inputs=[input_text, speaker, text_temp, waveform_temp, eos_prob, quick_gen_checkbox, complete_settings, seedcomponent, batchcount],outputs=output_audio) - button_stop_generation.click(fn=None, inputs=None, outputs=None, cancels=[gen_click]) - - - - swap_voice_button.click(swap_voice_from_audio, inputs=[swap_audio_filename, speaker_swap, swap_tokenizer_lang, swap_seed, swap_batchcount], outputs=output_swap) - clone_voice_button.click(clone_voice, inputs=[input_audio_filename, output_voice], outputs=[dummy, npz_file]) - - - restart_server = False - try: - barkgui.queue().launch(show_error=True) - except: - restart_server = True - run_server = False - try: - while restart_server == False: - time.sleep(1.0) - except (KeyboardInterrupt, OSError): - print("Keyboard interruption in main thread... 
closing server.") - run_server = False - barkgui.close() - diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/extract_kp_videos.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/extract_kp_videos.py deleted file mode 100644 index 21616a3b4b5077ffdce99621395237b4edcff58c..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/extract_kp_videos.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import cv2 -import time -import glob -import argparse -import face_alignment -import numpy as np -from PIL import Image -from tqdm import tqdm -from itertools import cycle - -from torch.multiprocessing import Pool, Process, set_start_method - -class KeypointExtractor(): - def __init__(self, device): - self.detector = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, - device=device) - - def extract_keypoint(self, images, name=None, info=True): - if isinstance(images, list): - keypoints = [] - if info: - i_range = tqdm(images,desc='landmark Det:') - else: - i_range = images - - for image in i_range: - current_kp = self.extract_keypoint(image) - if np.mean(current_kp) == -1 and keypoints: - keypoints.append(keypoints[-1]) - else: - keypoints.append(current_kp[None]) - - keypoints = np.concatenate(keypoints, 0) - np.savetxt(os.path.splitext(name)[0]+'.txt', keypoints.reshape(-1)) - return keypoints - else: - while True: - try: - keypoints = self.detector.get_landmarks_from_image(np.array(images))[0] - break - except RuntimeError as e: - if str(e).startswith('CUDA'): - print("Warning: out of memory, sleep for 1s") - time.sleep(1) - else: - print(e) - break - except TypeError: - print('No face detected in this image') - shape = [68, 2] - keypoints = -1. * np.ones(shape) - break - if name is not None: - np.savetxt(os.path.splitext(name)[0]+'.txt', keypoints.reshape(-1)) - return keypoints - -def read_video(filename): - frames = [] - cap = cv2.VideoCapture(filename) - while cap.isOpened(): - ret, frame = cap.read() - if ret: - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - frame = Image.fromarray(frame) - frames.append(frame) - else: - break - cap.release() - return frames - -def run(data): - filename, opt, device = data - os.environ['CUDA_VISIBLE_DEVICES'] = device - kp_extractor = KeypointExtractor() - images = read_video(filename) - name = filename.split('/')[-2:] - os.makedirs(os.path.join(opt.output_dir, name[-2]), exist_ok=True) - kp_extractor.extract_keypoint( - images, - name=os.path.join(opt.output_dir, name[-2], name[-1]) - ) - -if __name__ == '__main__': - set_start_method('spawn') - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('--input_dir', type=str, help='the folder of the input files') - parser.add_argument('--output_dir', type=str, help='the folder of the output files') - parser.add_argument('--device_ids', type=str, default='0,1') - parser.add_argument('--workers', type=int, default=4) - - opt = parser.parse_args() - filenames = list() - VIDEO_EXTENSIONS_LOWERCASE = {'mp4'} - VIDEO_EXTENSIONS = VIDEO_EXTENSIONS_LOWERCASE.union({f.upper() for f in VIDEO_EXTENSIONS_LOWERCASE}) - extensions = VIDEO_EXTENSIONS - - for ext in extensions: - os.listdir(f'{opt.input_dir}') - print(f'{opt.input_dir}/*.{ext}') - filenames = sorted(glob.glob(f'{opt.input_dir}/*.{ext}')) - print('Total number of videos:', len(filenames)) - pool = Pool(opt.workers) - args_list = cycle([opt]) - device_ids = opt.device_ids.split(",") - device_ids = cycle(device_ids) - for data in 
tqdm(pool.imap_unordered(run, zip(filenames, args_list, device_ids))): - None diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/backbones/iresnet2060.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/backbones/iresnet2060.py deleted file mode 100644 index 21d1122144d207637d2444cba1f68fe630c89f31..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/backbones/iresnet2060.py +++ /dev/null @@ -1,176 +0,0 @@ -import torch -from torch import nn - -assert torch.__version__ >= "1.8.1" -from torch.utils.checkpoint import checkpoint_sequential - -__all__ = ['iresnet2060'] - - -def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - groups=groups, - bias=False, - dilation=dilation) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=1, - stride=stride, - bias=False) - - -class IBasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, - groups=1, base_width=64, dilation=1): - super(IBasicBlock, self).__init__() - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") - self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05, ) - self.conv1 = conv3x3(inplanes, planes) - self.bn2 = nn.BatchNorm2d(planes, eps=1e-05, ) - self.prelu = nn.PReLU(planes) - self.conv2 = conv3x3(planes, planes, stride) - self.bn3 = nn.BatchNorm2d(planes, eps=1e-05, ) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - out = self.bn1(x) - out = self.conv1(out) - out = self.bn2(out) - out = self.prelu(out) - out = self.conv2(out) - out = self.bn3(out) - if self.downsample is not None: - identity = self.downsample(x) - out += identity - return out - - -class IResNet(nn.Module): - fc_scale = 7 * 7 - - def __init__(self, - block, layers, dropout=0, num_features=512, zero_init_residual=False, - groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False): - super(IResNet, self).__init__() - self.fp16 = fp16 - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05) - self.prelu = nn.PReLU(self.inplanes) - self.layer1 = self._make_layer(block, 64, layers[0], stride=2) - self.layer2 = self._make_layer(block, - 128, - layers[1], - stride=2, - dilate=replace_stride_with_dilation[0]) - self.layer3 = self._make_layer(block, - 256, - layers[2], - stride=2, - dilate=replace_stride_with_dilation[1]) - self.layer4 = self._make_layer(block, - 512, - layers[3], - stride=2, - dilate=replace_stride_with_dilation[2]) - self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05, ) - self.dropout = nn.Dropout(p=dropout, inplace=True) - self.fc = nn.Linear(512 * block.expansion * self.fc_scale, 
num_features) - self.features = nn.BatchNorm1d(num_features, eps=1e-05) - nn.init.constant_(self.features.weight, 1.0) - self.features.weight.requires_grad = False - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.normal_(m.weight, 0, 0.1) - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - if zero_init_residual: - for m in self.modules(): - if isinstance(m, IBasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1, dilate=False): - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ), - ) - layers = [] - layers.append( - block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block(self.inplanes, - planes, - groups=self.groups, - base_width=self.base_width, - dilation=self.dilation)) - - return nn.Sequential(*layers) - - def checkpoint(self, func, num_seg, x): - if self.training: - return checkpoint_sequential(func, num_seg, x) - else: - return func(x) - - def forward(self, x): - with torch.cuda.amp.autocast(self.fp16): - x = self.conv1(x) - x = self.bn1(x) - x = self.prelu(x) - x = self.layer1(x) - x = self.checkpoint(self.layer2, 20, x) - x = self.checkpoint(self.layer3, 100, x) - x = self.layer4(x) - x = self.bn2(x) - x = torch.flatten(x, 1) - x = self.dropout(x) - x = self.fc(x.float() if self.fp16 else x) - x = self.features(x) - return x - - -def _iresnet(arch, block, layers, pretrained, progress, **kwargs): - model = IResNet(block, layers, **kwargs) - if pretrained: - raise ValueError() - return model - - -def iresnet2060(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet2060', IBasicBlock, [3, 128, 1024 - 128, 3], pretrained, progress, **kwargs) diff --git a/spaces/kevinwang676/vits-fast-finetuning-pcr/text/cantonese.py b/spaces/kevinwang676/vits-fast-finetuning-pcr/text/cantonese.py deleted file mode 100644 index b66d12138b81b70b86f18217d24a08fce76305c0..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/vits-fast-finetuning-pcr/text/cantonese.py +++ /dev/null @@ -1,59 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('jyutjyu') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ei˥'), - ('B', 'biː˥'), - ('C', 'siː˥'), - ('D', 'tiː˥'), - ('E', 'iː˥'), - ('F', 'e˥fuː˨˩'), - ('G', 'tsiː˥'), - ('H', 'ɪk̚˥tsʰyː˨˩'), - ('I', 'ɐi˥'), - ('J', 'tsei˥'), - ('K', 'kʰei˥'), - ('L', 'e˥llou˨˩'), - ('M', 'ɛːm˥'), - ('N', 'ɛːn˥'), - ('O', 'ou˥'), - ('P', 'pʰiː˥'), - ('Q', 'kʰiːu˥'), - ('R', 'aː˥lou˨˩'), - ('S', 'ɛː˥siː˨˩'), - ('T', 'tʰiː˥'), - ('U', 'juː˥'), - ('V', 'wiː˥'), - ('W', 'tʊk̚˥piː˥juː˥'), - ('X', 'ɪk̚˥siː˨˩'), - ('Y', 'waːi˥'), - ('Z', 'iː˨sɛːt̚˥') -]] - - -def number_to_cantonese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def cantonese_to_ipa(text): - text = number_to_cantonese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - 
text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! ', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/khanguyen/voice-password-app/README.md b/spaces/khanguyen/voice-password-app/README.md deleted file mode 100644 index 99cba2702231dbaf4b1d30425bffb893fd7a8736..0000000000000000000000000000000000000000 --- a/spaces/khanguyen/voice-password-app/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Voice Password App -emoji: 🦀 -colorFrom: green -colorTo: pink -sdk: streamlit -sdk_version: 1.10.0 -app_file: Final_project.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kingabzpro/savtadepth/heroku/setup.sh b/spaces/kingabzpro/savtadepth/heroku/setup.sh deleted file mode 100644 index 81b2387faa0f8595c635aefb464b65e195b1ff84..0000000000000000000000000000000000000000 --- a/spaces/kingabzpro/savtadepth/heroku/setup.sh +++ /dev/null @@ -1,2 +0,0 @@ -export GRADIO_SERVER_NAME=0.0.0.0 -export GRADIO_SERVER_PORT="$PORT" \ No newline at end of file diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/check_iswlt_test_data.py b/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/check_iswlt_test_data.py deleted file mode 100644 index f8e2eb0f15699f1b458a8445d0c1dd6229a21f77..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/check_iswlt_test_data.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -import os, sys -import subprocess -import re -from subprocess import check_call, check_output - -WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) - -if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): - print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') - sys.exit(-1) - - -BLEU_REGEX = re.compile("^BLEU\\S* = (\\S+) ") -def run_eval_bleu(cmd): - output = check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode("utf-8").strip() - print(output) - bleu = -1.0 - for line in output.strip().split('\n'): - m = BLEU_REGEX.search(line) - if m is not None: - bleu = m.groups()[0] - bleu = float(bleu) - break - return bleu - -def check_data_test_bleu(raw_folder, data_lang_pairs): - not_matchings = [] - for sacrebleu_set, src_tgts in data_lang_pairs: - for src_tgt in src_tgts: - print(f'checking test bleus for: {src_tgt} at {sacrebleu_set}') - src, tgt = src_tgt.split('-') - ssrc, stgt = src[:2], tgt[:2] - if os.path.exists(f'{raw_folder}/test.{tgt}-{src}.{src}'): - # reversed direction may have different test set - test_src = f'{raw_folder}/test.{tgt}-{src}.{src}' - else: - test_src = f'{raw_folder}/test.{src}-{tgt}.{src}' - cmd1 = f'cat {test_src} | sacrebleu -t "{sacrebleu_set}" -l {stgt}-{ssrc}; [ $? -eq 0 ] || echo ""' - test_tgt = f'{raw_folder}/test.{src}-{tgt}.{tgt}' - cmd2 = f'cat {test_tgt} | sacrebleu -t "{sacrebleu_set}" -l {ssrc}-{stgt}; [ $? 
-eq 0 ] || echo ""' - bleu1 = run_eval_bleu(cmd1) - if bleu1 != 100.0: - not_matchings.append(f'{sacrebleu_set}:{src_tgt} source side not matching: {test_src}') - bleu2 = run_eval_bleu(cmd2) - if bleu2 != 100.0: - not_matchings.append(f'{sacrebleu_set}:{src_tgt} target side not matching: {test_tgt}') - return not_matchings - -if __name__ == "__main__": - to_data_path = f'{WORKDIR_ROOT}/iwsltv2' - not_matching = check_data_test_bleu( - f'{to_data_path}/raw', - [ - ('iwslt17', ['en_XX-ar_AR', 'en_XX-ko_KR', 'ar_AR-en_XX', 'ko_KR-en_XX']), - ('iwslt17', ['en_XX-it_IT', 'en_XX-nl_XX', 'it_IT-en_XX', 'nl_XX-en_XX']), - ('iwslt17/tst2015', ['en_XX-vi_VN', "vi_VN-en_XX"]), - ] - ) - if len(not_matching) > 0: - print('the following datasets do not have matching test datasets:\n\t', '\n\t'.join(not_matching)) - diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/anyio/abc/_tasks.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/anyio/abc/_tasks.py deleted file mode 100644 index e48d3c1e97e02cd188b567b50a4c0c615f187e4d..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/anyio/abc/_tasks.py +++ /dev/null @@ -1,119 +0,0 @@ -from __future__ import annotations - -import sys -from abc import ABCMeta, abstractmethod -from types import TracebackType -from typing import TYPE_CHECKING, Any, Awaitable, Callable, TypeVar, overload -from warnings import warn - -if sys.version_info >= (3, 8): - from typing import Protocol -else: - from typing_extensions import Protocol - -if TYPE_CHECKING: - from anyio._core._tasks import CancelScope - -T_Retval = TypeVar("T_Retval") -T_contra = TypeVar("T_contra", contravariant=True) - - -class TaskStatus(Protocol[T_contra]): - @overload - def started(self: TaskStatus[None]) -> None: - ... - - @overload - def started(self, value: T_contra) -> None: - ... - - def started(self, value: T_contra | None = None) -> None: - """ - Signal that the task has started. - - :param value: object passed back to the starter of the task - """ - - -class TaskGroup(metaclass=ABCMeta): - """ - Groups several asynchronous tasks together. - - :ivar cancel_scope: the cancel scope inherited by all child tasks - :vartype cancel_scope: CancelScope - """ - - cancel_scope: CancelScope - - async def spawn( - self, - func: Callable[..., Awaitable[Any]], - *args: object, - name: object = None, - ) -> None: - """ - Start a new task in this task group. - - :param func: a coroutine function - :param args: positional arguments to call the function with - :param name: name of the task, for the purposes of introspection and debugging - - .. deprecated:: 3.0 - Use :meth:`start_soon` instead. If your code needs AnyIO 2 compatibility, you - can keep using this until AnyIO 4. - - """ - warn( - 'spawn() is deprecated -- use start_soon() (without the "await") instead', - DeprecationWarning, - ) - self.start_soon(func, *args, name=name) - - @abstractmethod - def start_soon( - self, - func: Callable[..., Awaitable[Any]], - *args: object, - name: object = None, - ) -> None: - """ - Start a new task in this task group. - - :param func: a coroutine function - :param args: positional arguments to call the function with - :param name: name of the task, for the purposes of introspection and debugging - - .. 
versionadded:: 3.0 - """ - - @abstractmethod - async def start( - self, - func: Callable[..., Awaitable[Any]], - *args: object, - name: object = None, - ) -> Any: - """ - Start a new task and wait until it signals for readiness. - - :param func: a coroutine function - :param args: positional arguments to call the function with - :param name: name of the task, for the purposes of introspection and debugging - :return: the value passed to ``task_status.started()`` - :raises RuntimeError: if the task finishes without calling ``task_status.started()`` - - .. versionadded:: 3.0 - """ - - @abstractmethod - async def __aenter__(self) -> TaskGroup: - """Enter the task group context and allow starting new tasks.""" - - @abstractmethod - async def __aexit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> bool | None: - """Exit the task group context waiting for all tasks to finish.""" diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_M_A_P_.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_M_A_P_.py deleted file mode 100644 index 39b0050c5f0591a2b36c21242863655ca1f3ef47..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_M_A_P_.py +++ /dev/null @@ -1,142 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.textTools import tobytes, tostr, safeEval -from . import DefaultTable - -GMAPFormat = """ - > # big endian - tableVersionMajor: H - tableVersionMinor: H - flags: H - recordsCount: H - recordsOffset: H - fontNameLength: H -""" -# psFontName is a byte string which follows the record above. This is zero padded -# to the beginning of the records array. The recordsOffsst is 32 bit aligned. 
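# A minimal sketch of the fixed-size header described by GMAPFormat above:
# the six big-endian uint16 fields pack to a 12-byte header, and compile()
# below rounds (12 + fontNameLength) up to the next multiple of 4 so that
# recordsOffset stays 32-bit aligned. The plain-struct equivalent here is
# illustrative only, not part of this table implementation.
import struct

GMAP_HEADER_SIZE = struct.calcsize(">6H")  # 12 bytes
assert GMAP_HEADER_SIZE == 12

def aligned_records_offset(font_name_length):
    """Round the header-plus-name length up to a multiple of 4."""
    return 4 * ((GMAP_HEADER_SIZE + font_name_length + 3) // 4)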
- -GMAPRecordFormat1 = """ - > # big endian - UV: L - cid: H - gid: H - ggid: H - name: 32s -""" - - -class GMAPRecord(object): - def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""): - self.UV = uv - self.cid = cid - self.gid = gid - self.ggid = ggid - self.name = name - - def toXML(self, writer, ttFont): - writer.begintag("GMAPRecord") - writer.newline() - writer.simpletag("UV", value=self.UV) - writer.newline() - writer.simpletag("cid", value=self.cid) - writer.newline() - writer.simpletag("gid", value=self.gid) - writer.newline() - writer.simpletag("glyphletGid", value=self.gid) - writer.newline() - writer.simpletag("GlyphletName", value=self.name) - writer.newline() - writer.endtag("GMAPRecord") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - value = attrs["value"] - if name == "GlyphletName": - self.name = value - else: - setattr(self, name, safeEval(value)) - - def compile(self, ttFont): - if self.UV is None: - self.UV = 0 - nameLen = len(self.name) - if nameLen < 32: - self.name = self.name + "\0" * (32 - nameLen) - data = sstruct.pack(GMAPRecordFormat1, self) - return data - - def __repr__(self): - return ( - "GMAPRecord[ UV: " - + str(self.UV) - + ", cid: " - + str(self.cid) - + ", gid: " - + str(self.gid) - + ", ggid: " - + str(self.ggid) - + ", Glyphlet Name: " - + str(self.name) - + " ]" - ) - - -class table_G_M_A_P_(DefaultTable.DefaultTable): - - dependencies = [] - - def decompile(self, data, ttFont): - dummy, newData = sstruct.unpack2(GMAPFormat, data, self) - self.psFontName = tostr(newData[: self.fontNameLength]) - assert ( - self.recordsOffset % 4 - ) == 0, "GMAP error: recordsOffset is not 32 bit aligned." - newData = data[self.recordsOffset :] - self.gmapRecords = [] - for i in range(self.recordsCount): - gmapRecord, newData = sstruct.unpack2( - GMAPRecordFormat1, newData, GMAPRecord() - ) - gmapRecord.name = gmapRecord.name.strip("\0") - self.gmapRecords.append(gmapRecord) - - def compile(self, ttFont): - self.recordsCount = len(self.gmapRecords) - self.fontNameLength = len(self.psFontName) - self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4) - data = sstruct.pack(GMAPFormat, self) - data = data + tobytes(self.psFontName) - data = data + b"\0" * (self.recordsOffset - len(data)) - for record in self.gmapRecords: - data = data + record.compile(ttFont) - return data - - def toXML(self, writer, ttFont): - writer.comment("Most of this table will be recalculated by the compiler") - writer.newline() - formatstring, names, fixes = sstruct.getformat(GMAPFormat) - for name in names: - value = getattr(self, name) - writer.simpletag(name, value=value) - writer.newline() - writer.simpletag("PSFontName", value=self.psFontName) - writer.newline() - for gmapRecord in self.gmapRecords: - gmapRecord.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name == "GMAPRecord": - if not hasattr(self, "gmapRecords"): - self.gmapRecords = [] - gmapRecord = GMAPRecord() - self.gmapRecords.append(gmapRecord) - for element in content: - if isinstance(element, str): - continue - name, attrs, content = element - gmapRecord.fromXML(name, attrs, content, ttFont) - else: - value = attrs["value"] - if name == "PSFontName": - self.psFontName = value - else: - setattr(self, name, safeEval(value)) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/utils/logging.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/utils/logging.py deleted file mode 
100644 index 187641d03bc5770b817d6250409066322db71539..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/utils/logging.py +++ /dev/null @@ -1,184 +0,0 @@ -# coding=utf-8 -# Copyright 2020 Optuna, Hugging Face -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Logging utilities.""" - -import logging -import os -from logging import ( - CRITICAL, # NOQA - DEBUG, # NOQA - ERROR, # NOQA - FATAL, # NOQA - INFO, # NOQA - NOTSET, # NOQA - WARN, # NOQA - WARNING, # NOQA -) -from typing import Optional - - -log_levels = { - "debug": logging.DEBUG, - "info": logging.INFO, - "warning": logging.WARNING, - "error": logging.ERROR, - "critical": logging.CRITICAL, -} - -_default_log_level = logging.WARNING - - -def _get_library_name() -> str: - return __name__.split(".")[0] - - -def _get_library_root_logger() -> logging.Logger: - return logging.getLogger(_get_library_name()) - - -def _get_default_logging_level(): - """ - If HUGGINGFACE_HUB_VERBOSITY env var is set to one of the valid choices - return that as the new default level. If it is not - fall back to - `_default_log_level` - """ - env_level_str = os.getenv("HUGGINGFACE_HUB_VERBOSITY", None) - if env_level_str: - if env_level_str in log_levels: - return log_levels[env_level_str] - else: - logging.getLogger().warning( - f"Unknown option HUGGINGFACE_HUB_VERBOSITY={env_level_str}, " - f"has to be one of: { ', '.join(log_levels.keys()) }" - ) - return _default_log_level - - -def _configure_library_root_logger() -> None: - library_root_logger = _get_library_root_logger() - library_root_logger.addHandler(logging.StreamHandler()) - library_root_logger.setLevel(_get_default_logging_level()) - - -def _reset_library_root_logger() -> None: - library_root_logger = _get_library_root_logger() - library_root_logger.setLevel(logging.NOTSET) - - -def get_logger(name: Optional[str] = None) -> logging.Logger: - """ - Returns a logger with the specified name. This function is not supposed - to be directly accessed by library users. - - Args: - name (`str`, *optional*): - The name of the logger to get, usually the filename - - Example: - - ```python - >>> from huggingface_hub import get_logger - - >>> logger = get_logger(__file__) - >>> logger.set_verbosity_info() - ``` - """ - - if name is None: - name = _get_library_name() - - return logging.getLogger(name) - - -def get_verbosity() -> int: - """Return the current level for the HuggingFace Hub's root logger. - - Returns: - Logging level, e.g., `huggingface_hub.logging.DEBUG` and - `huggingface_hub.logging.INFO`. 
- - <Tip> - - HuggingFace Hub has following logging levels: - - - `huggingface_hub.logging.CRITICAL`, `huggingface_hub.logging.FATAL` - - `huggingface_hub.logging.ERROR` - - `huggingface_hub.logging.WARNING`, `huggingface_hub.logging.WARN` - - `huggingface_hub.logging.INFO` - - `huggingface_hub.logging.DEBUG` - - </Tip> - """ - return _get_library_root_logger().getEffectiveLevel() - - -def set_verbosity(verbosity: int) -> None: - """ - Sets the level for the HuggingFace Hub's root logger. - - Args: - verbosity (`int`): - Logging level, e.g., `huggingface_hub.logging.DEBUG` and - `huggingface_hub.logging.INFO`. - """ - _get_library_root_logger().setLevel(verbosity) - - -def set_verbosity_info(): - """ - Sets the verbosity to `logging.INFO`. - """ - return set_verbosity(INFO) - - -def set_verbosity_warning(): - """ - Sets the verbosity to `logging.WARNING`. - """ - return set_verbosity(WARNING) - - -def set_verbosity_debug(): - """ - Sets the verbosity to `logging.DEBUG`. - """ - return set_verbosity(DEBUG) - - -def set_verbosity_error(): - """ - Sets the verbosity to `logging.ERROR`. - """ - return set_verbosity(ERROR) - - -def disable_propagation() -> None: - """ - Disable propagation of the library log outputs. Note that log propagation is - disabled by default. - """ - _get_library_root_logger().propagate = False - - -def enable_propagation() -> None: - """ - Enable propagation of the library log outputs. Please disable the - HuggingFace Hub's default handler to prevent double logging if the root - logger has been configured. - """ - _get_library_root_logger().propagate = True - - -_configure_library_root_logger() diff --git a/spaces/ky2k/image_denoise_demo/basic_ops.py b/spaces/ky2k/image_denoise_demo/basic_ops.py deleted file mode 100644 index e8d1d4a850f986dd37f13532075e06a5beb73138..0000000000000000000000000000000000000000 --- a/spaces/ky2k/image_denoise_demo/basic_ops.py +++ /dev/null @@ -1,91 +0,0 @@ -import logging, os -logging.disable(logging.WARNING) -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - -import tensorflow as tf -from network_configure import conf_basic_ops - - -"""This script defines basic operaters. -""" - - -def convolution_2D(inputs, filters, kernel_size, strides, use_bias, name=None): - """Performs 2D convolution without activation function. - If followed by batch normalization, set use_bias=False. - """ - return tf.layers.conv2d( - inputs=inputs, - filters=filters, - kernel_size=kernel_size, - strides=strides, - padding='same', - use_bias=use_bias, - kernel_initializer=conf_basic_ops['kernel_initializer'], - name=name, - ) - -def convolution_3D(inputs, filters, kernel_size, strides, use_bias, name=None): - """Performs 3D convolution without activation function. - If followed by batch normalization, set use_bias=False. - """ - return tf.layers.conv3d( - inputs=inputs, - filters=filters, - kernel_size=kernel_size, - strides=strides, - padding='same', - use_bias=use_bias, - kernel_initializer=conf_basic_ops['kernel_initializer'], - name=name, - ) - -def transposed_convolution_2D(inputs, filters, kernel_size, strides, use_bias, name=None): - """Performs 2D transposed convolution without activation function. - If followed by batch normalization, set use_bias=False. 
- """ - return tf.layers.conv2d_transpose( - inputs=inputs, - filters=filters, - kernel_size=kernel_size, - strides=strides, - padding='same', - use_bias=use_bias, - kernel_initializer=conf_basic_ops['kernel_initializer'], - name=name, - ) - -def transposed_convolution_3D(inputs, filters, kernel_size, strides, use_bias, name=None): - """Performs 3D transposed convolution without activation function. - If followed by batch normalization, set use_bias=False. - """ - return tf.layers.conv3d_transpose( - inputs=inputs, - filters=filters, - kernel_size=kernel_size, - strides=strides, - padding='same', - use_bias=use_bias, - kernel_initializer=conf_basic_ops['kernel_initializer'], - name=name, - ) - -def batch_norm(inputs, training, name=None): - """Performs a batch normalization. - We set fused=True for a significant performance boost. - See https://www.tensorflow.org/performance/performance_guide#common_fused_ops - """ - return tf.layers.batch_normalization( - inputs=inputs, - momentum=conf_basic_ops['momentum'], - epsilon=conf_basic_ops['epsilon'], - center=True, - scale=True, - training=training, - fused=True, - name=name, - ) - -def relu(inputs, name=None): - return tf.nn.relu(inputs, name=name) if conf_basic_ops['relu_type'] == 'relu' \ - else tf.nn.relu6(inputs, name=name) diff --git a/spaces/laurabarreda/genre_prediction/extract_electronic.py b/spaces/laurabarreda/genre_prediction/extract_electronic.py deleted file mode 100644 index 94f8347df0adc0d3454d6701393db8addb20b3a7..0000000000000000000000000000000000000000 --- a/spaces/laurabarreda/genre_prediction/extract_electronic.py +++ /dev/null @@ -1,161 +0,0 @@ -import requests -from variables import * -from time import sleep -import streamlit as st - -class Extract: - ''' - This class contains the board and it's necessary elements to start the game. 
- ''' - # Client Id and Secret from Spotify API - CLIENT_ID = CLIENT_ID - CLIENT_SECRET = CLIENT_SECRET - - # base URL of all Spotify API endpoints - BASE_URL = BASE_URL - - # URL for authorisation - AUTH_URL = AUTH_URL - - # List with the audio features to extract - track_features_list = track_features_list - artist_features_list = artist_features_list - audio_features_list = audio_features_list - - - def __init__(self, track_url): - - self.track_url = track_url - - if len(self.track_url) > 0: - try: - self.api_response() - self.define_track_id() - self.extract_all_features() - self.dict_into_dict() - except: - st.write('Url not available, please try a different one') - self.track_url = '' - - def api_response(self): - ''' - This function will create the persinalised headers to get data from the API - Needed personal variables: client_id, client_secret - ''' - # create the response from the API - auth_response = requests.post(AUTH_URL, {'grant_type': 'client_credentials', - 'client_id': CLIENT_ID, - 'client_secret': CLIENT_SECRET,}) - - # convert the response to JSON - auth_response_data = auth_response.json() - - # save the access token - access_token = auth_response_data['access_token'] - - # The headers to be used are personalised with our token - self.headers = {'Authorization': 'Bearer {token}'.format(token=access_token)} - - - def define_track_id(self): - ''' - This function will modify the track name, call the api and return the id of the track - ''' - self.track_id = self.track_url.split("/")[-1].split("?")[0] - - - def api_call(self, url_string, url_id): - ''' - This function will do individual calls to the API - ''' - track_info = requests.get(BASE_URL + url_string + '/' + url_id, headers=self.headers) - self.track_info = track_info.json() - self.api_call_completed = True - - - def extract_track_main_features(self): - ''' - This function will iterate over the main track features and extract them into a dictionary - ''' - for feature in track_features_list: - try: - feature_data = self.track_info[feature] - feature = 'track_' + feature - self.all_track_features[feature] = feature_data - except: - self.all_track_features[feature] = None - - try: - artist_name = self.track_info['artists'][0]['name'] - self.all_track_features['artist_name'] = artist_name - except: - self.all_track_features['artist_name'] = None - - # Album name - try: - album = self.track_info["album"]["name"] - self.all_track_features['album'] = album - except: - self.all_track_features['album'] = None - - # Album cover - try: - album_cover = self.track_info["album"]["images"][0]['url'] - self.all_track_features['album_cover'] = album_cover - except: - self.all_track_features['album_cover'] = None - - self.artist_id = self.track_info["artists"][0]["uri"] - self.artist_id = self.artist_id.replace('spotify:artist:', '') - - - def extract_artist_features(self): - ''' - This function will iterate over the main artist features and extract them into a dictionary - ''' - for feature in artist_features_list: - try: - feature_data = self.track_info[feature] - feature = 'artist_' + feature - self.all_track_features[feature] = feature_data - except: - self.all_track_features[feature] = None - - - def extract_track_audio_features(self): - ''' - This function will iterate over all the audio features and extract them into a dictionary - ''' - for feature in audio_features_list: - try: - feature_data = self.track_info[feature] - self.all_track_features[feature] = feature_data - except: - self.all_track_features[feature] = None - 
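    # A minimal usage sketch, assuming variables.py supplies valid Spotify
    # CLIENT_ID / CLIENT_SECRET and the feature lists (the URL and printed
    # fields here are illustrative only):
    #
    #     extractor = Extract("https://open.spotify.com/track/<track_id>")
    #     if extractor.track_url:  # __init__ resets this to '' when the lookup fails
    #         features = extractor.track_data[extractor.track_id]
    #         print(features.get("artist_name"), features.get("album"))
    #
    # extract_all_features below chains the tracks, artists and audio-features
    # calls, and dict_into_dict nests the combined result under the track id.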
- - def extract_all_features(self): - ''' - This function will take all the necessary actions to extract all the audio features from the track - ''' - self.all_track_features = {} - self.track_data = {} - - # Extract track fatures - self.api_call('tracks', self.track_id) - self.extract_track_main_features() - - # Extract artist features - self.api_call('artists', self.artist_id) - self.extract_artist_features() - - # Extract audio features - self.api_call('audio-features', self.track_id) - self.extract_track_audio_features() - - - def dict_into_dict(self): - ''' - This function will add the audio features in a nested dictionary, under the track_id - ''' - self.track_data[self.track_id] = self.all_track_features \ No newline at end of file diff --git a/spaces/leesooleon/xiaolxl-GuoFeng3/app.py b/spaces/leesooleon/xiaolxl-GuoFeng3/app.py deleted file mode 100644 index df1cb039067c4cb2497d396745ccda872ea94249..0000000000000000000000000000000000000000 --- a/spaces/leesooleon/xiaolxl-GuoFeng3/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/xiaolxl/GuoFeng3").launch() \ No newline at end of file diff --git a/spaces/lewiswu1209/MockingBird/ppg2mel/rnn_decoder_mol.py b/spaces/lewiswu1209/MockingBird/ppg2mel/rnn_decoder_mol.py deleted file mode 100644 index 9d48d7bc697baef107818569dc3e87a96708fb00..0000000000000000000000000000000000000000 --- a/spaces/lewiswu1209/MockingBird/ppg2mel/rnn_decoder_mol.py +++ /dev/null @@ -1,374 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import numpy as np -from .utils.mol_attention import MOLAttention -from .utils.basic_layers import Linear -from .utils.vc_utils import get_mask_from_lengths - - -class DecoderPrenet(nn.Module): - def __init__(self, in_dim, sizes): - super().__init__() - in_sizes = [in_dim] + sizes[:-1] - self.layers = nn.ModuleList( - [Linear(in_size, out_size, bias=False) - for (in_size, out_size) in zip(in_sizes, sizes)]) - - def forward(self, x): - for linear in self.layers: - x = F.dropout(F.relu(linear(x)), p=0.5, training=True) - return x - - -class Decoder(nn.Module): - """Mixture of Logistic (MoL) attention-based RNN Decoder.""" - def __init__( - self, - enc_dim, - num_mels, - frames_per_step, - attention_rnn_dim, - decoder_rnn_dim, - prenet_dims, - num_mixtures, - encoder_down_factor=1, - num_decoder_rnn_layer=1, - use_stop_tokens=False, - concat_context_to_last=False, - ): - super().__init__() - self.enc_dim = enc_dim - self.encoder_down_factor = encoder_down_factor - self.num_mels = num_mels - self.frames_per_step = frames_per_step - self.attention_rnn_dim = attention_rnn_dim - self.decoder_rnn_dim = decoder_rnn_dim - self.prenet_dims = prenet_dims - self.use_stop_tokens = use_stop_tokens - self.num_decoder_rnn_layer = num_decoder_rnn_layer - self.concat_context_to_last = concat_context_to_last - - # Mel prenet - self.prenet = DecoderPrenet(num_mels, prenet_dims) - self.prenet_pitch = DecoderPrenet(num_mels, prenet_dims) - - # Attention RNN - self.attention_rnn = nn.LSTMCell( - prenet_dims[-1] + enc_dim, - attention_rnn_dim - ) - - # Attention - self.attention_layer = MOLAttention( - attention_rnn_dim, - r=frames_per_step/encoder_down_factor, - M=num_mixtures, - ) - - # Decoder RNN - self.decoder_rnn_layers = nn.ModuleList() - for i in range(num_decoder_rnn_layer): - if i == 0: - self.decoder_rnn_layers.append( - nn.LSTMCell( - enc_dim + attention_rnn_dim, - decoder_rnn_dim)) - else: - self.decoder_rnn_layers.append( - nn.LSTMCell( - decoder_rnn_dim, - 
decoder_rnn_dim)) - # self.decoder_rnn = nn.LSTMCell( - # 2 * enc_dim + attention_rnn_dim, - # decoder_rnn_dim - # ) - if concat_context_to_last: - self.linear_projection = Linear( - enc_dim + decoder_rnn_dim, - num_mels * frames_per_step - ) - else: - self.linear_projection = Linear( - decoder_rnn_dim, - num_mels * frames_per_step - ) - - - # Stop-token layer - if self.use_stop_tokens: - if concat_context_to_last: - self.stop_layer = Linear( - enc_dim + decoder_rnn_dim, 1, bias=True, w_init_gain="sigmoid" - ) - else: - self.stop_layer = Linear( - decoder_rnn_dim, 1, bias=True, w_init_gain="sigmoid" - ) - - - def get_go_frame(self, memory): - B = memory.size(0) - go_frame = torch.zeros((B, self.num_mels), dtype=torch.float, - device=memory.device) - return go_frame - - def initialize_decoder_states(self, memory, mask): - device = next(self.parameters()).device - B = memory.size(0) - - # attention rnn states - self.attention_hidden = torch.zeros( - (B, self.attention_rnn_dim), device=device) - self.attention_cell = torch.zeros( - (B, self.attention_rnn_dim), device=device) - - # decoder rnn states - self.decoder_hiddens = [] - self.decoder_cells = [] - for i in range(self.num_decoder_rnn_layer): - self.decoder_hiddens.append( - torch.zeros((B, self.decoder_rnn_dim), - device=device) - ) - self.decoder_cells.append( - torch.zeros((B, self.decoder_rnn_dim), - device=device) - ) - # self.decoder_hidden = torch.zeros( - # (B, self.decoder_rnn_dim), device=device) - # self.decoder_cell = torch.zeros( - # (B, self.decoder_rnn_dim), device=device) - - self.attention_context = torch.zeros( - (B, self.enc_dim), device=device) - - self.memory = memory - # self.processed_memory = self.attention_layer.memory_layer(memory) - self.mask = mask - - def parse_decoder_inputs(self, decoder_inputs): - """Prepare decoder inputs, i.e. gt mel - Args: - decoder_inputs:(B, T_out, n_mel_channels) inputs used for teacher-forced training. 
- """ - decoder_inputs = decoder_inputs.reshape( - decoder_inputs.size(0), - int(decoder_inputs.size(1)/self.frames_per_step), -1) - # (B, T_out//r, r*num_mels) -> (T_out//r, B, r*num_mels) - decoder_inputs = decoder_inputs.transpose(0, 1) - # (T_out//r, B, num_mels) - decoder_inputs = decoder_inputs[:,:,-self.num_mels:] - return decoder_inputs - - def parse_decoder_outputs(self, mel_outputs, alignments, stop_outputs): - """ Prepares decoder outputs for output - Args: - mel_outputs: - alignments: - """ - # (T_out//r, B, T_enc) -> (B, T_out//r, T_enc) - alignments = torch.stack(alignments).transpose(0, 1) - # (T_out//r, B) -> (B, T_out//r) - if stop_outputs is not None: - if alignments.size(0) == 1: - stop_outputs = torch.stack(stop_outputs).unsqueeze(0) - else: - stop_outputs = torch.stack(stop_outputs).transpose(0, 1) - stop_outputs = stop_outputs.contiguous() - # (T_out//r, B, num_mels*r) -> (B, T_out//r, num_mels*r) - mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous() - # decouple frames per step - # (B, T_out, num_mels) - mel_outputs = mel_outputs.view( - mel_outputs.size(0), -1, self.num_mels) - return mel_outputs, alignments, stop_outputs - - def attend(self, decoder_input): - cell_input = torch.cat((decoder_input, self.attention_context), -1) - self.attention_hidden, self.attention_cell = self.attention_rnn( - cell_input, (self.attention_hidden, self.attention_cell)) - self.attention_context, attention_weights = self.attention_layer( - self.attention_hidden, self.memory, None, self.mask) - - decoder_rnn_input = torch.cat( - (self.attention_hidden, self.attention_context), -1) - - return decoder_rnn_input, self.attention_context, attention_weights - - def decode(self, decoder_input): - for i in range(self.num_decoder_rnn_layer): - if i == 0: - self.decoder_hiddens[i], self.decoder_cells[i] = self.decoder_rnn_layers[i]( - decoder_input, (self.decoder_hiddens[i], self.decoder_cells[i])) - else: - self.decoder_hiddens[i], self.decoder_cells[i] = self.decoder_rnn_layers[i]( - self.decoder_hiddens[i-1], (self.decoder_hiddens[i], self.decoder_cells[i])) - return self.decoder_hiddens[-1] - - def forward(self, memory, mel_inputs, memory_lengths): - """ Decoder forward pass for training - Args: - memory: (B, T_enc, enc_dim) Encoder outputs - decoder_inputs: (B, T, num_mels) Decoder inputs for teacher forcing. - memory_lengths: (B, ) Encoder output lengths for attention masking. - Returns: - mel_outputs: (B, T, num_mels) mel outputs from the decoder - alignments: (B, T//r, T_enc) attention weights. 
- """ - # [1, B, num_mels] - go_frame = self.get_go_frame(memory).unsqueeze(0) - # [T//r, B, num_mels] - mel_inputs = self.parse_decoder_inputs(mel_inputs) - # [T//r + 1, B, num_mels] - mel_inputs = torch.cat((go_frame, mel_inputs), dim=0) - # [T//r + 1, B, prenet_dim] - decoder_inputs = self.prenet(mel_inputs) - # decoder_inputs_pitch = self.prenet_pitch(decoder_inputs__) - - self.initialize_decoder_states( - memory, mask=~get_mask_from_lengths(memory_lengths), - ) - - self.attention_layer.init_states(memory) - # self.attention_layer_pitch.init_states(memory_pitch) - - mel_outputs, alignments = [], [] - if self.use_stop_tokens: - stop_outputs = [] - else: - stop_outputs = None - while len(mel_outputs) < decoder_inputs.size(0) - 1: - decoder_input = decoder_inputs[len(mel_outputs)] - # decoder_input_pitch = decoder_inputs_pitch[len(mel_outputs)] - - decoder_rnn_input, context, attention_weights = self.attend(decoder_input) - - decoder_rnn_output = self.decode(decoder_rnn_input) - if self.concat_context_to_last: - decoder_rnn_output = torch.cat( - (decoder_rnn_output, context), dim=1) - - mel_output = self.linear_projection(decoder_rnn_output) - if self.use_stop_tokens: - stop_output = self.stop_layer(decoder_rnn_output) - stop_outputs += [stop_output.squeeze()] - mel_outputs += [mel_output.squeeze(1)] #? perhaps don't need squeeze - alignments += [attention_weights] - # alignments_pitch += [attention_weights_pitch] - - mel_outputs, alignments, stop_outputs = self.parse_decoder_outputs( - mel_outputs, alignments, stop_outputs) - if stop_outputs is None: - return mel_outputs, alignments - else: - return mel_outputs, stop_outputs, alignments - - def inference(self, memory, stop_threshold=0.5): - """ Decoder inference - Args: - memory: (1, T_enc, D_enc) Encoder outputs - Returns: - mel_outputs: mel outputs from the decoder - alignments: sequence of attention weights from the decoder - """ - # [1, num_mels] - decoder_input = self.get_go_frame(memory) - - self.initialize_decoder_states(memory, mask=None) - - self.attention_layer.init_states(memory) - - mel_outputs, alignments = [], [] - # NOTE(sx): heuristic - max_decoder_step = memory.size(1)*self.encoder_down_factor//self.frames_per_step - min_decoder_step = memory.size(1)*self.encoder_down_factor // self.frames_per_step - 5 - while True: - decoder_input = self.prenet(decoder_input) - - decoder_input_final, context, alignment = self.attend(decoder_input) - - #mel_output, stop_output, alignment = self.decode(decoder_input) - decoder_rnn_output = self.decode(decoder_input_final) - if self.concat_context_to_last: - decoder_rnn_output = torch.cat( - (decoder_rnn_output, context), dim=1) - - mel_output = self.linear_projection(decoder_rnn_output) - stop_output = self.stop_layer(decoder_rnn_output) - - mel_outputs += [mel_output.squeeze(1)] - alignments += [alignment] - - if torch.sigmoid(stop_output.data) > stop_threshold and len(mel_outputs) >= min_decoder_step: - break - if len(mel_outputs) >= max_decoder_step: - # print("Warning! 
Decoding steps reaches max decoder steps.") - break - - decoder_input = mel_output[:,-self.num_mels:] - - - mel_outputs, alignments, _ = self.parse_decoder_outputs( - mel_outputs, alignments, None) - - return mel_outputs, alignments - - def inference_batched(self, memory, stop_threshold=0.5): - """ Decoder inference - Args: - memory: (B, T_enc, D_enc) Encoder outputs - Returns: - mel_outputs: mel outputs from the decoder - alignments: sequence of attention weights from the decoder - """ - # [1, num_mels] - decoder_input = self.get_go_frame(memory) - - self.initialize_decoder_states(memory, mask=None) - - self.attention_layer.init_states(memory) - - mel_outputs, alignments = [], [] - stop_outputs = [] - # NOTE(sx): heuristic - max_decoder_step = memory.size(1)*self.encoder_down_factor//self.frames_per_step - min_decoder_step = memory.size(1)*self.encoder_down_factor // self.frames_per_step - 5 - while True: - decoder_input = self.prenet(decoder_input) - - decoder_input_final, context, alignment = self.attend(decoder_input) - - #mel_output, stop_output, alignment = self.decode(decoder_input) - decoder_rnn_output = self.decode(decoder_input_final) - if self.concat_context_to_last: - decoder_rnn_output = torch.cat( - (decoder_rnn_output, context), dim=1) - - mel_output = self.linear_projection(decoder_rnn_output) - # (B, 1) - stop_output = self.stop_layer(decoder_rnn_output) - stop_outputs += [stop_output.squeeze()] - # stop_outputs.append(stop_output) - - mel_outputs += [mel_output.squeeze(1)] - alignments += [alignment] - # print(stop_output.shape) - if torch.all(torch.sigmoid(stop_output.squeeze().data) > stop_threshold) \ - and len(mel_outputs) >= min_decoder_step: - break - if len(mel_outputs) >= max_decoder_step: - # print("Warning! Decoding steps reaches max decoder steps.") - break - - decoder_input = mel_output[:,-self.num_mels:] - - - mel_outputs, alignments, stop_outputs = self.parse_decoder_outputs( - mel_outputs, alignments, stop_outputs) - mel_outputs_stacked = [] - for mel, stop_logit in zip(mel_outputs, stop_outputs): - idx = np.argwhere(torch.sigmoid(stop_logit.cpu()) > stop_threshold)[0][0].item() - mel_outputs_stacked.append(mel[:idx,:]) - mel_outputs = torch.cat(mel_outputs_stacked, dim=0).unsqueeze(0) - return mel_outputs, alignments diff --git a/spaces/lewtun/donut-docvqa/app.py b/spaces/lewtun/donut-docvqa/app.py deleted file mode 100644 index da5f082ab032b27e87471006d5d574fc215ce300..0000000000000000000000000000000000000000 --- a/spaces/lewtun/donut-docvqa/app.py +++ /dev/null @@ -1,57 +0,0 @@ -import re -import gradio as gr - -import torch -from transformers import DonutProcessor, VisionEncoderDecoderModel - -processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa") -model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa") - -device = "cuda" if torch.cuda.is_available() else "cpu" -model.to(device) - -def process_document(image, question): - # prepare encoder inputs - pixel_values = processor(image, return_tensors="pt").pixel_values - - # prepare decoder inputs - task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" - prompt = task_prompt.replace("{user_input}", question) - decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids - - # generate answer - outputs = model.generate( - pixel_values.to(device), - decoder_input_ids=decoder_input_ids.to(device), - max_length=model.decoder.config.max_position_embeddings, - 
early_stopping=True, - pad_token_id=processor.tokenizer.pad_token_id, - eos_token_id=processor.tokenizer.eos_token_id, - use_cache=True, - num_beams=1, - bad_words_ids=[[processor.tokenizer.unk_token_id]], - return_dict_in_generate=True, - ) - - # postprocess - sequence = processor.batch_decode(outputs.sequences)[0] - sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") - sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token - - return processor.token2json(sequence) - -description = "Gradio Demo for Donut, an instance of `VisionEncoderDecoderModel` fine-tuned on DocVQA (document visual question answering). To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below." -article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2111.15664' target='_blank'>Donut: OCR-free Document Understanding Transformer</a> | <a href='https://github.com/clovaai/donut' target='_blank'>Github Repo</a></p>" - -demo = gr.Interface( - fn=process_document, - inputs=["image", "text"], - outputs="json", - title="Demo: Donut 🍩 for DocVQA", - description=description, - article=article, - enable_queue=True, - examples=[["example_1.png", "When is the coffee break?"], ["example_2.jpeg", "What's the population of Stoddard?"]], - cache_examples=False) - -demo.launch() \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2013 English Win 64bit Crack.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2013 English Win 64bit Crack.md deleted file mode 100644 index 6a43c0988fc7661da3ee178941d06ed28685d362..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2013 English Win 64bit Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>autocad 2013 english win 64bit crack</h2><br /><p><b><b>Download</b> &#10084; <a href="https://bytlly.com/2uGyut">https://bytlly.com/2uGyut</a></b></p><br /><br /> - -Cracked (by SSQ) DS License Server (subsequently the called DSLS-SSQ) allows ... Nov 07, 2017 · Catia V5 R21 Download & installation 64bit/32bit windows ... V5 6r2019, Divergent Media EditReady Latest Verion, Microsoft 2013 Key Free, ... 17th Edition (English Edition) de Prof. exe application is launched, this is due to ... 1fdad05405<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Ccboot 3 0 Crack Extra Quality Ulop Philippines.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Ccboot 3 0 Crack Extra Quality Ulop Philippines.md deleted file mode 100644 index 55c11d66dcb50a9f140bb59c1fa0581ff19864f6..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Ccboot 3 0 Crack Extra Quality Ulop Philippines.md +++ /dev/null @@ -1,9 +0,0 @@ -<h2>ccboot 3 0 crack ulop philippines</h2><br /><p><b><b>Download Zip</b> &mdash; <a href="https://bytlly.com/2uGvSV">https://bytlly.com/2uGvSV</a></b></p><br /><br /> - -Re: ccboot users thread without drives 15 Jan 2014 19:55 #750468 . 22:53:33 192.168.0.101:1024 login: isid 400027230000 tsih 0 cid 0 ExpStatSN 2 CmdSN 0 id 0 logs 0 -Re: Topic of ccboot users without disks January 15, 2014 20:34 #750501 . 22:57:05 192.168.0.101:1024 login: isid 400027230000 tsih 0 cid 0 ExpStatSN 2 CmdSN 0 id 0 logs 0 -Re: Topic of ccboot users without disks January 15, 2014 21:05 #750527 . 
23:23:55 192.168.0.101:1024 login: isid 400027230000 tsih 0 cid 0 ExpStatSN 2 CmdSN 0 id 0 logs 0 -Re: Topic of ccboot users without disks January 15, 2014 21:16 #750538 . 23:31:33 192.168.0.101:1024 login: isid 400027230000 ts 8a78ff9644<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/HD Online Player (22 Se Srabon Hd Movie Free [WORK] Download).md b/spaces/lincquiQcaudo/Top-20-Diffusion/HD Online Player (22 Se Srabon Hd Movie Free [WORK] Download).md deleted file mode 100644 index 9bad2d2d3fe4a6aae211b2403ea86a4cc1498f82..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/HD Online Player (22 Se Srabon Hd Movie Free [WORK] Download).md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>HD Online Player (22 se srabon hd movie free download)</h2><br /><p><b><b>DOWNLOAD</b> &#10084; <a href="https://bytlly.com/2uGxfp">https://bytlly.com/2uGxfp</a></b></p><br /><br /> -<br /> -Watchlist 12 Dec 2020 Naya Rivera, a singer and actor who played a gay ... Bonny, Koushani Kolkata Bengali Movie Mp3 Songs Download. ... Watch Aashiqui 2 Full Movie HD - Top Movie - TES Channel on Tor Aashiqui ... He is an actor and director, known for Kahaani (2012), Feluda (2017) and Baishe Srabon (2011). 1fdad05405<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Incir Receli 2 Tek Parca 720p Hd [HOT].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Incir Receli 2 Tek Parca 720p Hd [HOT].md deleted file mode 100644 index d7c2749bdbd20ae3edc4983aa988a91c1b5da785..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Incir Receli 2 Tek Parca 720p Hd [HOT].md +++ /dev/null @@ -1,56 +0,0 @@ -<h2>incir receli 2 tek parca 720p hd</h2><br /><p><b><b>Download</b> &#9733; <a href="https://bytlly.com/2uGvFB">https://bytlly.com/2uGvFB</a></b></p><br /><br /> - -videolarını çekti - - beyler ben anlamadım çok saygı duydum - - biliyordum abi - - geçmiş olsun abi - - sevmedim mi - -? - - sakladigin ben o sürücü neydi? - - 20mb - - çok amaçlı - - yaptığı o abi sürücü var - - uzyıyoruz abi - - ben windowsla yapıyorum - - ben sadece ubuntu yapıyorum - - evet - - ben 2 yerde bilgi alıyorum - - kurallarını değil - - ben çok sevdiğim projeleri inceleyiyorum - - evet kuralları ile ben de oldukışım - - alınma çünkü ödüşen hayatın sonunda ekranını kaybettim - - sevinci gibi - - kuralları - - kapını çıkıyorum ya - - bunu cıkıyorum - - bunun cıkıyorum - - düşünür çünkü - - bunu 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/cuda/detail/terminate.h b/spaces/ma-xu/LIVE/thrust/thrust/system/cuda/detail/terminate.h deleted file mode 100644 index d14bed2ab3d4db55750a92b76cba5daaba38a684..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/cuda/detail/terminate.h +++ /dev/null @@ -1,63 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -#pragma once - -#include <thrust/detail/config.h> -#include <thrust/system/cuda/detail/util.h> -#include <cstdio> - -namespace thrust -{ -namespace system -{ -namespace cuda -{ -namespace detail -{ - - -inline __device__ -void terminate() -{ - thrust::cuda_cub::terminate(); -} - - -inline __host__ __device__ -void terminate_with_message(const char* message) -{ - printf("%s\n", message); - thrust::cuda_cub::terminate(); -} - - -} // end detail -} // end cuda -} // end system -} // end thrust - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/reverse.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/reverse.h deleted file mode 100644 index f80974e8a8d752c575a554018cd42e94600d3ab5..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/reverse.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include <thrust/detail/config.h> - -// this system has no special reverse functions - diff --git a/spaces/manjuvallayil/te-reo/gtoken.py b/spaces/manjuvallayil/te-reo/gtoken.py deleted file mode 100644 index 8d68740c73f501941f2454b74c272cae3e71190c..0000000000000000000000000000000000000000 --- a/spaces/manjuvallayil/te-reo/gtoken.py +++ /dev/null @@ -1,201 +0,0 @@ -# -*- coding: utf-8 -*- -import ast -import math -import re -import time - -import httpx - -from utils import rshift - - -class TokenAcquirer: - """Google Translate API token generator - - translate.google.com uses a token to authorize the requests. If you are - not Google, you do have this token and will have to pay for use. - This class is the result of reverse engineering on the obfuscated and - minified code used by Google to generate such token. - - The token is based on a seed which is updated once per hour and on the - text that will be translated. 
- Both are combined - by some strange math - in order to generate a final - token (e.g. 744915.856682) which is used by the API to validate the - request. - - This operation will cause an additional request to get an initial - token from translate.google.com. - - Example usage: - >>> from googletrans.gtoken import TokenAcquirer - >>> acquirer = TokenAcquirer() - >>> text = 'test' - >>> tk = acquirer.do(text) - >>> tk - 950629.577246 - """ - - RE_TKK = re.compile(r'tkk:\'(.+?)\'', re.DOTALL) - RE_RAWTKK = re.compile(r'tkk:\'(.+?)\'', re.DOTALL) - - def __init__(self, client: httpx.Client, tkk='0', host='translate.google.com'): - self.client = client - self.tkk = tkk - self.host = host if 'http' in host else 'https://' + host - - def _update(self): - """update tkk - """ - # we don't need to update the base TKK value when it is still valid - now = math.floor(int(time.time() * 1000) / 3600000.0) - if self.tkk and int(self.tkk.split('.')[0]) == now: - return - - r = self.client.get(self.host) - - raw_tkk = self.RE_TKK.search(r.text) - if raw_tkk: - self.tkk = raw_tkk.group(1) - return - - try: - # this will be the same as python code after stripping out a reserved word 'var' - code = self.RE_TKK.search(r.text).group(1).replace('var ', '') - # unescape special ascii characters such like a \x3d(=) - code = code.encode().decode('unicode-escape') - except AttributeError: - raise Exception('Could not find TKK token for this request.\nSee https://github.com/ssut/py-googletrans/issues/234 for more details.') - except: - raise - - if code: - tree = ast.parse(code) - visit_return = False - operator = '+' - n, keys = 0, dict(a=0, b=0) - for node in ast.walk(tree): - if isinstance(node, ast.Assign): - name = node.targets[0].id - if name in keys: - if isinstance(node.value, ast.Num): - keys[name] = node.value.n - # the value can sometimes be negative - elif isinstance(node.value, ast.UnaryOp) and \ - isinstance(node.value.op, ast.USub): # pragma: nocover - keys[name] = -node.value.operand.n - elif isinstance(node, ast.Return): - # parameters should be set after this point - visit_return = True - elif visit_return and isinstance(node, ast.Num): - n = node.n - elif visit_return and n > 0: - # the default operator is '+' but implement some more for - # all possible scenarios - if isinstance(node, ast.Add): # pragma: nocover - pass - elif isinstance(node, ast.Sub): # pragma: nocover - operator = '-' - elif isinstance(node, ast.Mult): # pragma: nocover - operator = '*' - elif isinstance(node, ast.Pow): # pragma: nocover - operator = '**' - elif isinstance(node, ast.BitXor): # pragma: nocover - operator = '^' - # a safety way to avoid Exceptions - clause = compile('{1}{0}{2}'.format( - operator, keys['a'], keys['b']), '', 'eval') - value = eval(clause, dict(__builtin__={})) - result = '{}.{}'.format(n, value) - - self.tkk = result - - def _lazy(self, value): - """like lazy evaluation, this method returns a lambda function that - returns value given. - We won't be needing this because this seems to have been built for - code obfuscation. - - the original code of this method is as follows: - - ... 
code-block: javascript - - var ek = function(a) { - return function() { - return a; - }; - } - """ - return lambda: value - - def _xr(self, a, b): - size_b = len(b) - c = 0 - while c < size_b - 2: - d = b[c + 2] - d = ord(d[0]) - 87 if 'a' <= d else int(d) - d = rshift(a, d) if '+' == b[c + 1] else a << d - a = a + d & 4294967295 if '+' == b[c] else a ^ d - - c += 3 - return a - - def acquire(self, text): - a = [] - # Convert text to ints - for i in text: - val = ord(i) - if val < 0x10000: - a += [val] - else: - # Python doesn't natively use Unicode surrogates, so account for those - a += [ - math.floor((val - 0x10000) / 0x400 + 0xD800), - math.floor((val - 0x10000) % 0x400 + 0xDC00) - ] - - b = self.tkk if self.tkk != '0' else '' - d = b.split('.') - b = int(d[0]) if len(d) > 1 else 0 - - # assume e means char code array - e = [] - g = 0 - size = len(a) - while g < size: - l = a[g] - # just append if l is less than 128(ascii: DEL) - if l < 128: - e.append(l) - # append calculated value if l is less than 2048 - else: - if l < 2048: - e.append(l >> 6 | 192) - else: - # append calculated value if l matches special condition - if (l & 64512) == 55296 and g + 1 < size and \ - a[g + 1] & 64512 == 56320: - g += 1 - l = 65536 + ((l & 1023) << 10) + (a[g] & 1023) # This bracket is important - e.append(l >> 18 | 240) - e.append(l >> 12 & 63 | 128) - else: - e.append(l >> 12 | 224) - e.append(l >> 6 & 63 | 128) - e.append(l & 63 | 128) - g += 1 - a = b - for i, value in enumerate(e): - a += value - a = self._xr(a, '+-a^+6') - a = self._xr(a, '+-3^+b+-f') - a ^= int(d[1]) if len(d) > 1 else 0 - if a < 0: # pragma: nocover - a = (a & 2147483647) + 2147483648 - a %= 1000000 # int(1E6) - - return '{}.{}'.format(a, a ^ b) - - def do(self, text): - self._update() - tk = self.acquire(text) - return tk diff --git a/spaces/matthoffner/chatbot-mini/services/useApiService.ts b/spaces/matthoffner/chatbot-mini/services/useApiService.ts deleted file mode 100644 index 474c5b73a40330c54aff0da3f22ef22e826cb5f4..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/chatbot-mini/services/useApiService.ts +++ /dev/null @@ -1,30 +0,0 @@ -import { useCallback } from 'react'; - -import { useFetch } from '@/hooks/useFetch'; - -export interface GetModelsRequestProps { - key: string; -} - -const useApiService = () => { - const fetchService = useFetch(); - - const getModels = useCallback( - (params: GetModelsRequestProps, signal?: AbortSignal) => { - return fetchService.post<GetModelsRequestProps>(`/api/models`, { - body: { key: params.key }, - headers: { - 'Content-Type': 'application/json', - }, - signal, - }); - }, - [fetchService], - ); - - return { - getModels, - }; -}; - -export default useApiService; diff --git a/spaces/matthoffner/open-codetree/pages/api/updateUser.ts b/spaces/matthoffner/open-codetree/pages/api/updateUser.ts deleted file mode 100644 index b706f7bd9cbcde7a7f270254792f46a837099f06..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/open-codetree/pages/api/updateUser.ts +++ /dev/null @@ -1,19 +0,0 @@ -import { withSessionApiRoute } from "../../utils/withSession"; - -export default withSessionApiRoute(async (req, res) => { - const userData = await req.session.user; - - const user = { - ...userData, - data: { - ...userData?.data, - ...(req.body.avatar && { avatar: req.body.avatar }), - ...(req.body.username && { username: req.body.username }), - }, - }; - - // @ts-ignore - req.session.user = user; - await req.session.save(); - res.json(user); -}); diff --git 
a/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/runtime/metrics.py b/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/runtime/metrics.py deleted file mode 100644 index 63026a71989441603df6abd447555524f2fd1e85..0000000000000000000000000000000000000000 --- a/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/runtime/metrics.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. -# -# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES -# SPDX-License-Identifier: MIT - -from abc import ABC, abstractmethod - -import torch -import torch.distributed as dist -from torch import Tensor - - -class Metric(ABC): - """ Metric class with synchronization capabilities similar to TorchMetrics """ - - def __init__(self): - self.states = {} - - def add_state(self, name: str, default: Tensor): - assert name not in self.states - self.states[name] = default.clone() - setattr(self, name, default) - - def synchronize(self): - if dist.is_initialized(): - for state in self.states: - dist.all_reduce(getattr(self, state), op=dist.ReduceOp.SUM, group=dist.group.WORLD) - - def __call__(self, *args, **kwargs): - self.update(*args, **kwargs) - - def reset(self): - for name, default in self.states.items(): - setattr(self, name, default.clone()) - - def compute(self): - self.synchronize() - value = self._compute().item() - self.reset() - return value - - @abstractmethod - def _compute(self): - pass - - @abstractmethod - def update(self, preds: Tensor, targets: Tensor): - pass - - -class MeanAbsoluteError(Metric): - def __init__(self): - super().__init__() - self.add_state('error', torch.tensor(0, dtype=torch.float32, device='cuda')) - self.add_state('total', torch.tensor(0, dtype=torch.int32, device='cuda')) - - def update(self, preds: Tensor, targets: Tensor): - preds = preds.detach() - n = preds.shape[0] - error = torch.abs(preds.view(n, -1) - targets.view(n, -1)).sum() - self.total += n - self.error += error - - def _compute(self): - return self.error / self.total diff --git a/spaces/merve/data-leak/public/dataset-worldviews/README.md b/spaces/merve/data-leak/public/dataset-worldviews/README.md deleted file mode 100644 index 74e4920975910a03f1cc2ebe582a7a6d03eb8da6..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/public/dataset-worldviews/README.md +++ /dev/null @@ -1,6 +0,0 @@ -## Photo todos - -x highlight the active button -x firing when not expected? 
-x clear timer when clicked -- maybe convert to HTML? \ No newline at end of file diff --git a/spaces/merve/data-leak/source/fill-in-the-blank/style.css b/spaces/merve/data-leak/source/fill-in-the-blank/style.css deleted file mode 100644 index 726984190483443c3da0905eae281514eccc7487..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/source/fill-in-the-blank/style.css +++ /dev/null @@ -1,737 +0,0 @@ -@media (max-width: 1100px){ - body{ - /*overflow-x: hidden;*/ - } -} - - -.tooltip { - top: -1000px; - position: absolute; - padding: 10px; - background: rgba(255, 255, 255, .8); - border: 0px solid lightgray; - - width: 300px; - font-size: 14px; - line-height: 1.4em; - background: rgba(0, 0, 0, .8); - color: #fff; - pointer-events: all !important; -} -.tooltip a{ - color: #fff !important; -} -.tooltip:hover{ -/* opacity: 1; - pointer-events: all !important; -*/} - -.tooltip-hidden{ - opacity: 0; - transition: all .3s; - transition-delay: .2s; - pointer-events: none !important; -} - -@media (max-width: 590px){ - .footend{ - margin-left: 0px; - width: 10px; - } - - - div.tooltip{ - transition: all 0s !important; - transition-delay: 0s !important; - - display: none; - position: fixed; - bottom: -1px; - width: calc(100%); - left: -1px !important; - right: -1px !important; - top: auto !important; - width: auto !important; - } -} - -svg{ - overflow: visible; -} - -.domain{ - display: none; -} - -.tick{ - display: none; -} - -.bg-tick{ - stroke: #eee; -} - -text{ - pointer-events: none; - /*fill: #fff;*/ - text-shadow: 0 1px 0 #fff, 1px 0 0 #fff, 0 -1px 0 #fff, -1px 0 0 #fff; -} - -.pair{ - width: 820px; - /*height: 550px;*/ - margin: 0px auto; - margin-top: 25px !important -} - -.nurse-name-zari-cda{ - margin-bottom: 35px; -} - -.pair > div{ - display: inline-block; - vertical-align: top; -} - -.pair .graph{ - width: 500px; -} - -.pair .options{ - width: 250px; - padding-right: 20px; -} - -.pair .warning{ - width: 250px; - /*border: 1px solid orange;*/ - /*background: #fff9e4;*/ - /*padding: 10px;*/ - margin-top: 15px; - padding-left: 0px; - font-size: 14px; - line-height: 1.25em; - opacity: 0; - transition: all .2s; -} - -.pair .reset{ - width: 58px; - /*border: 1px solid orange;*/ - /*background: #fff9e4;*/ - /*padding: 10px;*/ - margin-top: 15px; - font-size: 14px; - line-height: 1.25em; - opacity: 0; - transition: opacity .2s; - cursor: pointer; - user-select: none; - outline: 1px solid #ccc; - padding: 5px; - -} -.pair .reset span{ - position: relative; - top: -1px; - padding-right: 4px; - padding-left: 1px; - /*font-size: ;*/ -} - -.pair .reset:hover{ - background: #eee; - color: #000; - outline: 1px solid #000; -} - -.options > *{ - margin-right: 10px; -} - -.options b{ - display: block; - margin-bottom: 5px; - margin-top: 10px; -} - - - - -.flex-row{ - width: 100%; - display: flex; - justify-content: space-between; - column-gap: 10px -} - -.flex-row > *{ - flex-grow: 1; - margin-right: 0px !important; -} - -.options > *{ - margin-right: 0px; -} - -.pair textarea{ - width: 100%; -} - -.flex-row-textarea{ - display: block; -} - -@media (max-width: 820px){ - .pair{ - width: 100%; - height: auto; - max-width: 500px; - margin: 0px auto; - } - - .flex-row{ - margin-bottom: -10px; - } - - .flex-row-textarea{ - display: flex; - margin-bottom: 10px; - } - - - .pair .options{ - width: auto; - padding-right: 0px; - } - - .warning{ - display: none !important; - } - - .reset{ - display: none !important; - } - - .pair .graph{ - width: 100%; - } - - .annotations{ - display: none; - } 
-} - - - -.pair.difference{ - width: 1000px; - margin-left: 0px; -} - -.pair.difference .pair-container{ -} - -.pair .options.wide{ - width: 100%; - margin-bottom: 20px; -} -.pair .options.wide > div{ - display: inline-block; -} - -.options.wide .option-type .button{ - width: 78px !important; -} - -.options.wide .option-model .button{ - width: 40px !important; -} - -.options.wide .update.button{ - width: 80px !important; -} - -textarea{ - font-family: 'Roboto', Helvetica, sans-serif; - font-weight: 300; - line-height: 1.55em; - font-size: 16px; - font-weight: bold; - border: 1px #ccc solid; - resize: none; -} - -.button.update{ - /*height: 20px;*/ - /*position: relative;*/ - /*top: -30px;*/ - /*margin-bottom: -10px;*/ - /*vertical-align: center;*/ - margin-top: 25px; - width: 252px; - text-align: center; - font-weight: 500; -} -.button{ - display: inline-block; - outline: 1px solid #ccc; - padding: 5px; - margin-top: 10px; - margin-right: 10px; - position: relative; - top: -12px; - cursor: pointer; - user-select: none; -} - -@media (hover: hover) and (pointer: fine) { - .button:hover{ - outline-color: #000; - } -} - -@media screen and (-webkit-min-device-pixel-ratio:0) and @media (max-width: 900px) { - select, - textarea, - input { - font-size: 16px !important; - } - - textarea{ - height: 80px !important; - } -} - - -.button.active{ - background: #eee; - color: #000; - /*font-weight: 500;*/ -} - - -.button.loading i{ - opacity: 1; -} - -.button.loading{ - pointer-events: none; - /*opacity: .6;*/ -} -.p-button{ - /*position: relative;*/ - /*top: -3px;*/ - /*line-height: 10px;*/ - /*line-height: */ - display: inline-block; - margin-right: 15px; -} -.p-button-link{ - text-decoration: underline; - cursor: pointer; - padding-right: 10px; -} -.interesting-pair-alts .p-button-link{ - display: block; - text-decoration: none; -} -.interesting-pair-alts .p-button-link div{ - padding-left: 10px; - padding-right: 10px; - padding-top: 5px; - padding-bottom: 5px; - outline: 1px solid #ccc; - margin-top: 5px; - margin-bottom: 5px; - margin-left: 10px; - -} -.difference-difference-alts .p-button-link:hover div{ - outline: 1px solid #000; -} - -.difference-difference-alts .p-button-link{ - display: block; - text-decoration: none; -} -.difference-difference-alts .p-button-link div{ - padding-left: 10px; - padding-right: 10px; - padding-top: 5px; - padding-bottom: 5px; - outline: 1px solid #ccc; - margin-top: 5px; - margin-bottom: 5px; - margin-left: 10px; - -} -.difference-difference-alts .p-button-link:hover div{ - outline: 1px solid #000; -} - - -.wide .flex-row{ - width: 220px; -} - -.wide > *{ - margin-right: 40px; -} - -.wide textarea{ - position: relative; - top: 12px; -} - - -@media (max-width: 1100px){ - .pair-container-overflow{ - overflow-x: scroll; - width: 100% !important; - } - - .pair.difference{ - width: auto; - max-width: 2000px; - } - - .pair.difference .options{ - margin: 0px auto; - margin-left: max(50vh - 500px, 0px); - width: min(500px, 100%); - } - -} - -.pair-container{ - width: 1000px; -} - - - - - -.checkbox{ - display: inline-block; - position: relative; - top: -10px; - margin-left: 10px; - -} - -circle:hover{ - stroke: blue; -} - - - -.hover text{ - fill: #000; - font-weight: 300; - /*stroke-width: 2px;*/ - /*text-shadow: 0 2px 0 #000, 2px 0 0 #000, 0 -2px 0 #000, -2px 0 0 #000;*/ -} - -#graph > div{ - display: inline-block; -} - -text.tiny{ - font-size: 9px; - font-family: monospace; - /*fill: #555;*/ -} - - - - - -svg{ - overflow: visible; -} - - -input{ - font-family: 
monospace; - width: 900px; - overflow: hidden; - background-color: rgba(0,0,0,0); - border: 0px; -} - -textarea{ - font-family: monospace; - font-size: 14px; -} - -/* Hide scrollbar for Chrome, Safari and Opera */ -.top-sents::-webkit-scrollbar { - /*display: none;*/ -} - -/* Hide scrollbar for IE, Edge and Firefox */ -.top-sents { - -ms-overflow-style: none; /* IE and Edge */ - scrollbar-width: none; /* Firefox */ -} - -.sent{ - margin-top: -15px; -} - - - -.post-summary{ - display: none; -} - - -.token-container{ - text-align: center; - line-height: 2em; -} - -.token{ - display: inline-block; - padding: 5px; - margin: 10px; - margin-top: 0px; - margin-bottom: 0px; - font-size: 20px; - font-family: monospace; - outline: 1px solid #ccc; - color: #000; - cursor: pointer; - background: #fff; - border: 0px; -} - -.token:hover, .token.active{ - outline: 1px solid #000; -} - - -.xy-only, .rotate-only{ - opacity: 0; - transition: all .2s; -} - -.annotations{ - transition: opacity .2s; -} - -.is-xy .xy-only{ - opacity: 1 !important; -} -.is-rotate .rotate-only{ - opacity: 1 !important; -} - -.hamlet{ - min-height: 304px; - margin-bottom: 20px; -} - -.hamlet-edit .button{ - color: #ccc; - pointer-events: none; -} -.hamlet-edit.changed .button{ - color: #000; - pointer-events: all; -} - -@media (max-width: 500px){ - .hamlet-edit .button{ - display: block; - text-align: center; - top: 0px !important; - margin: 0px auto !important; - margin-top: 5px !important; - width: 100%; - } -} - - - -.pair .update{ - color: #ccc; - pointer-events: none; -} -.pair.changed .update{ - color: #000; - pointer-events: all; -} - - - - -.difference-difference-list{ - display: none; -} - -.pair-container{ - width: 900px; -} -.pair-container > div{ - display: inline-block; -} - - -.difference-difference textarea{ - height: 52px; -} - -.not-is-color-by .y-axis-label text, .not-is-color-by .sent-1 text, .not-is-color-by .x-axis-label{ - fill: #444 !important; -} - -.is-color-by .y-axis-label text, .is-color-by .sent-1 text, .is-color-by .x-axis-label{ - font-weight: 400; - /*text-decoration: underline;*/ -} - - - -.time-token.active path{ - stroke: #f0f; - opacity: 1; -} -.time-token.active text{ - fill: #f0f !important; - opacity: 1 !important; - font-size: 14px; -} - - -.token{ - -} - -.gender-over-time{ - width: 1100px; - margin: 0px auto; - font-size: 14px; - margin-left: -91px; -} - -.gender-over-time .tick{ - display: block; -} - -.gender-over-time .axis{ - opacity: .7; -} - -.gender-over-time .sentence{ - /*position: relative;*/ - width: 32%; -} - -.gender-over-time .sentence .sentence-title{ - right: 42px; - position: relative; - text-align: right; - font-family: monospace; - -} -.gender-over-time .sentence.is-bear .sentence-title{ - /*text-align: center;*/ - right: 115px; -} - -.gender-over-time .g-caption{ - line-height: 18px; - margin-bottom: 30px; - margin-top: 5px; - width: 290px; - font-size: 13px; - left: 365px; - position: relative; -} - -@media (max-width: 1100px){ - .gender-over-time{ - width: 100%; - margin-left: 0px; - max-width: 500px; - margin: 0px auto; - } - - .gender-over-time .sentence{ - width: 100% !important; - margin-bottom: 20px; - } - - .gender-over-time .g-caption{ - left: 0px; - width: 100%; - } -} - -.time-token text{ - font-family: monospace; - pointer-events: all !important; - cursor: default; -} - - - -img[src*="img/wiki-years.png"] { - width: 300px; -} - - -#more-explorables{ - margin-top: 100px; -} - - - - -/*html{ - font-smooth: never; - -webkit-font-smoothing: none; - background: 
transparent; -} - -path{ - display: none; -}*/ - - -button { - display: inline-block; - border: none; - margin: 0; - text-decoration: none; - background: #fff; - color: #ffffff; - font-size: 1em; - cursor: pointer; - text-align: center; - -webkit-appearance: none; - -moz-appearance: none; - font-family : inherit; - -} - -button:active { - transform: scale(0.99); -} - - -info{ - font-weight: 300; - font-size: 12px; - line-height: 0em; - position: relative; - left: 7px; - top: -1px; - cursor: default; -} -info:hover{ - font-weight: 600; -} \ No newline at end of file diff --git a/spaces/merve/measuring-fairness/server-side/private-and-fair/README.md b/spaces/merve/measuring-fairness/server-side/private-and-fair/README.md deleted file mode 100644 index 4027669d437e37527797233c24a6de2a1ae62829..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/server-side/private-and-fair/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Colabs that generate data for [Can a Model Be Differentially Private and Fair?](https://pair.withgoogle.com/explorables/private-and-fair/). - - -- [MNIST_Generate_UMAP](https://colab.research.google.com/github/PAIR-code/ai-explorables/blob/master/server-side/private-and-fair/MNIST_Generate_UMAP.ipynb): Parses privacy rankings and projects MNIST digits with UMAP. -- [MNIST DP - Model Grid](https://colab.research.google.com/github/PAIR-code/ai-explorables/blob/master/server-side/private-and-fair/MNIST_DP_Model_Grid.ipynb): Trains a series of differentially private models with different levels of privacy and training data to generate the accuracy tradeoff line charts. -- [MNIST DP - Rotated](https://colab.research.google.com/github/PAIR-code/ai-explorables/blob/master/server-side/private-and-fair/MNIST_DP_Rotated.ipynb): Trains model with rotated digits for the [Subgroup Size and Accuracy Appendix](https://pair.withgoogle.com/explorables/private-and-fair/#appendix-subgroup-size-and-accuracy) . 
\ No newline at end of file diff --git a/spaces/merve/uncertainty-calibration/public/measuring-diversity/style.css b/spaces/merve/uncertainty-calibration/public/measuring-diversity/style.css deleted file mode 100644 index 38a1149b1a986d176009fce1d0d2861091ef2c1e..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/measuring-diversity/style.css +++ /dev/null @@ -1,229 +0,0 @@ -html{ - min-width: 800px; - overflow-x: auto; -} - -p{ - max-width: 750px; - margin-left: 0px auto; - margin-right: 0px auto; - margin: 0px auto; - margin-top: 1em; - margin-bottom: 1em; -} - -.white{ - stroke: #fff; - fill: none; - stroke-width: 1; -} - -.player{ - cursor: pointer; - stroke: #000; - stroke-width: 2; -} - -.button{ - border: .5px solid #000; - /*border-bottom-width: 4px;*/ - /*border-right-width: 4px;*/ - border-radius: 8px; - padding: 4px; - margin: 2px; - cursor: pointer; - display: inline-block; - /*font-family: monospace;*/ - /*font-family: 'Roboto Slab', serif;*/ - /*font-size: 16px;*/ - user-select: none; - font-family: 'Google Sans', sans-serif; - font-family: 'Roboto', Helvetica, sans-serif; - - /*font-weight: 300;*/ - -} -.button:hover{ - background: #eee !important; -} - -.button:active{ -} - - -svg{ - overflow: visible; -} - -.axis text{ - fill: #999; - font-family: 'Google Sans', sans-serif; - font-family: 'Roboto', Helvetica, sans-serif; -} -.axis text.chart-title{ - fill: #000; - font-size: 16px; -} - -.field{ - font-family: 'Roboto', Helvetica, sans-serif; -} - -.chart-title span{ - padding: 4px; -} - - -.shapes{ - line-height: 0px; - margin-bottom: 80px; - margin-top: 20px; -} - -.shape{ - display: inline-block; - outline: 1px solid #bbb; - margin: 5px; - cursor: pointer; -} -.shape:hover{ - outline: 1px solid #000; - background: #eee !important; -} -.measure:hover{ - outline: 1px solid #ccc; - background: #eee !important; - outline: 1px solid #000 !important; -} -.measure.active{ -} - -.shape{ - opacity: .3; -} - -.shapes{ - user-select: none; -} - - -.shape.active{ - opacity: 1; - outline: 1px solid #bf0bbf; - background: rgba(255,0,255,.03); -} -.shape.active:hover{ - background: rgba(255,0,255,.1) !important; -} -#all-shapes .shape.active{ - outline: 1px solid #bbb; - background: #fff; -} - - -.top, .bot{ - line-height: 1.8em; -} - -.measure{ - cursor: pointer; - outline: 1px solid #ccc; - margin: 10px; -} - -.measure-container{ - display:inline-block; - width: 300px; - margin-top: 15px; -} - -.measure-description{ - font-size: 14px; - max-width: 120px; - line-height: 16px; - display: inline-block; -} - -.emphasized{ - font-weight: 400; -} - -.set.no-stroke{ - opacity: 0; -} -.set{ - stroke: #000; - opacity: .3; -} -.set.selected{ - stroke: #fcb2f7; - stroke: #bf0bbf; - stroke-width: 1; - opacity: 1; -} -.row.selected text{ - opacity: 1 !important; - fill: #bf0bbf; - font-weight: 500; -} - -text.selected{ - opacity: 1 !important; - fill: #bf0bbf; - font-weight: 500; - -} - - - -text{ - /*pointer-events: none;*/ - text-shadow: 0 1px 0 #fff, 1px 0 0 #fff, 0 -1px 0 #fff, -1px 0 0 #fff; -} - -#coat-v-gender, #pick-green, #pick-triangle, #pick-metric, #all-shapes{ - width: 850px; -} -#coat-v-gender > div > div{ - background-size: cover; - background-position: center; -} - -.note, ul{ - opacity: .5; - max-width: 750px; - max-width: 750px; - margin-left: 0px auto; - margin-right: 0px auto; - margin: 0px auto; - margin-top: 1em; - margin-bottom: 1em; - -} - -#columns-height { - margin-bottom: 70px; -} - -.post-summary{ - - margin-bottom: auto; -} - - 
-#all-shapes{ - pointer-events: none; -} - -#all-shapes .shape{ - outline: 0px !important; -} - -.post-summary{ - display: none; -} - -#pick-metric .top text, #coat-v-gender .top text { - font-weight: 300 !important; -} - diff --git a/spaces/merve/uncertainty-calibration/source/private-and-fair/accuracy-v-privacy-class.js b/spaces/merve/uncertainty-calibration/source/private-and-fair/accuracy-v-privacy-class.js deleted file mode 100644 index 39daddb629006c967bfa8c3a6c1d43fc9887bc1b..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/source/private-and-fair/accuracy-v-privacy-class.js +++ /dev/null @@ -1,285 +0,0 @@ -var state = { - dataset_size: 15000, - threshold: .8, - label: 8 -} - -var sel = d3.select('.accuracy-v-privacy-class').html('') - .at({role: 'graphics-document', 'aria-label': `Line chart showing that high accuracy models can still perform poorly on some digit classes.`}) - -async function loadData(){ - var rawData = await util.getFile(`cns-cache/grid_${state.dataset_size}trainpoints_test_labels.csv`) - - rawData.forEach(d => { - delete d[''] - d.i = +d.i - d.label = +d.label - }) - - var aVal2Meta = {} - var metadata = await util.getFile('cns-cache/model_grid_test_accuracy.json') - metadata - .filter(d => d.dataset_size == state.dataset_size) - .forEach(d => aVal2Meta['aVal_' + d.aVal] = d) - - var allCols = d3.keys(rawData[0]) - .filter(d => d.includes('aVal')) - .map(key => { - var {epsilon, aVal} = aVal2Meta[key] - return {key, epsilon, aVal} - }) - - var byDigit = d3.nestBy(rawData, d => d.label) - byDigit.forEach(d => { - d.label = +d.key - }) - byDigit.forEach(digitClass => { - digitClass.cols = allCols.map(({key, epsilon}, colIndex) => { - return { - key, - colIndex, - epsilon, - digitClass, - label: digitClass.label, - accuracy: d3.mean(digitClass, d => d[key] > state.threshold) - } - }) - }) - - var data = _.flatten(byDigit.map(d => d.cols)) - .filter(d => util.epsilonExtent[1] <= d.epsilon && d.epsilon <= util.epsilonExtent[0]) - var byLabel = d3.nestBy(data, d => d.label) - byLabel.forEach((d, i) => { - d.label = d.key - }) - - return {data, byLabel} -} - - -async function initChart(){ - var {data, byLabel} = await loadData() - - var c = d3.conventions({ - sel: sel.append('div'), - height: 400, - margin: {bottom: 75, top: 5}, - layers: 'ds', - }) - - c.x = d3.scaleLog().domain(util.epsilonExtent).range(c.x.range()) - c.xAxis = d3.axisBottom(c.x).tickFormat(d => { - var rv = d + '' - if (rv.split('').filter(d => d !=0 && d != '.')[0] == 1) return rv - }) - - c.yAxis.tickFormat(d => d3.format('.0%')(d))//.ticks(8) - d3.drawAxis(c) - util.addAxisLabel(c, 'Higher Privacy →', '') - util.ggPlotBg(c, false) - c.layers[0].append('div') - .st({fontSize: 12, color: '#555', width: 120*2, textAlign: 'center', lineHeight: '1.3em', verticalAlign: 'top'}) - .translate([c.width/2 - 120, c.height + 45]) - .html('in ε') - - var line = d3.line().x(d => c.x(d.epsilon)).y(d => c.y(d.accuracy)) - - var lineSel = c.svg.append('g').appendMany('path.accuracy-line', byLabel) - .at({ - d: line, - fill: 'none', - stroke: '#000', - // opacity: 0, - }) - .on('mousemove', setActiveLabel) - - var circleSel = c.svg.append('g') - .appendMany('g.accuracy-circle', data) - .translate(d => [c.x(d.epsilon), c.y(d.accuracy)]) - .on('mousemove', setActiveLabel) - // .call(d3.attachTooltip) - - circleSel.append('circle') - .at({r: 7, stroke: '#fff'}) - - circleSel.append('text') - .text(d => d.label) - .at({textAnchor: 'middle', fontSize: 10, fill: '#fff', dy: '.33em'}) - - 
setActiveLabel(state) - function setActiveLabel({label}){ - lineSel - .classed('active', 0) - .filter(d => d.label == label) - .classed('active', 1) - .raise() - - circleSel - .classed('active', 0) - .filter(d => d.label == label) - .classed('active', 1) - .raise() - - state.label = label - } - - - async function updateDatasetSize(){ - var newData = await loadData() - data = newData.data - byLabel = newData.byLabel - - lineSel.data(byLabel) - .transition() - .at({d: line}) - - circleSel.data(data) - .transition() - .translate(d => [c.x(d.epsilon), c.y(d.accuracy)]) - - c.svg.select('text.annotation').remove() - } - - function updateThreshold(){ - data.forEach(d => { - d.accuracy = d3.mean(d.digitClass, e => e[d.key] > state.threshold) - }) - - lineSel.at({d: line}) - circleSel.translate(d => [c.x(d.epsilon), c.y(d.accuracy)]) - - c.svg.select('.y .axis-label').text(`Test Points With More Than ${d3.format('.2%')(state.threshold)} Confidence In Label`) - - c.svg.select('text.annotation').remove() - } - updateThreshold() - - return {c, updateDatasetSize, updateThreshold} -} - - -async function init(){ - sel.append('div.chart-title').text('High accuracy models can still perform poorly on some digit classes') - - var chart = await initChart() - - var buttonRowSel = sel.append('div.button-row') - .st({height: 50}) - - var buttonSel = buttonRowSel.append('div') - .st({width: 500}) - .append('span.chart-title').text('Training points') - .parent() - .append('div').st({display: 'inline-block', width: 300, marginLeft: 10}) - .append('div.digit-button-container.dataset_size') - .appendMany('div.button', [2000, 3750, 7500, 15000, 30000, 60000]) - .text(d3.format(',')) - .classed('active', d => d == state.dataset_size) - .on('click', d => { - buttonSel.classed('active', e => e == d) - state.dataset_size = d - chart.updateDatasetSize() - }) - - buttonRowSel.append('div.conf-slider') - .append('span.chart-title').text('Confidence threshold') - .parent() - .append('input.slider-native') - .at({ - type: 'range', - min: .0001, - max: .9999, - step: .0001, - value: state.threshold, - }) - .on('input', function(){ - state.threshold = this.value - chart.updateThreshold() - }) - - - function addSliders(){ - var width = 140 - var height = 30 - var color = '#000' - - var sliders = [ - {key: 'threshold', label: 'Confidence threshold', r: [.0001, .9999]}, - ] - sliders.forEach(d => { - d.value = state[d.key] - d.xScale = d3.scaleLinear().range([0, width]).domain(d.r).clamp(1) - }) - - d3.select('.conf-slider .slider-container').remove() - d3.select('.slider-native').remove() - - var svgSel = d3.select('.conf-slider').parent() - // .st({marginTop: 5, marginBottom: 5}) - .appendMany('div.slider-container', sliders) - .append('svg').at({width, height}) - .append('g').translate([10, 25]) - - var sliderSel = svgSel - .on('click', function(d){ - d.value = d.xScale.invert(d3.mouse(this)[0]) - renderSliders(d) - }) - .classed('slider', true) - .st({cursor: 'pointer'}) - - var textSel = sliderSel.append('text.annotation') - .at({y: -15, fontWeight: 300, textAnchor: 'middle', x: 180/2}) - - sliderSel.append('rect') - .at({width, height, y: -height/2, fill: 'rgba(0,0,0,0)'}) - - sliderSel.append('path').at({ - d: `M 0 -.5 H ${width}`, - stroke: color, - strokeWidth: 1 - }) - - var leftPathSel = sliderSel.append('path').at({ - d: `M 0 -.5 H ${width}`, - stroke: color, - strokeWidth: 3 - }) - - var drag = d3.drag() - .on('drag', function(d){ - var x = d3.mouse(this)[0] - d.value = d.xScale.invert(x) - - renderSliders(d) - }) - - 
var circleSel = sliderSel.append('circle').call(drag) - .at({r: 7, stroke: '#000'}) - - function renderSliders(d){ - if (d) state[d.key] = d.value - - circleSel.at({cx: d => d.xScale(d.value)}) - leftPathSel.at({d: d => `M 0 -.5 H ${d.xScale(d.value)}`}) - textSel - .at({x: d => d.xScale(d.value)}) - .text(d => d3.format('.2%')(d.value)) - chart.updateThreshold() - } - renderSliders() - } - addSliders() - - - chart.c.svg.append('text.annotation') - .translate([505, 212]) - .tspans(d3.wordwrap(`8s are correctly predicted with high confidence much more rarely than other digits`, 25), 12) - .at({textAnchor: 'end'}) - -} -init() - - - - diff --git a/spaces/michuS/overwatchClassificator/app.py b/spaces/michuS/overwatchClassificator/app.py deleted file mode 100644 index c1939a8f7d6a0729a9e9864e8fdcb00423d62a43..0000000000000000000000000000000000000000 --- a/spaces/michuS/overwatchClassificator/app.py +++ /dev/null @@ -1,15 +0,0 @@ -from fastai.vision.all import * -import gradio as gr - -categories = ('D.va','Doomfist', 'Junker Queen', 'Orisa', 'Reinhardt', 'Roadhog', 'Sigma', 'Winston', 'Wrecking ball', 'Ashe', 'Bastion', 'Cassidy', 'Echo', 'Genji', 'Hanzo', 'Junkrat', 'Mei', 'Pharah', 'Soldier: 76', 'Sojourn', 'Sombra', 'Symmetra', 'Torbjorn', 'Tracer', 'Widowmaker', 'Ana', 'Baptiste', 'Brigitte', 'Kiriko', 'Lucio', 'Mercy', 'Moira', 'Zenyatta') -learn = load_learner('model.pkl') -def classify_image(img): - pred,idx,probs = learn.predict(img) - return pred - -image = gr.inputs.Image(shape=(192, 192)) -label = gr.outputs.Label() -examples = ['hanzo.jpg', 'kiriko.jpg', 'winston.jpg'] - -intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples) -intf.launch(inline =False) \ No newline at end of file diff --git a/spaces/ml6team/dynamic-pricing/README.md b/spaces/ml6team/dynamic-pricing/README.md deleted file mode 100644 index 1d1d2c43a5e86e95a38e411b91e84c0c1f93017c..0000000000000000000000000000000000000000 --- a/spaces/ml6team/dynamic-pricing/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: 💸 Dynamic Pricing 💸 -emoji: 💸 -colorFrom: green -colorTo: gray -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -This demo will explore the current state-of-the-art in dynamic pricing methods. - -We will cover the motivation behind dynamic pricing and discuss how we can leverage Bayesian statistics, Thompson sampling and Gaussian processes to derive an optimal price-setting strategy. diff --git a/spaces/mrchtr/semantic-demo/retriever.py b/spaces/mrchtr/semantic-demo/retriever.py deleted file mode 100644 index 7c2a69b7df512c3d3aa105f74b3cc17d58f62538..0000000000000000000000000000000000000000 --- a/spaces/mrchtr/semantic-demo/retriever.py +++ /dev/null @@ -1,93 +0,0 @@ -from haystack.document_stores import InMemoryDocumentStore - -from haystack.nodes.retriever import TfidfRetriever -from haystack.pipelines import DocumentSearchPipeline, ExtractiveQAPipeline -from haystack.nodes.retriever import EmbeddingRetriever -import pickle -from pprint import pprint -dutch_datset_name = 'Partisan news 2019 (dutch)' -german_datset_name = 'CDU election program 2021' - -class ExportableInMemoryDocumentStore(InMemoryDocumentStore): - """ - Wrapper class around the InMemoryDocumentStore. - When the application is deployed to Huggingface Spaces there will be no GPU available. - We need to load pre-calculated data into the InMemoryDocumentStore. 
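# --- Editorial sketch, not part of the original Space -------------------------------
# The docstring above explains that pre-calculated data must be loaded because no GPU
# is available at serving time. A hedged sketch of the offline step that could produce
# such a pickle (run once on a GPU machine) is shown below. It reuses the
# ExportableInMemoryDocumentStore defined in this module and assumes farm-haystack's
# write_documents / update_embeddings API; exact signatures and the document field name
# ("content" vs "text") differ between haystack versions.
from haystack.nodes.retriever import EmbeddingRetriever

def build_index_offline(texts, out_file='in_memory_store.pkl'):
    store = ExportableInMemoryDocumentStore(similarity='cosine')
    store.write_documents([{'content': t} for t in texts])  # raw passages to index
    retriever = EmbeddingRetriever(
        document_store=store,
        embedding_model='sentence-transformers/paraphrase-multilingual-mpnet-base-v2',
        model_format='sentence_transformers'
    )
    store.update_embeddings(retriever)  # the GPU-heavy embedding pass
    store.export(out_file)              # pickle the in-memory indexes for the CPU-only Space
# -------------------------------------------------------------------------------------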
- """ - def export(self, file_name='in_memory_store.pkl'): - with open(file_name, 'wb') as f: - pickle.dump(self.indexes, f) - - def load_data(self, file_name='in_memory_store.pkl'): - with open(file_name, 'rb') as f: - self.indexes = pickle.load(f) - - -class SearchEngine(): - - def __init__(self, document_store_name_base, document_store_name_adpated, - adapted_retriever_path): - self.document_store = ExportableInMemoryDocumentStore(similarity='cosine') - self.document_store.load_data(document_store_name_base) - - self.document_store_adapted = ExportableInMemoryDocumentStore(similarity='cosine') - self.document_store_adapted.load_data(document_store_name_adpated) - - self.retriever = TfidfRetriever(document_store=self.document_store) - - self.base_dense_retriever = EmbeddingRetriever( - document_store=self.document_store, - embedding_model='sentence-transformers/paraphrase-multilingual-mpnet-base-v2', - model_format='sentence_transformers' - ) - - self.fine_tuned_retriever = EmbeddingRetriever( - document_store=self.document_store_adapted, - embedding_model=adapted_retriever_path, - model_format='sentence_transformers' - ) - - def sparse_retrieval(self, query): - """Sparse retrieval pipeline""" - scores = self.retriever._calc_scores(query) - p_retrieval = DocumentSearchPipeline(self.retriever) - documents = p_retrieval.run(query=query) - documents['documents'][0].score = list(scores[0].values())[0] - return documents - - def dense_retrieval(self, query, retriever='base'): - if retriever == 'base': - p_retrieval = DocumentSearchPipeline(self.base_dense_retriever) - return p_retrieval.run(query=query) - if retriever == 'adapted': - p_retrieval = DocumentSearchPipeline(self.fine_tuned_retriever) - return p_retrieval.run(query=query) - - def do_search(self, query): - sparse_result = self.sparse_retrieval(query)['documents'][0] - dense_base_result = self.dense_retrieval(query, 'base')['documents'][0] - dense_adapted_result = self.dense_retrieval(query, 'adapted')['documents'][0] - return sparse_result, dense_base_result, dense_adapted_result - - -dutch_search_engine = SearchEngine('dutch-article-idx.pkl', 'dutch-article-idx_adapted.pkl', - 'dutch-article-retriever') -german_search_engine = SearchEngine('documentstore_german-election-idx.pkl', - 'documentstore_german-election-idx_adapted.pkl', - 'adapted-retriever') - -def do_search(query, dataset): - if dataset == german_datset_name: - return german_search_engine.do_search(query) - else: - return dutch_search_engine.do_search(query) - -if __name__ == '__main__': - search_engine = SearchEngine('dutch-article-idx.pkl', 'dutch-article-idx_adapted.pkl', - 'dutch-article-retriever') - query = 'Kindergarten' - - result = search_engine.do_search(query) - pprint(result) - - diff --git a/spaces/mrciolino/ppt_owl_vit/setup.sh b/spaces/mrciolino/ppt_owl_vit/setup.sh deleted file mode 100644 index 688da97ff896ea11c44bf77c9910a139014a2de1..0000000000000000000000000000000000000000 --- a/spaces/mrciolino/ppt_owl_vit/setup.sh +++ /dev/null @@ -1,10 +0,0 @@ -/home/appuser/venv/bin/python -m pip install --upgrade pip - -mkdir -p ~/.streamlit/ -echo "\ -[server]\n\ -headless = true\n\ -port = $PORT\n\ -enableCORS = false\n\ -\n\ -" > ~/.streamlit/config.toml \ No newline at end of file diff --git a/spaces/mserras/somos-alpaca-es/load_data.py b/spaces/mserras/somos-alpaca-es/load_data.py deleted file mode 100644 index add1949e031ffd29d1f84b2971d9389cecade707..0000000000000000000000000000000000000000 --- a/spaces/mserras/somos-alpaca-es/load_data.py +++ /dev/null 
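Editorial aside on the ml6team/dynamic-pricing README above, which says the demo leverages Bayesian statistics and Thompson sampling to derive a price-setting strategy. The snippet below is a minimal, self-contained sketch of such a loop, not code from the Space: the candidate prices, the Beta-Bernoulli purchase model, and the simulated buy probabilities are all hypothetical.

import numpy as np

rng = np.random.default_rng(0)
prices = np.array([9.99, 12.99, 14.99])        # hypothetical candidate price points
true_buy_prob = np.array([0.60, 0.45, 0.30])   # unknown to the seller; only drives the simulation
alpha = np.ones(len(prices))                   # Beta(1, 1) prior on the purchase probability
beta = np.ones(len(prices))

for _ in range(1000):
    theta = rng.beta(alpha, beta)              # sample one purchase probability per price
    i = int(np.argmax(theta * prices))         # offer the price with the highest sampled revenue
    bought = rng.random() < true_buy_prob[i]   # simulated customer decision
    alpha[i] += bought                         # conjugate posterior update
    beta[i] += 1 - bought

Because each round acts on a posterior draw rather than a point estimate, the loop keeps exploring uncertain prices early on and gradually concentrates on the price with the best expected revenue.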
@@ -1,105 +0,0 @@ -import sys -import time -import os - -import argilla as rg -import pandas as pd -import requests -from datasets import load_dataset, concatenate_datasets - -from argilla.listeners import listener - -HF_TOKEN = os.environ.get("HF_TOKEN") -HUB_DATASET_NAME = "mserras/alpaca-es-hackaton" -HUB_DATASET_NAME_VAL = "mserras/alpaca-es-hackaton-validated" - -@listener( - dataset="somos-alpaca-es", - query="status:Validated", # https://docs.argilla.io/en/latest/guides/features/queries.html - execution_interval_in_seconds=1200, # interval to check the execution of `save_validated_to_hub` -) -def save_validated_to_hub(records, ctx): - if len(records) > 0: - ds = rg.DatasetForTextClassification(records=records).to_datasets() - if HF_TOKEN: - print("Pushing the dataset") - print(ds) - ds.push_to_hub(HUB_DATASET_NAME_VAL, token=HF_TOKEN) - else: - print("SET HF_TOKEN and HUB_DATASET_NAME TO SYNC YOUR DATASET!!!") - else: - print("NO RECORDS found") - -class LoadDatasets: - def __init__(self, api_key, workspace="team"): - rg.init(api_key=api_key, workspace=workspace) - - @staticmethod - def load_somos(): - # Leer el dataset del Hub - try: - print(f"Trying to sync with {HUB_DATASET_NAME}") - dataset = load_dataset(HUB_DATASET_NAME, split="train") - except Exception as e: - print(f"Not possible to sync with {HUB_DATASET_NAME}") - print(e) - dataset = None - - # dataset = load_dataset("somosnlp/somos-clean-alpaca-es", split="train") - - - # if old_ds: - # print("Concatenating datasets") - # dataset = concatenate_datasets([dataset, old_ds]) - # print("Concatenated dataset is:") - # print(dataset) - - dataset = dataset.remove_columns("metrics") - if not dataset: - print(f"There is no DATASET - Skipping!") - return - - print(f"Generating records from the dataset") - records = rg.DatasetForTextClassification.from_datasets(dataset) - settings = rg.TextClassificationSettings( - label_schema=["BAD INSTRUCTION", "BAD INPUT", "BAD OUTPUT", "INAPPROPRIATE", "BIASED", "ALL GOOD", "HALLUCINATION", "UNPROCESSABLE"] - ) - rg.configure_dataset(name="somos-alpaca-es", settings=settings, workspace="team") - - print("Logging the dataset!") - # Log the dataset - rg.log( - records, - name="somos-alpaca-es", - tags={"description": "SomosNLP Hackathon dataset - instruction filtering version"}, - batch_size=200 - ) - - # run listener - save_validated_to_hub.start() - -if __name__ == "__main__": - API_KEY = sys.argv[1] - LOAD_DATASETS = sys.argv[2] - - if LOAD_DATASETS.lower() == "none": - print("No datasets being loaded") - else: - while True: - try: - response = requests.get("http://0.0.0.0:6900/") - if response.status_code == 200: - ld = LoadDatasets(API_KEY) - ld.load_somos() - break - - except requests.exceptions.ConnectionError: - pass - except Exception as e: - print(e) - time.sleep(10) - pass - - time.sleep(5) - while True: - time.sleep(60) \ No newline at end of file diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/train_multilingual_model.sh b/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/train_multilingual_model.sh deleted file mode 100644 index cc050bd3f02de8a2f303737f187442d2eb80e4ef..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/train_multilingual_model.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
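# Editorial note, not part of the original fairseq example: the --sampling-method
# "temperature" / --sampling-temperature 1.5 flags used below rebalance the language
# pairs. If p_i is the empirical share of pair i in the training data, examples are
# drawn with probability q_i proportional to p_i^(1/T): T=1 keeps the natural
# distribution, while larger T flattens it toward uniform so low-resource pairs are
# seen more often. For example, two pairs with shares 0.9 and 0.1 at T=1.5 give
# 0.9^(2/3) ≈ 0.93 and 0.1^(2/3) ≈ 0.22, i.e. roughly 0.81 / 0.19 after normalizing.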
-
-path_2_data=$1 # <path to data> which contains binarized data for each direction
-lang_list=$2 # <path to a file which contains a list of languages separated by new lines>
-lang_pairs=$3 # a list of language pairs to train multilingual models, e.g. "en-fr,en-cs,fr-en,cs-en"
-
-fairseq-train "$path_2_data" \
- --encoder-normalize-before --decoder-normalize-before \
- --arch transformer --layernorm-embedding \
- --task translation_multi_simple_epoch \
- --sampling-method "temperature" \
- --sampling-temperature 1.5 \
- --encoder-langtok "src" \
- --decoder-langtok \
- --lang-dict "$lang_list" \
- --lang-pairs "$lang_pairs" \
- --criterion label_smoothed_cross_entropy --label-smoothing 0.2 \
- --optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \
- --lr-scheduler inverse_sqrt --lr 3e-05 --warmup-updates 2500 --max-update 40000 \
- --dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \
- --max-tokens 1024 --update-freq 2 \
- --save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \
- --seed 222 --log-format simple --log-interval 2
diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/speech_recognition/new/decoders/__init__.py b/spaces/mshukor/UnIVAL/fairseq/examples/speech_recognition/new/decoders/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/README.md b/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/README.md deleted file mode 100644 index 7a76ffd57c066c20af94aa3fca24c18e2ba4c3dd..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/README.md +++ /dev/null @@ -1,21 +0,0 @@
-# Generative Spoken Language Modeling
-
-* [Paper](https://arxiv.org/abs/2102.01192)
-* [Demo](https://speechbot.github.io/gslm/index.html)
-
-We build and evaluate generative speech2speech systems using [Log Mel Filterbank](https://pytorch.org/audio/stable/compliance.kaldi.html#fbank), [Modified CPC](https://github.com/facebookresearch/CPC_audio), [HuBERT Base](https://github.com/pytorch/fairseq/tree/main/examples/hubert) and [Wav2Vec 2.0 Large](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec). Our system is composed of three components, namely, *speech2unit*, *ulm* and *unit2speech*. We explain the models and usage of these components in their respective sub-directories. See the links below.
-
-## Speech to Unit Model (speech2unit)
-Speech to unit model is used for quantizing raw speech into learned discrete speech units. [More details](speech2unit)
-
-## Unit Language Model (ulm)
-Unit Language Model is a generative language model trained on discrete speech units. [More details](ulm)
-
-## Unit to Speech Model (unit2speech)
-Unit to speech model is used for synthesizing speech from discrete speech units. [More details](unit2speech)
-
-## Metrics
-We show how to compute ASR based metrics as well as zero-shot metrics proposed in our paper [here](metrics).
-
-## Tools
-We share two tools to resynthesize a given spoken utterance, and generate novel spoken language given a spoken prompt.
[More detail](tools) diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/multi_corpus_dataset.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/data/multi_corpus_dataset.py deleted file mode 100644 index 746155e515897da9fc9c803f9396a45b5cead8d0..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/multi_corpus_dataset.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import time -from collections import OrderedDict -from typing import Dict, List - -import numpy as np -from fairseq.data import data_utils - -from . import FairseqDataset - -logger = logging.getLogger(__name__) - - -class MultiCorpusDataset(FairseqDataset): - """ - Stores multiple instances of FairseqDataset together. Requires each instance - to be the same dataset, as the collate method needs to work on batches with - samples from each dataset. - - Allows specifying a distribution over the datasets to use. Note that unlike - MultiCorpusSampledDataset, this distribution allows sampling for each item, - rather than on a batch level. - - Each time ordered_indices() is called, a new sample is generated with - the specified distribution. - - Args: - datasets: a OrderedDict of FairseqDataset instances. - distribution: a List containing the probability of getting an utterance from - corresponding dataset - seed: random seed for sampling the datsets - sort_indices: if true, will sort the ordered indices by size - batch_sample: if true, will ensure each batch is from a single dataset - """ - - def __init__( - self, - datasets: Dict[str, FairseqDataset], - distribution: List[float], - seed: int, - sort_indices: bool = False, - batch_sample: bool = False, - distributed_rank=None, - ): - super().__init__() - assert isinstance(datasets, OrderedDict) - assert len(datasets) == len(distribution) - assert sum(distribution) == 1 - self.datasets = datasets - self.distribution = distribution - self.seed = seed - self.sort_indices = sort_indices - self.batch_sample = batch_sample - self.distributed_rank = distributed_rank - - # Avoid repeated conversions to list later - self.dataset_list = list(datasets.values()) - self.total_num_instances = 0 - - first_dataset = list(self.datasets.values())[0] - - self.dataset_offsets = [] - for dataset in datasets.values(): - assert isinstance(dataset, FairseqDataset) - assert type(dataset) is type(first_dataset) - self.dataset_offsets.append(self.total_num_instances) - self.total_num_instances += len(dataset) - - def ordered_indices(self): - start = time.time() - with data_utils.numpy_seed(self.seed, self.epoch): - logger.info(f"sampling new dataset with seed {self.seed} epoch {self.epoch}") - sampled_indices = [] - num_selected_instances = 0 - - # For each dataset i, sample self.distribution[i] * self.total_num_instances - for i, key in enumerate(self.datasets): - - if i < len(self.datasets) - 1: - num_instances = int(self.distribution[i] * self.total_num_instances) - high = self.dataset_offsets[i + 1] - else: - num_instances = self.total_num_instances - num_selected_instances - high = self.total_num_instances - - logger.info(f"sampling {num_instances} from {key} dataset") - num_selected_instances += num_instances - - # First, add k copies of the dataset where k = num_instances // len(dataset). - # This ensures an equal distribution of the data points as much as possible. 
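# (Editorial example, not in the original fairseq source: if this dataset holds
# dataset_size = 100 items and num_instances = 250 samples are needed, then
# num_copies = 250 // 100 = 2 full passes are added and the remaining 50 indices
# come from a random permutation, so every item appears either two or three times.)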
- # For the remaining entries randomly sample them - dataset_size = len(self.datasets[key]) - num_copies = num_instances // dataset_size - dataset_indices = ( - np.random.permutation(high - self.dataset_offsets[i]) - + self.dataset_offsets[i] - )[: num_instances - num_copies * dataset_size] - if num_copies > 0: - sampled_indices += list( - np.concatenate( - ( - np.repeat( - np.arange(self.dataset_offsets[i], high), num_copies - ), - dataset_indices, - ) - ) - ) - else: - sampled_indices += list(dataset_indices) - - assert ( - len(sampled_indices) == self.total_num_instances - ), f"{len(sampled_indices)} vs {self.total_num_instances}" - - np.random.shuffle(sampled_indices) - if self.sort_indices: - sampled_indices.sort(key=lambda i: self.num_tokens(i)) - - logger.info( - "multi_corpus_dataset ordered_indices took {}s".format( - time.time() - start - ) - ) - return np.array(sampled_indices, dtype=np.int64) - - def _map_index(self, index: int): - """ - If dataset A has length N and dataset B has length M - then index 1 maps to index 1 of dataset A, and index N + 1 - maps to index 1 of B. - """ - counter = 0 - for key, dataset in self.datasets.items(): - if index < counter + len(dataset): - return index - counter, key - counter += len(dataset) - raise ValueError( - "Invalid index: {}, max: {}".format(index, self.total_num_instances) - ) - - def __len__(self): - """ - Length of this dataset is the sum of individual datasets - """ - return self.total_num_instances - - def __getitem__(self, index): - new_index, key = self._map_index(index) - try: - item = self.datasets[key][new_index] - item["full_id"] = index - return item - except Exception as e: - e.args = (f"Error from {key} dataset", *e.args) - raise - - def collater(self, samples): - """ - If we are doing batch sampling, then pick the right collater to use. - - Otherwise we assume all collaters are the same. 
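# --- Editorial sketch, not part of fairseq ------------------------------------------
# A standalone illustration of the global-to-local index mapping that _map_index
# implements above: with dataset A of length N and dataset B of length M, global
# index N maps to item 0 of dataset B. The helper name and the toy sizes below are
# hypothetical.
def map_global_index(index, sizes):
    """sizes: ordered mapping of dataset name -> dataset length."""
    offset = 0
    for key, size in sizes.items():
        if index < offset + size:
            return index - offset, key
        offset += size
    raise ValueError("Invalid index: {}, max: {}".format(index, offset))

assert map_global_index(0, {"A": 3, "B": 2}) == (0, "A")
assert map_global_index(3, {"A": 3, "B": 2}) == (0, "B")
assert map_global_index(4, {"A": 3, "B": 2}) == (1, "B")
# -------------------------------------------------------------------------------------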
- """ - if len(samples) == 0: - return None - if "full_id" in samples[0]: - _, key = self._map_index(samples[0]["full_id"]) - try: - batch = self.datasets[key].collater(samples) - except Exception: - print(f"Collating failed for key {key}", flush=True) - raise - return batch - else: - # Subclasses may override __getitem__ to not specify full_id - return list(self.datasets.values())[0].collater(samples) - - def num_tokens(self, index: int): - index, key = self._map_index(index) - return self.datasets[key].num_tokens(index) - - def size(self, index: int): - index, key = self._map_index(index) - return self.datasets[key].size(index) - - @property - def can_reuse_epoch_itr_across_epochs(self): - return False - - def set_epoch(self, epoch, **unused): - super().set_epoch(epoch) - logger.info(f"setting epoch of multi_corpus_dataset to {epoch}") - self.epoch = epoch - - @property - def supports_prefetch(self): - return False - - @property - def supports_fetch_outside_dataloader(self): - return all( - self.datasets[key].supports_fetch_outside_dataloader - for key in self.datasets - ) - - def batch_by_size( - self, - indices, - max_tokens=None, - max_sentences=None, - required_batch_size_multiple=1, - ): - if not self.batch_sample: - return super().batch_by_size( - indices, max_tokens, max_sentences, required_batch_size_multiple - ) - - dataset_indices = {key: [] for key in self.datasets} - for i in indices: - _, key = self._map_index(i) - dataset_indices[key].append(i) - - batches = [] - for key in dataset_indices: - cur_batches = super().batch_by_size( - np.array(dataset_indices[key], dtype=np.int64), - max_tokens, - max_sentences, - required_batch_size_multiple, - ) - logger.info(f"Created {len(cur_batches)} batches for dataset {key}") - batches += cur_batches - - # If this dataset is used in a distributed training setup, - # then shuffle such that the order is seeded by the distributed rank - # as well - if self.distributed_rank is not None: - with data_utils.numpy_seed(self.seed, self.epoch, self.distributed_rank): - np.random.shuffle(batches) - return batches diff --git a/spaces/mskov/whisper_fileStream/streaming.py b/spaces/mskov/whisper_fileStream/streaming.py deleted file mode 100644 index 6c0329ca899a928c05fd99db517fd1481d874a4e..0000000000000000000000000000000000000000 --- a/spaces/mskov/whisper_fileStream/streaming.py +++ /dev/null @@ -1,24 +0,0 @@ -from transformers import pipeline -import gradio as gr -import time - -p = pipeline("automatic-speech-recognition") - -def transcribe(audio, state=""): - time.sleep(1) - text = p(audio)["text"] - state += text + " " - return state, state - -gr.Interface( - fn=transcribe, - inputs=[ - gr.inputs.Audio(source="microphone", type="filepath"), - "state" - ], - outputs=[ - "textbox", - "state" - ], - live=True).launch() - \ No newline at end of file diff --git a/spaces/mygyasir/deep-voice-cloning/build/lib/deep_voice_cloning/transcriber/__init__.py b/spaces/mygyasir/deep-voice-cloning/build/lib/deep_voice_cloning/transcriber/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/mygyasir/genious_bgremover/carvekit/utils/image_utils.py b/spaces/mygyasir/genious_bgremover/carvekit/utils/image_utils.py deleted file mode 100644 index 8b939f56a1041f3fb3db1b6e9874d0c8c99473c7..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/genious_bgremover/carvekit/utils/image_utils.py +++ /dev/null @@ -1,150 +0,0 @@ -""" - Source url: 
https://github.com/OPHoperHPO/image-background-remove-tool - Author: Nikita Selin (OPHoperHPO)[https://github.com/OPHoperHPO]. - License: Apache License 2.0 -""" - -import pathlib -from typing import Union, Any, Tuple - -import PIL.Image -import numpy as np -import torch - -ALLOWED_SUFFIXES = [".jpg", ".jpeg", ".bmp", ".png", ".webp"] - - -def to_tensor(x: Any) -> torch.Tensor: - """ - Returns a PIL.Image.Image as torch tensor without swap tensor dims. - - Args: - x: PIL.Image.Image instance - - Returns: - torch.Tensor instance - """ - return torch.tensor(np.array(x, copy=True)) - - -def load_image(file: Union[str, pathlib.Path, PIL.Image.Image]) -> PIL.Image.Image: - """Returns a PIL.Image.Image class by string path or pathlib path or PIL.Image.Image instance - - Args: - file: File path or PIL.Image.Image instance - - Returns: - PIL.Image.Image instance - - Raises: - ValueError: If file not exists or file is directory or file isn't an image or file is not correct PIL Image - - """ - if isinstance(file, str) and is_image_valid(pathlib.Path(file)): - return PIL.Image.open(file) - elif isinstance(file, PIL.Image.Image): - return file - elif isinstance(file, pathlib.Path) and is_image_valid(file): - return PIL.Image.open(str(file)) - else: - raise ValueError("Unknown input file type") - - -def convert_image(image: PIL.Image.Image, mode="RGB") -> PIL.Image.Image: - """Performs image conversion to correct color mode - - Args: - image: PIL.Image.Image instance - mode: Colort Mode to convert - - Returns: - PIL.Image.Image instance - - Raises: - ValueError: If image hasn't convertable color mode, or it is too small - """ - if is_image_valid(image): - return image.convert(mode) - - -def is_image_valid(image: Union[pathlib.Path, PIL.Image.Image]) -> bool: - """This function performs image validation. - - Args: - image: Path to the image or PIL.Image.Image instance being checked. - - Returns: - True if image is valid - - Raises: - ValueError: If file not a valid image path or image hasn't convertable color mode, or it is too small - - """ - if isinstance(image, pathlib.Path): - if not image.exists(): - raise ValueError("File is not exists") - elif image.is_dir(): - raise ValueError("File is a directory") - elif image.suffix.lower() not in ALLOWED_SUFFIXES: - raise ValueError( - f"Unsupported image format. Supported file formats: {', '.join(ALLOWED_SUFFIXES)}" - ) - elif isinstance(image, PIL.Image.Image): - if not (image.size[0] > 32 and image.size[1] > 32): - raise ValueError("Image should be bigger then (32x32) pixels.") - elif image.mode not in ["RGB", "RGBA", "L"]: - raise ValueError("Wrong image color mode.") - else: - raise ValueError("Unknown input file type") - return True - - -def transparency_paste( - bg_img: PIL.Image.Image, fg_img: PIL.Image.Image, box=(0, 0) -) -> PIL.Image.Image: - """ - Inserts an image into another image while maintaining transparency. - - Args: - bg_img: background image - fg_img: foreground image - box: place to paste - - Returns: - Background image with pasted foreground image at point or in the specified box - """ - fg_img_trans = PIL.Image.new("RGBA", bg_img.size) - fg_img_trans.paste(fg_img, box, mask=fg_img) - new_img = PIL.Image.alpha_composite(bg_img, fg_img_trans) - return new_img - - -def add_margin( - pil_img: PIL.Image.Image, - top: int, - right: int, - bottom: int, - left: int, - color: Tuple[int, int, int, int], -) -> PIL.Image.Image: - """ - Adds margin to the image. - - Args: - pil_img: Image that needed to add margin. 
- top: pixels count at top side - right: pixels count at right side - bottom: pixels count at bottom side - left: pixels count at left side - color: color of margin - - Returns: - Image with margin. - """ - width, height = pil_img.size - new_width = width + right + left - new_height = height + top + bottom - # noinspection PyTypeChecker - result = PIL.Image.new(pil_img.mode, (new_width, new_height), color) - result.paste(pil_img, (left, top)) - return result diff --git a/spaces/mygyasir/stablediffusionapi-dreamlike-photoreal1/README.md b/spaces/mygyasir/stablediffusionapi-dreamlike-photoreal1/README.md deleted file mode 100644 index f67cff42324493213c6fe8cf3b2d0a877633bbae..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/stablediffusionapi-dreamlike-photoreal1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stablediffusionapi Dreamlike Photoreal1 -emoji: 👁 -colorFrom: pink -colorTo: green -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Auto Data Free ((HOT)) Download 2013 Software.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Auto Data Free ((HOT)) Download 2013 Software.md deleted file mode 100644 index 38a248b19105eceee836b7a87888e54173545808..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Auto Data Free ((HOT)) Download 2013 Software.md +++ /dev/null @@ -1,36 +0,0 @@ -<br /> -<h1>How to Download Auto Data 2013 Software for Free</h1> -<p>Auto Data is a software that provides comprehensive information on various vehicles, such as technical specifications, service schedules, wiring diagrams, diagnostic trouble codes, and more. It is a useful tool for mechanics, car enthusiasts, and students who want to learn more about automotive engineering.</p> -<p>However, Auto Data is not a free software. It requires a subscription fee to access its database and features. If you want to download Auto Data 2013 software for free, you will need to find a reliable source that offers a cracked version of the software. This means that the software has been modified to bypass the security and licensing checks of the original software.</p> -<h2>Auto Data Free Download 2013 Software</h2><br /><p><b><b>Download File</b> ->>->>->> <a href="https://urlcod.com/2uIawv">https://urlcod.com/2uIawv</a></b></p><br /><br /> -<p>Before you proceed, you should be aware of the risks and consequences of downloading cracked software. First of all, it is illegal and unethical to use software without paying for it. You may be violating the intellectual property rights of the software developer and face legal actions. Second, cracked software may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Third, cracked software may not work properly or have limited functionality. You may encounter errors, bugs, or compatibility issues that can affect your user experience.</p> -<p>If you still want to download Auto Data 2013 software for free, you will need to follow these steps:</p> -<ol> -<li>Search for a reputable website that offers cracked software downloads. You can use search engines or online forums to find such websites. Some examples are getintopc.com, oceanofgames.com, and crackzsoft.com.</li> -<li>Find the link for Auto Data 2013 software on the website. Make sure that the link is working and not broken. 
You may need to complete some surveys or captcha tests to access the link.</li> -<li>Download the file from the link. The file may be in a compressed format such as ZIP or RAR. You will need to extract the file using a program such as WinRAR or 7-Zip.</li> -<li>Run the setup file from the extracted folder. Follow the instructions on the screen to install the software. You may need to copy and paste a serial key or a patch file from the folder to activate the software.</li> -<li>Enjoy using Auto Data 2013 software for free.</li> -</ol> -<p>Note: This article is for educational purposes only. We do not condone or encourage piracy or illegal use of software. Please support the software developers by purchasing their products legally.</p> - -<p>Auto Data 2013 software is one of the most popular and comprehensive automotive software in the market. It covers over 17,000 vehicle models from 80 manufacturers worldwide. It provides detailed and accurate information on various aspects of vehicle maintenance and repair, such as:</p> -<ul> -<li>Engine management and fuel injection systems</li> -<li>ABS, ESP, ASR and airbag systems</li> -<li>Key programming and service indicators</li> -<li>Climate control and air conditioning systems</li> -<li>Steering and suspension systems</li> -<li>Brake systems and tyre pressure monitoring systems</li> -<li>Electrical systems and wiring diagrams</li> -<li>Body and chassis systems</li> -<li>Technical data and specifications</li> -<li>Troubleshooting and diagnostic procedures</li> -<li>Service schedules and intervals</li> -<li>Torque settings and tools required</li> -</ul> -<p>Auto Data 2013 software is designed to be user-friendly and easy to navigate. It has a simple and intuitive interface that allows you to search for information by vehicle make, model, year, engine code, or system. It also has a graphical display that shows the location and layout of components, connectors, fuses, relays, and sensors. It also has a print function that lets you print out the information you need.</p> -<p></p> -<p>Auto Data 2013 software is compatible with Windows XP, Vista, 7, 8, and 10 operating systems. It requires a minimum of 1 GB of RAM and 10 GB of hard disk space. It also requires an internet connection for activation and updates.</p> e93f5a0c3f<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest].md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest].md deleted file mode 100644 index 2370b0e44ecdaa1a1256a3993dce7d450b519153..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest].md +++ /dev/null @@ -1,53 +0,0 @@ - -<h1>IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest]: How to Get It and Why You Need It</h1> - -<p>If you are a software developer, you probably know that IntelliJ IDEA is one of the best IDEs for Java and Kotlin development. It offers a lot of features and tools that make your coding experience more efficient and enjoyable. But did you know that you can also get IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest] for free?</p> - -<p>IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest] is a software that allows you to activate the full version of IntelliJ IDEA without paying any fees. 
This means that you can enjoy all the benefits of the premium edition, such as advanced code analysis, smart code completion, debugging, testing, refactoring, and more.</p> -<h2>IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest]</h2><br /><p><b><b>Download File</b> &#10001; &#10001; &#10001; <a href="https://urlcod.com/2uIaM9">https://urlcod.com/2uIaM9</a></b></p><br /><br /> - -<p>But why would you need IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest]? Well, there are several reasons why you might want to use this software:</p> - -<ul> -<li>You want to save money and avoid paying for a subscription or a license.</li> -<li>You want to try out the full features of IntelliJ IDEA before buying it.</li> -<li>You want to use IntelliJ IDEA for personal or educational purposes.</li> -<li>You want to support the developers of IntelliJ IDEA and JetBrains.</li> -</ul> - -<p>Whatever your reason is, you can get IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest] easily and safely by following these steps:</p> - -<ol> -<li>Download IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest] from this link: <a href="https://urlgoal.com/2j8gd9">https://urlgoal.com/2j8gd9</a></li> -<li>Extract the zip file and run the setup.exe file.</li> -<li>Follow the instructions on the screen and install IntelliJ IDEA on your computer.</li> -<li>Launch IntelliJ IDEA and enter the activation code that you will find in the crack folder.</li> -<li>Enjoy using IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest]!</li> -</ol> - -<p>That's it! You have successfully activated IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest] and you can now use it for your projects. Remember to always update your software to get the latest features and bug fixes.</p> - -<p>If you liked this article, please share it with your friends and colleagues who might be interested in IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest]. Also, feel free to leave a comment below if you have any questions or feedback.</p> - -<p>Happy coding!</p> - -<p>Now that you have IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest], you might want to learn some tips and tricks to make the most of this powerful IDE. Here are some of the best ones that you can use to improve your productivity and code quality:</p> -<p></p> - -<ul> -<li>Use auto import to stop doing importing manually. IntelliJ IDEA can automatically add import statements for classes, methods, and variables that you use in your code. You can enable this feature by going to Preferences | Editor | General | Auto Import and checking the options for Java[^2^].</li> -<li>Use inspections to find and fix code issues. IntelliJ IDEA has some cool inspections that can help you detect errors, bugs, performance issues, style violations, and more. You can see the inspection results in the editor as yellow or red highlights, or in a dedicated tool window by pressing ⌥⏎, or Alt + Enter[^2^]. You can also run inspections for the whole project or a specific scope by going to Analyze | Inspect Code[^4^].</li> -<li>Use VM options to speed up your IDE performance. Working on a big project with thousands of classes? You might want to increase the memory allocated to IntelliJ IDEA by editing the VM options file. You can find this file by going to Help | Edit Custom VM Options and adjust the values for -Xms and -Xmx parameters[^2^].</li> -<li>Use plugins to extend the functionality of IntelliJ IDEA. 
There is a plugin repository with many useful plugins for IntelliJ IDEA that you can browse and install from within the IDE. You can go to Preferences | Plugins and search for plugins by name, category, or rating. Some of the most popular plugins are Lombok, SonarLint, Rainbow Brackets, and Code With Me[^2^].</li> -<li>Use navigation shortcuts to quickly jump to any file, class, method, or symbol in your project. You can use ⌘O, or Ctrl + N, to go to a class by name, ⌘⇧O, or Ctrl + Shift + N, to go to a file by name, ⌘⌥O, or Ctrl + Alt + Shift + N, to go to a symbol by name, and ⌘F12, or Ctrl + F12, to go to a method or field in the current file[^3^].</li> -<li>Use refactor shortcuts to quickly change your code structure without breaking it. You can use ⇧F6, or Shift + F6, to rename classes, methods, and variables in IntelliJ IDEA. You can also use ⌘⌥V, or Ctrl + Alt + V, to extract code to a variable, ⌘⌥M, or Ctrl + Alt + M, to extract code to a method, ⌘⌥C, or Ctrl + Alt + C, to extract code to a constant, and more[^3^].</li> -<li>Use presentation mode to show your code on a big screen or record a video tutorial. You can enter and exit presentation mode by using Quick Switch Scheme shortcut ⌃`, or Ctrl + `, and selecting Presentation Mode from the list. You can also configure the font size for presentation mode by going to Preferences | Appearance & Behavior | Appearance and changing the Presentation Mode font size value[^3^].</li> -</ul> - -<p>These are just some of the tips and tricks that you can use with IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest]. There are many more that you can discover by exploring the IDE features and settings. You can also check out the official documentation[^4^], blog[^3^], and guide[^2^] for more resources and tutorials.</p> - -<p>We hope you enjoyed this article and learned something new. If you did, please share it with your friends and colleagues who might be interested in IntelliJ IDEA 2020.5.8 Crack Full License Activation Code [Latest]. Also, feel free to leave a comment below if you have any questions or feedback.</p> - -<p>Happy coding!</p> e93f5a0c3f<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/nikhil567/Turkey-Syria-Earthquake/helper.py b/spaces/nikhil567/Turkey-Syria-Earthquake/helper.py deleted file mode 100644 index 7e8b6cb6f6ed12184b7e7b25d1892e7e1be3c680..0000000000000000000000000000000000000000 --- a/spaces/nikhil567/Turkey-Syria-Earthquake/helper.py +++ /dev/null @@ -1,77 +0,0 @@ -import pandas as pd -import streamlit as st -import altair as alt -import matplotlib.pyplot as plt -from wordcloud import WordCloud, STOPWORDS -import seaborn as sns -import pickle -import numpy as np -import cv2 - -def plot_bar_chart(tweet_df): - x_name = tweet_df.columns[0] - y_name = tweet_df.columns[1] - st.write(alt.Chart(tweet_df).mark_bar().encode( - x=alt.X(x_name, sort=None), - y=y_name, - )) - -def plot_line_chart(tweet_df): - x_name = tweet_df.columns[0] - y_name = tweet_df.columns[1] - st.write(alt.Chart(tweet_df).mark_line().encode( - x=alt.X(x_name, sort=None), - y=y_name, - )) - -def plot_pie(tweet_df, labels): - explode = (0, 0.1) - fig1, ax1 = plt.subplots() - colors = ("orange", "brown") - ax1.pie(tweet_df, explode=explode, colors=colors, labels=labels, autopct='%1.1f%%', - shadow=True, startangle=90) - ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. 
- - st.pyplot(fig1) - -def word_cloud(hashtags, col): - mask = np.array(cv2.imread("twitter.png")) - stopwords = STOPWORDS - wc = WordCloud(width=500, height=500, min_font_size=10, background_color='black', stopwords=stopwords, mask=mask) - if col == 'hashtags': - df_wc = wc.generate(hashtags[col].str.cat(sep=",")) - else: - text = str(hashtags[col].values) - df_wc = wc.generate(text) - return df_wc - -def plot_heatmap(): - table = pickle.load(open('table.pkl', 'rb')) - fig, ax = plt.subplots(figsize=(9, 6), ncols=1) - - sns.heatmap(table, cmap="Greens", - linewidths=0.5, ax=ax) - st.pyplot(fig) - - # day_df = pd.DataFrame(list(df.groupby('day')['hash_tags'])) - # day_df.columns = ['date', 'hashtags'] - - # top_hashtags = pd.DataFrame() - # day_hash_freq = pd.DataFrame() - # for i in range(len(day_df)): - # hold = pd.DataFrame(np.hstack(day_df['hashtags'][i])).value_counts().head(15) - # v1 = hold.index - # v2 = hold.values - # v1 = [i[0] for i in v1] - # v1 = np.array(v1) - # day_hash_freq = day_hash_freq.append(pd.DataFrame({'date': day_df['date'][i], 'hashtag': v1, 'Frequency': v2}), - # ignore_index=True) - # top_hashtags = top_hashtags.append(pd.DataFrame({'hashtag': v1, 'Frequency': v2}), ignore_index=True) - - # top_hashtags = top_hashtags.sort_values(by='Frequency', ascending=False, ignore_index=True).head(30) - # top_hashtags = pd.DataFrame(top_hashtags['hashtag'].unique()) - # top_hashtags.columns = ['hashtag'] - - # day_hash_freq = day_hash_freq.merge(top_hashtags, on='hashtag').sort_values(by='date', ascending=True) - # table = day_hash_freq.pivot_table(index='date', columns='hashtag', values='Frequency', aggfunc='sum').fillna( - # 0).astype('int') \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/data/datasets/lvis_v1_categories.py b/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/data/datasets/lvis_v1_categories.py deleted file mode 100644 index 7374e6968bb006f5d8c49e75d9d3b31ea3d77d05..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/data/datasets/lvis_v1_categories.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# Autogen with -# with open("lvis_v1_val.json", "r") as f: -# a = json.load(f) -# c = a["categories"] -# for x in c: -# del x["image_count"] -# del x["instance_count"] -# LVIS_CATEGORIES = repr(c) + " # noqa" -# with open("/tmp/lvis_categories.py", "wt") as f: -# f.write(f"LVIS_CATEGORIES = {LVIS_CATEGORIES}") -# Then paste the contents of that file below - -# fmt: off -LVIS_CATEGORIES = [{'frequency': 'c', 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'id': 1, 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'id': 2, 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'id': 3, 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'f', 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'id': 4, 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'id': 5, 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'c', 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'id': 6, 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'synset': 'almond.n.02', 'synonyms': ['almond'], 'id': 7, 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'id': 8, 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'c', 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'id': 9, 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'id': 10, 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'id': 11, 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'synset': 'apple.n.01', 'synonyms': ['apple'], 'id': 12, 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'id': 13, 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'id': 14, 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'synset': 'apron.n.01', 'synonyms': ['apron'], 'id': 15, 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'id': 16, 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'r', 'synset': 'arctic.n.02', 'synonyms': ['arctic_(type_of_shoe)', 'galosh', 'golosh', 'rubber_(type_of_shoe)', 'gumshoe'], 'id': 17, 'def': 'a waterproof overshoe that protects shoes from water or snow', 'name': 'arctic_(type_of_shoe)'}, {'frequency': 'c', 'synset': 
'armband.n.02', 'synonyms': ['armband'], 'id': 18, 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'id': 19, 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'id': 20, 'def': 'a large wardrobe or cabinet', 'name': 'armoire'}, {'frequency': 'r', 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'id': 21, 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'id': 22, 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'id': 23, 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'id': 24, 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'id': 25, 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'id': 26, 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'f', 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'id': 27, 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'id': 28, 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'synset': 'awning.n.01', 'synonyms': ['awning'], 'id': 29, 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'id': 30, 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'r', 'synset': 'baboon.n.01', 'synonyms': ['baboon'], 'id': 31, 'def': 'large terrestrial monkeys having doglike muzzles', 'name': 'baboon'}, {'frequency': 'f', 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'id': 32, 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'id': 33, 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'id': 34, 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'id': 35, 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'id': 36, 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'id': 
37, 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'id': 38, 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'id': 39, 'def': 'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'id': 40, 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'synset': 'ball.n.06', 'synonyms': ['ball'], 'id': 41, 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'id': 42, 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'id': 43, 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'id': 44, 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'synset': 'banana.n.02', 'synonyms': ['banana'], 'id': 45, 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'c', 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'id': 46, 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'id': 47, 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'f', 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'id': 48, 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'id': 49, 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'id': 50, 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'id': 51, 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'synset': 'barge.n.01', 'synonyms': ['barge'], 'id': 52, 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'id': 53, 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'id': 54, 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'id': 55, 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'id': 56, 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'id': 57, 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 
'f', 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'id': 58, 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'id': 59, 'def': 'a cap with a bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'synset': 'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'id': 60, 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'id': 61, 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'id': 62, 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'id': 63, 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'c', 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'id': 64, 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'id': 65, 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'id': 66, 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'id': 67, 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'id': 68, 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'id': 69, 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'synset': 'battery.n.02', 'synonyms': ['battery'], 'id': 70, 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'id': 71, 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'synset': 'bead.n.01', 'synonyms': ['bead'], 'id': 72, 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'c', 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'id': 73, 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'id': 74, 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'id': 75, 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'synset': 'bear.n.01', 'synonyms': ['bear'], 'id': 76, 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'synset': 'bed.n.01', 'synonyms': ['bed'], 'id': 77, 'def': 'a piece of furniture that provides a place to sleep', 'name': 'bed'}, {'frequency': 'r', 'synset': 'bedpan.n.01', 'synonyms': 
['bedpan'], 'id': 78, 'def': 'a shallow vessel used by a bedridden patient for defecation and urination', 'name': 'bedpan'}, {'frequency': 'f', 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'id': 79, 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'synset': 'beef.n.01', 'synonyms': ['cow'], 'id': 80, 'def': 'cattle/cow', 'name': 'cow'}, {'frequency': 'f', 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'id': 81, 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'id': 82, 'def': 'an device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'id': 83, 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'id': 84, 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'id': 85, 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'synset': 'bell.n.01', 'synonyms': ['bell'], 'id': 86, 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'id': 87, 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'synset': 'belt.n.02', 'synonyms': ['belt'], 'id': 88, 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'id': 89, 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'synset': 'bench.n.01', 'synonyms': ['bench'], 'id': 90, 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'synset': 'beret.n.01', 'synonyms': ['beret'], 'id': 91, 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'synset': 'bib.n.02', 'synonyms': ['bib'], 'id': 92, 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'id': 93, 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'id': 94, 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'id': 95, 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'f', 'synset': 'billboard.n.01', 'synonyms': ['billboard'], 'id': 96, 'def': 'large outdoor signboard', 'name': 'billboard'}, {'frequency': 'c', 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'id': 97, 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'id': 98, 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'synset': 'bird.n.01', 'synonyms': ['bird'], 'id': 99, 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'c', 'synset': 
'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'id': 100, 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'c', 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 'id': 101, 'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'synset': 'birdcage.n.01', 'synonyms': ['birdcage'], 'id': 102, 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'id': 103, 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'id': 104, 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'id': 105, 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'id': 106, 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'id': 107, 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'synset': 'blackberry.n.01', 'synonyms': ['blackberry'], 'id': 108, 'def': 'large sweet black or very dark purple edible aggregate fruit', 'name': 'blackberry'}, {'frequency': 'f', 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'id': 109, 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'id': 110, 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'id': 111, 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'id': 112, 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'id': 113, 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'f', 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'id': 114, 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'f', 'synset': 'blouse.n.01', 'synonyms': ['blouse'], 'id': 115, 'def': 'a top worn by women', 'name': 'blouse'}, {'frequency': 'f', 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'id': 116, 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'id': 117, 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'id': 118, 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'r', 'synset': 'bob.n.05', 'synonyms': ['bob', 'bobber', 'bobfloat'], 'id': 119, 'def': 'a small float usually made of cork; attached to a fishing line', 'name': 'bob'}, {'frequency': 'c', 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'id': 120, 'def': 'a thing around which thread/tape/film 
or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'c', 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'id': 121, 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'id': 122, 'def': 'egg cooked briefly in the shell in gently boiling water', 'name': 'boiled_egg'}, {'frequency': 'r', 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'id': 123, 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'id': 124, 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'id': 125, 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'id': 126, 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'synset': 'book.n.01', 'synonyms': ['book'], 'id': 127, 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'c', 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'id': 128, 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'id': 129, 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'id': 130, 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'id': 131, 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'synset': 'boot.n.01', 'synonyms': ['boot'], 'id': 132, 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'id': 133, 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'id': 134, 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'id': 135, 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'id': 136, 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'id': 137, 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'id': 138, 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'id': 139, 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'id': 140, 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 
'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'id': 141, 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'synset': 'bowling_ball.n.01', 'synonyms': ['bowling_ball'], 'id': 142, 'def': 'a large ball with finger holes used in the sport of bowling', 'name': 'bowling_ball'}, {'frequency': 'f', 'synset': 'box.n.01', 'synonyms': ['box'], 'id': 143, 'def': 'a (usually rectangular) container; may have a lid', 'name': 'box'}, {'frequency': 'r', 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'id': 144, 'def': 'large glove coverings the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'id': 145, 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'id': 146, 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'id': 147, 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'id': 148, 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'id': 149, 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'f', 'synset': 'bread.n.01', 'synonyms': ['bread'], 'id': 150, 'def': 'food made from dough of flour or meal and usually raised with yeast or baking powder and then baked', 'name': 'bread'}, {'frequency': 'r', 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'id': 151, 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'f', 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'id': 152, 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'id': 153, 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'f', 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'id': 154, 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'id': 155, 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'synset': 'broom.n.01', 'synonyms': ['broom'], 'id': 156, 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'id': 157, 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'id': 158, 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'id': 159, 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'id': 160, 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, 
{'frequency': 'r', 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'id': 161, 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'synset': 'bull.n.11', 'synonyms': ['horned_cow'], 'id': 162, 'def': 'a cow with horns', 'name': 'bull'}, {'frequency': 'c', 'synset': 'bulldog.n.01', 'synonyms': ['bulldog'], 'id': 163, 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'id': 164, 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'id': 165, 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'id': 166, 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'id': 167, 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'id': 168, 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'f', 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'id': 169, 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'id': 170, 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'id': 171, 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'id': 172, 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'id': 173, 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'id': 174, 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'f', 'synset': 'butter.n.01', 'synonyms': ['butter'], 'id': 175, 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'id': 176, 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'synset': 'button.n.01', 'synonyms': ['button'], 'id': 177, 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'id': 178, 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'id': 179, 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 'cabana'}, {'frequency': 'c', 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 
'id': 180, 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'id': 181, 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'id': 182, 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'synset': 'cake.n.03', 'synonyms': ['cake'], 'id': 183, 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'id': 184, 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'id': 185, 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'synset': 'calf.n.01', 'synonyms': ['calf'], 'id': 186, 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'id': 187, 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'synset': 'camel.n.01', 'synonyms': ['camel'], 'id': 188, 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'synset': 'camera.n.01', 'synonyms': ['camera'], 'id': 189, 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'id': 190, 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'id': 191, 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'id': 192, 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'id': 193, 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'f', 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'id': 194, 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'id': 195, 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'id': 196, 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'id': 197, 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'id': 198, 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'synset': 'canister.n.02', 'synonyms': ['canister', 'cannister'], 'id': 199, 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'c', 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'id': 200, 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 
'name': 'canoe'}, {'frequency': 'c', 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'id': 201, 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'id': 202, 'def': 'a flask for carrying water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'f', 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'id': 203, 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'id': 204, 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'c', 'synset': 'cape.n.02', 'synonyms': ['cape'], 'id': 205, 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'id': 206, 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'id': 207, 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'id': 208, 'def': 'a wheeled vehicle adapted to the rails of railroad (mark each individual railcar separately)', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'id': 209, 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'id': 210, 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'id': 211, 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'synset': 'card.n.03', 'synonyms': ['card'], 'id': 212, 'def': 'a rectangular piece of paper used to send messages (e.g. 
greetings or pictures)', 'name': 'card'}, {'frequency': 'c', 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'id': 213, 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'id': 214, 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'id': 215, 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'id': 216, 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'id': 217, 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'f', 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'id': 218, 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'synset': 'cart.n.01', 'synonyms': ['cart'], 'id': 219, 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'synset': 'carton.n.02', 'synonyms': ['carton'], 'id': 220, 'def': 'a container made of cardboard for holding food or drink', 'name': 'carton'}, {'frequency': 'c', 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'id': 221, 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'id': 222, 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'id': 223, 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'id': 224, 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'synset': 'cat.n.01', 'synonyms': ['cat'], 'id': 225, 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'f', 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'id': 226, 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'c', 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'id': 227, 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'id': 228, 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'f', 'synset': 'celery.n.01', 'synonyms': ['celery'], 'id': 229, 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'id': 230, 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'synset': 'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'id': 231, 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 
'chain_mail'}, {'frequency': 'f', 'synset': 'chair.n.01', 'synonyms': ['chair'], 'id': 232, 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'id': 233, 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'synset': 'chalice.n.01', 'synonyms': ['chalice'], 'id': 234, 'def': 'a bowl-shaped drinking vessel; especially the Eucharistic cup', 'name': 'chalice'}, {'frequency': 'f', 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'id': 235, 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'synset': 'chap.n.04', 'synonyms': ['chap'], 'id': 236, 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'id': 237, 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'id': 238, 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'id': 239, 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'id': 240, 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'c', 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'id': 241, 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'id': 242, 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'c', 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'id': 243, 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'id': 244, 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'id': 245, 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'id': 246, 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, {'frequency': 'r', 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'id': 247, 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'id': 248, 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'id': 249, 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'id': 250, 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'id': 251, 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 
'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'id': 252, 'def': 'shirt collar, animal collar, or tight-fitting necklace', 'name': 'choker'}, {'frequency': 'f', 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'id': 253, 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'f', 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'id': 254, 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'id': 255, 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'synset': 'chute.n.02', 'synonyms': ['slide'], 'id': 256, 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'id': 257, 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'id': 258, 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'f', 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'id': 259, 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'id': 260, 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'id': 261, 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'id': 262, 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'c', 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'id': 263, 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'id': 264, 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'synset': 'cleat.n.02', 'synonyms': ['cleat_(for_securing_rope)'], 'id': 265, 'def': 'a fastener (usually with two projecting horns) around which a rope can be secured', 'name': 'cleat_(for_securing_rope)'}, {'frequency': 'r', 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'id': 266, 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'synset': 'clip.n.03', 'synonyms': ['clip'], 'id': 267, 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'id': 268, 'def': 'a small writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'r', 'synset': 'clipper.n.03', 'synonyms': ['clippers_(for_plants)'], 'id': 269, 'def': 'shears for cutting grass or shrubbery (often used in the plural)', 'name': 'clippers_(for_plants)'}, {'frequency': 'r', 'synset': 'cloak.n.02', 'synonyms': ['cloak'], 'id': 270, 'def': 'a loose outer garment', 'name': 'cloak'}, {'frequency': 'f', 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'id': 271, 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'synset': 
'clock_tower.n.01', 'synonyms': ['clock_tower'], 'id': 272, 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'id': 273, 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'id': 274, 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'id': 275, 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'id': 276, 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'synset': 'coat.n.01', 'synonyms': ['coat'], 'id': 277, 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'id': 278, 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'c', 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'id': 279, 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'id': 280, 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'r', 'synset': 'cockroach.n.01', 'synonyms': ['cockroach'], 'id': 281, 'def': 'any of numerous chiefly nocturnal insects; some are domestic pests', 'name': 'cockroach'}, {'frequency': 'r', 'synset': 'cocoa.n.01', 'synonyms': ['cocoa_(beverage)', 'hot_chocolate_(beverage)', 'drinking_chocolate'], 'id': 282, 'def': 'a beverage made from cocoa powder and milk and sugar; usually drunk hot', 'name': 'cocoa_(beverage)'}, {'frequency': 'c', 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'id': 283, 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'f', 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'id': 284, 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'id': 285, 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'id': 286, 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'synset': 'coil.n.05', 'synonyms': ['coil'], 'id': 287, 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'synset': 'coin.n.01', 'synonyms': ['coin'], 'id': 288, 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'c', 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'id': 289, 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'id': 290, 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'synset': 'coloring_material.n.01', 'synonyms': ['coloring_material', 'colouring_material'], 'id': 291, 
'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'id': 292, 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'id': 293, 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'id': 294, 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'r', 'synset': 'compass.n.01', 'synonyms': ['compass'], 'id': 295, 'def': 'navigational instrument for finding directions', 'name': 'compass'}, {'frequency': 'f', 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'id': 296, 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'f', 'synset': 'condiment.n.01', 'synonyms': ['condiment'], 'id': 297, 'def': 'a preparation (a sauce or relish or spice) to enhance flavor or enjoyment', 'name': 'condiment'}, {'frequency': 'f', 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'id': 298, 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'id': 299, 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'id': 300, 'def': 'a car that has top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'id': 301, 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'r', 'synset': 'cooker.n.01', 'synonyms': ['cooker'], 'id': 302, 'def': 'a utensil for cooking', 'name': 'cooker'}, {'frequency': 'f', 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'id': 303, 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'id': 304, 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'id': 305, 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'f', 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'id': 306, 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'id': 307, 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'c', 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'id': 308, 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'f', 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'id': 309, 'def': 'ears or kernels of corn that can be prepared and served for human food (only mark individual ears or kernels)', 'name': 'edible_corn'}, {'frequency': 'r', 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'id': 310, 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 
'synset': 'cornet.n.01', 'synonyms': ['cornet', 'horn', 'trumpet'], 'id': 311, 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'id': 312, 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'id': 313, 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'c', 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'id': 314, 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'c', 'synset': 'costume.n.04', 'synonyms': ['costume'], 'id': 315, 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'id': 316, 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'id': 317, 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'c', 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'id': 318, 'def': 'a bell hung around the neck of cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'id': 319, 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'c', 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'id': 320, 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'r', 'synset': 'crab.n.05', 'synonyms': ['crabmeat'], 'id': 321, 'def': 'the edible flesh of any of various crabs', 'name': 'crabmeat'}, {'frequency': 'c', 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'id': 322, 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'id': 323, 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'synset': 'crate.n.01', 'synonyms': ['crate'], 'id': 324, 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'c', 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'id': 325, 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'id': 326, 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'c', 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'id': 327, 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'id': 328, 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'id': 329, 'def': 'an earthen jar (made of baked clay) or a modern electric crockpot', 'name': 'crock_pot'}, {'frequency': 'f', 'synset': 'crossbar.n.01', 'synonyms': ['crossbar'], 'id': 330, 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'synset': 'crouton.n.01', 'synonyms': 
['crouton'], 'id': 331, 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'c', 'synset': 'crow.n.01', 'synonyms': ['crow'], 'id': 332, 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'r', 'synset': 'crowbar.n.01', 'synonyms': ['crowbar', 'wrecking_bar', 'pry_bar'], 'id': 333, 'def': 'a heavy iron lever with one end forged into a wedge', 'name': 'crowbar'}, {'frequency': 'c', 'synset': 'crown.n.04', 'synonyms': ['crown'], 'id': 334, 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'id': 335, 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'id': 336, 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'id': 337, 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'f', 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'id': 338, 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'c', 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'id': 339, 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'id': 340, 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'c', 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'id': 341, 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'id': 342, 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'id': 343, 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'synset': 'cup.n.01', 'synonyms': ['cup'], 'id': 344, 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'id': 345, 'def': 'a metal award or cup-shaped vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'f', 'synset': 'cupboard.n.01', 'synonyms': ['cupboard', 'closet'], 'id': 346, 'def': 'a small room (or recess) or cabinet used for storage space', 'name': 'cupboard'}, {'frequency': 'f', 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'id': 347, 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'id': 348, 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'id': 349, 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'id': 350, 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, 
{'frequency': 'f', 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'id': 351, 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'id': 352, 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'id': 353, 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'id': 354, 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'synset': 'dalmatian.n.02', 'synonyms': ['dalmatian'], 'id': 355, 'def': 'a large breed having a smooth white coat with black or brown spots', 'name': 'dalmatian'}, {'frequency': 'c', 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'id': 356, 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'id': 357, 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'id': 358, 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'id': 359, 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'id': 360, 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'synset': 'desk.n.01', 'synonyms': ['desk'], 'id': 361, 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'id': 362, 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'id': 363, 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'id': 364, 'def': 'yearly planner book', 'name': 'diary'}, {'frequency': 'r', 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'id': 365, 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'id': 366, 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'id': 367, 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'id': 368, 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'f', 'synset': 'dish.n.01', 'synonyms': ['dish'], 'id': 369, 'def': 'a piece of dishware normally used as a container for holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'id': 370, 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, 
{'frequency': 'c', 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'id': 371, 'def': 'a cloth for washing dishes or cleaning in general', 'name': 'dishrag'}, {'frequency': 'f', 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'id': 372, 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'id': 373, 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid', 'dishsoap'], 'id': 374, 'def': 'dishsoap or dish detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'f', 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'id': 375, 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'r', 'synset': 'diving_board.n.01', 'synonyms': ['diving_board'], 'id': 376, 'def': 'a springboard from which swimmers can dive', 'name': 'diving_board'}, {'frequency': 'f', 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'id': 377, 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'synset': 'dog.n.01', 'synonyms': ['dog'], 'id': 378, 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'id': 379, 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'f', 'synset': 'doll.n.01', 'synonyms': ['doll'], 'id': 380, 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'id': 381, 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'synset': 'dollhouse.n.01', 'synonyms': ['dollhouse', "doll's_house"], 'id': 382, 'def': "a house so small that it is likened to a child's plaything", 'name': 'dollhouse'}, {'frequency': 'c', 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'id': 383, 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'id': 384, 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'f', 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'id': 385, 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'id': 386, 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'id': 387, 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'synset': 'dove.n.01', 'synonyms': ['dove'], 'id': 388, 'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'id': 389, 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'id': 390, 'def': 'a boxlike container in a piece of furniture; made so as to slide in and 
out', 'name': 'drawer'}, {'frequency': 'c', 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'id': 391, 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'id': 392, 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'id': 393, 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'f', 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'id': 394, 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'f', 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'id': 395, 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'synset': 'drill.n.01', 'synonyms': ['drill'], 'id': 396, 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'synset': 'drone.n.04', 'synonyms': ['drone'], 'id': 397, 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'id': 398, 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'id': 399, 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'id': 400, 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'synset': 'duck.n.01', 'synonyms': ['duck'], 'id': 401, 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'c', 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'id': 402, 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'id': 403, 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'id': 404, 'def': 'a large cylindrical bag of heavy cloth (does not include suitcases)', 'name': 'duffel_bag'}, {'frequency': 'r', 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'id': 405, 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'id': 406, 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'synset': 'dustpan.n.02', 'synonyms': ['dustpan'], 'id': 407, 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'c', 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'id': 408, 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'id': 409, 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'id': 410, 'def': 'a 
soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'synset': 'earring.n.01', 'synonyms': ['earring'], 'id': 411, 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'synset': 'easel.n.01', 'synonyms': ['easel'], 'id': 412, 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'id': 413, 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'synset': 'eel.n.01', 'synonyms': ['eel'], 'id': 414, 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'id': 415, 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'id': 416, 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'id': 417, 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'id': 418, 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'id': 419, 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'id': 420, 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'id': 421, 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'id': 422, 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'c', 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'id': 423, 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'id': 424, 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'id': 425, 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'id': 426, 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'id': 427, 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'id': 428, 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'synset': 'fan.n.01', 'synonyms': ['fan'], 'id': 429, 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'id': 430, 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'id': 
431, 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'id': 432, 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'id': 433, 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'c', 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'id': 434, 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'id': 435, 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'id': 436, 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'id': 437, 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'id': 438, 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'id': 439, 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'id': 440, 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'f', 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'id': 441, 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'f', 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'id': 442, 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'id': 443, 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'id': 444, 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 'hydrant'], 'id': 445, 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'r', 'synset': 'first-aid_kit.n.01', 'synonyms': ['first-aid_kit'], 'id': 446, 'def': 'kit consisting of a set of bandages and medicines for giving first aid', 'name': 'first-aid_kit'}, {'frequency': 'f', 'synset': 'fish.n.01', 'synonyms': ['fish'], 'id': 447, 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'c', 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'id': 448, 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'id': 449, 'def': 'a 
transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'c', 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'id': 450, 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'synset': 'flag.n.01', 'synonyms': ['flag'], 'id': 451, 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'id': 452, 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'id': 453, 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'id': 454, 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'c', 'synset': 'flap.n.01', 'synonyms': ['flap'], 'id': 455, 'def': 'any broad thin covering attached at one edge, such as a mud flap next to a wheel or a flap on an airplane wing', 'name': 'flap'}, {'frequency': 'r', 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'id': 456, 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'id': 457, 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'id': 458, 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'id': 459, 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'id': 460, 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'id': 461, 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'id': 462, 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'c', 'synset': 'foal.n.01', 'synonyms': ['foal'], 'id': 463, 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'id': 464, 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'id': 465, 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 'id': 466, 'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'id': 467, 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'id': 468, 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'synset': 
'fork.n.01', 'synonyms': ['fork'], 'id': 469, 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'c', 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'id': 470, 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'c', 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'id': 471, 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'c', 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'id': 472, 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'id': 473, 'def': 'anything that freshens air by removing or covering odor', 'name': 'freshener'}, {'frequency': 'f', 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'id': 474, 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'id': 475, 'def': 'a tailless stout-bodied amphibian with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'id': 476, 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'f', 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'id': 477, 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'id': 478, 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'id': 479, 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'r', 'synset': 'futon.n.01', 'synonyms': ['futon'], 'id': 480, 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'id': 481, 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'id': 482, 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'id': 483, 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'id': 484, 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'id': 485, 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 'gargle'}, {'frequency': 'r', 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'id': 486, 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'id': 487, 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'id': 488, 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'c', 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'id': 489, 'def': 'small swift graceful antelope of 
Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'id': 490, 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'id': 491, 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'r', 'synset': 'generator.n.02', 'synonyms': ['generator'], 'id': 492, 'def': 'engine that converts mechanical energy into electrical energy by electromagnetic induction', 'name': 'generator'}, {'frequency': 'c', 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'id': 493, 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'id': 494, 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'id': 495, 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'id': 496, 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'id': 497, 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'id': 498, 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'synset': 'globe.n.03', 'synonyms': ['globe'], 'id': 499, 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'synset': 'glove.n.02', 'synonyms': ['glove'], 'id': 500, 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'synset': 'goat.n.01', 'synonyms': ['goat'], 'id': 501, 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'id': 502, 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'id': 503, 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'c', 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'id': 504, 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'id': 505, 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'id': 506, 'def': 'long narrow flat-bottomed boat propelled by sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'synset': 'goose.n.01', 'synonyms': ['goose'], 'id': 507, 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'id': 508, 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 
'id': 509, 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'f', 'synset': 'grape.n.01', 'synonyms': ['grape'], 'id': 510, 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'c', 'synset': 'grater.n.01', 'synonyms': ['grater'], 'id': 511, 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'id': 512, 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'id': 513, 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'f', 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'id': 514, 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'f', 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'id': 515, 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'id': 516, 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'f', 'synset': 'grill.n.02', 'synonyms': ['grill', 'grille', 'grillwork', 'radiator_grille'], 'id': 517, 'def': 'a framework of metal bars used as a partition or a grate', 'name': 'grill'}, {'frequency': 'r', 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'id': 518, 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'id': 519, 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'id': 520, 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'f', 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'id': 521, 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 'id': 522, 'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'synset': 'gun.n.01', 'synonyms': ['gun'], 'id': 523, 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'f', 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'id': 524, 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'id': 525, 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'id': 526, 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'r', 'synset': 'halter.n.03', 'synonyms': ['halter_top'], 'id': 527, 'def': "a woman's top that fastens behind the back and neck leaving the back and arms uncovered", 'name': 'halter_top'}, {'frequency': 'f', 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'id': 528, 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 
'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'id': 529, 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'id': 530, 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'c', 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'id': 531, 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'id': 532, 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'c', 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'id': 533, 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'f', 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'id': 534, 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'id': 535, 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'id': 536, 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'id': 537, 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'id': 538, 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'id': 539, 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'id': 540, 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'id': 541, 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'synset': 'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'id': 542, 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'id': 543, 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'synset': 'hat.n.01', 'synonyms': ['hat'], 'id': 544, 'def': 'headwear that protects the head from bad weather, sun, or worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'id': 545, 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'c', 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'id': 546, 'def': 'a garment that covers the head OR face', 'name': 'veil'}, {'frequency': 'f', 'synset': 'headband.n.01', 'synonyms': ['headband'], 'id': 547, 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'id': 548, 'def': 'a 
vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'id': 549, 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'id': 550, 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'synset': 'headset.n.01', 'synonyms': ['headset'], 'id': 551, 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'id': 552, 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'c', 'synset': 'heart.n.02', 'synonyms': ['heart'], 'id': 553, 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'id': 554, 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'id': 555, 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'id': 556, 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'synset': 'heron.n.02', 'synonyms': ['heron'], 'id': 557, 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'id': 558, 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'id': 559, 'def': 'a joint that holds two parts together so that one can swing relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'id': 560, 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'id': 561, 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'id': 562, 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'id': 563, 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'synset': 'honey.n.01', 'synonyms': ['honey'], 'id': 564, 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'id': 565, 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'synset': 'hook.n.05', 'synonyms': ['hook'], 'id': 566, 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'r', 'synset': 'hookah.n.01', 'synonyms': ['hookah', 'narghile', 'nargileh', 'sheesha', 'shisha', 'water_pipe'], 'id': 567, 
'def': 'a tobacco pipe with a long flexible tube connected to a container where the smoke is cooled by passing through water', 'name': 'hookah'}, {'frequency': 'r', 'synset': 'hornet.n.01', 'synonyms': ['hornet'], 'id': 568, 'def': 'large stinging wasp', 'name': 'hornet'}, {'frequency': 'f', 'synset': 'horse.n.01', 'synonyms': ['horse'], 'id': 569, 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'id': 570, 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'id': 571, 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'id': 572, 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'id': 573, 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'id': 574, 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'id': 575, 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'c', 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'id': 576, 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'id': 577, 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'f', 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'id': 578, 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'id': 579, 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'id': 580, 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'id': 581, 'def': 'an appliance included in some electric refrigerators for making ice cubes', 'name': 'ice_maker'}, {'frequency': 'r', 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'id': 582, 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'id': 583, 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'c', 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'id': 584, 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'id': 585, 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'f', 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'id': 586, 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 
'smoothing_iron_(for_clothing)'], 'id': 587, 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'c', 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'id': 588, 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'id': 589, 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'c', 'synset': 'jam.n.01', 'synonyms': ['jam'], 'id': 590, 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'synset': 'jar.n.01', 'synonyms': ['jar'], 'id': 591, 'def': 'a vessel (usually cylindrical) with a wide mouth and without handles', 'name': 'jar'}, {'frequency': 'f', 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'id': 592, 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'id': 593, 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'id': 594, 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'id': 595, 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'id': 596, 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'r', 'synset': 'jewel.n.01', 'synonyms': ['jewel', 'gem', 'precious_stone'], 'id': 597, 'def': 'a precious or semiprecious stone incorporated into a piece of jewelry', 'name': 'jewel'}, {'frequency': 'c', 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'id': 598, 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'id': 599, 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'c', 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'id': 600, 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'synset': 'kayak.n.01', 'synonyms': ['kayak'], 'id': 601, 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'synset': 'keg.n.02', 'synonyms': ['keg'], 'id': 602, 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'id': 603, 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'id': 604, 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'synset': 'key.n.01', 'synonyms': ['key'], 'id': 605, 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'id': 606, 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'c', 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'id': 607, 'def': 'a knee-length pleated tartan 
skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'id': 608, 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'id': 609, 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'r', 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'id': 610, 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'synset': 'kite.n.03', 'synonyms': ['kite'], 'id': 611, 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'id': 612, 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'id': 613, 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'id': 614, 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'synset': 'knife.n.01', 'synonyms': ['knife'], 'id': 615, 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'id': 616, 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'synset': 'knob.n.02', 'synonyms': ['knob'], 'id': 617, 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'id': 618, 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'id': 619, 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'id': 620, 'def': 'a light coat worn to protect clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'id': 621, 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'id': 622, 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'c', 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'id': 623, 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'f', 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'id': 624, 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'id': 625, 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'id': 626, 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'id': 627, 'def': 'a metal post supporting 
an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'id': 628, 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'id': 629, 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'id': 630, 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'id': 631, 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'id': 632, 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'f', 'synset': 'latch.n.02', 'synonyms': ['latch'], 'id': 633, 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'id': 634, 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'synset': 'leather.n.01', 'synonyms': ['leather'], 'id': 635, 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'id': 636, 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'id': 637, 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'r', 'synset': 'legume.n.02', 'synonyms': ['legume'], 'id': 638, 'def': 'the fruit or seed of bean or pea plants', 'name': 'legume'}, {'frequency': 'f', 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'id': 639, 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'id': 640, 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'id': 641, 'def': 'leafy plant commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'id': 642, 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'id': 643, 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'id': 644, 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'id': 645, 'def': 'lightbulb/source of light', 'name': 'lightbulb'}, {'frequency': 'r', 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'id': 646, 'def': 'a metallic conductor that is attached to a 
high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'f', 'synset': 'lime.n.06', 'synonyms': ['lime'], 'id': 647, 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'id': 648, 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'c', 'synset': 'lion.n.01', 'synonyms': ['lion'], 'id': 649, 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'id': 650, 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'r', 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'id': 651, 'def': 'liquor or beer', 'name': 'liquor'}, {'frequency': 'c', 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'id': 652, 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'f', 'synset': 'log.n.01', 'synonyms': ['log'], 'id': 653, 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'id': 654, 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'f', 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'id': 655, 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'id': 656, 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 'r', 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'id': 657, 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'id': 658, 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'id': 659, 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'c', 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'id': 660, 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'f', 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'id': 661, 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'synset': 'mallard.n.01', 'synonyms': ['mallard'], 'id': 662, 'def': 'wild dabbling duck from which domestic ducks are descended', 'name': 'mallard'}, {'frequency': 'r', 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'id': 663, 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'id': 664, 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'r', 'synset': 'manatee.n.01', 'synonyms': ['manatee'], 'id': 665, 'def': 'sirenian mammal of tropical coastal waters of America', 'name': 'manatee'}, {'frequency': 'c', 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'id': 666, 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'id': 667, 'def': 'a container (usually in a barn or stable) 
from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'id': 668, 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'f', 'synset': 'map.n.01', 'synonyms': ['map'], 'id': 669, 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'f', 'synset': 'marker.n.03', 'synonyms': ['marker'], 'id': 670, 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'synset': 'martini.n.01', 'synonyms': ['martini'], 'id': 671, 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'id': 672, 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'id': 673, 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'synset': 'masher.n.02', 'synonyms': ['masher'], 'id': 674, 'def': 'a kitchen utensil used for mashing (e.g. potatoes)', 'name': 'masher'}, {'frequency': 'f', 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'id': 675, 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'synset': 'mast.n.01', 'synonyms': ['mast'], 'id': 676, 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'id': 677, 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'id': 678, 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'id': 679, 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'id': 680, 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'id': 681, 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'id': 682, 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'id': 683, 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'c', 'synset': 'melon.n.01', 'synonyms': ['melon'], 'id': 684, 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'id': 685, 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'id': 686, 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'id': 687, 'def': 'kitchen appliance that 
cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'id': 688, 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'f', 'synset': 'milk.n.01', 'synonyms': ['milk'], 'id': 689, 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'r', 'synset': 'milk_can.n.01', 'synonyms': ['milk_can'], 'id': 690, 'def': 'can for transporting milk', 'name': 'milk_can'}, {'frequency': 'r', 'synset': 'milkshake.n.01', 'synonyms': ['milkshake'], 'id': 691, 'def': 'frothy drink of milk and flavoring and sometimes fruit or ice cream', 'name': 'milkshake'}, {'frequency': 'f', 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'id': 692, 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'id': 693, 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'id': 694, 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'id': 695, 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'id': 696, 'def': 'a kitchen utensil that is used for mixing foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'synset': 'money.n.03', 'synonyms': ['money'], 'id': 697, 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'id': 698, 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'id': 699, 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'synset': 'motor.n.01', 'synonyms': ['motor'], 'id': 700, 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'id': 701, 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'id': 702, 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'f', 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'id': 703, 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'id': 704, 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'f', 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'id': 705, 'def': 'a computer input device that controls an on-screen pointer (does not include trackpads / touchpads)', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'id': 706, 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 
'mousepad'}, {'frequency': 'c', 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'id': 707, 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'synset': 'mug.n.04', 'synonyms': ['mug'], 'id': 708, 'def': 'with handle and usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'id': 709, 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'id': 710, 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'c', 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'id': 711, 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'id': 712, 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'f', 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'id': 713, 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'id': 714, 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'id': 715, 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'id': 716, 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'c', 'synset': 'needle.n.03', 'synonyms': ['needle'], 'id': 717, 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'synset': 'nest.n.01', 'synonyms': ['nest'], 'id': 718, 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'f', 'synset': 'newspaper.n.01', 'synonyms': ['newspaper', 'paper_(newspaper)'], 'id': 719, 'def': 'a daily or weekly publication on folded sheets containing news, articles, and advertisements', 'name': 'newspaper'}, {'frequency': 'c', 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'id': 720, 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'id': 721, 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'id': 722, 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'c', 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'id': 723, 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'id': 724, 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'synset': 'notepad.n.01', 'synonyms': 
['notepad'], 'id': 725, 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'f', 'synset': 'nut.n.03', 'synonyms': ['nut'], 'id': 726, 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'id': 727, 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'f', 'synset': 'oar.n.01', 'synonyms': ['oar'], 'id': 728, 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'id': 729, 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'id': 730, 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'id': 731, 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'id': 732, 'def': 'oil from olives', 'name': 'olive_oil'}, {'frequency': 'r', 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'id': 733, 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'synset': 'onion.n.01', 'synonyms': ['onion'], 'id': 734, 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'id': 735, 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'id': 736, 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'c', 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'id': 737, 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'f', 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'id': 738, 'def': 'a thick standalone cushion used as a seat or footrest, often next to a chair', 'name': 'ottoman'}, {'frequency': 'f', 'synset': 'oven.n.01', 'synonyms': ['oven'], 'id': 739, 'def': 'kitchen appliance used for baking or roasting', 'name': 'oven'}, {'frequency': 'c', 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'id': 740, 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'synset': 'owl.n.01', 'synonyms': ['owl'], 'id': 741, 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'synset': 'packet.n.03', 'synonyms': ['packet'], 'id': 742, 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'id': 743, 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'synset': 'pad.n.04', 'synonyms': ['pad'], 'id': 744, 'def': 'mostly arm/knee pads labeled', 'name': 'pad'}, {'frequency': 'f', 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'id': 745, 'def': 'a short light oar used without an oarlock to propel a canoe or small 
boat', 'name': 'paddle'}, {'frequency': 'c', 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'id': 746, 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'c', 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'id': 747, 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'synset': 'painting.n.01', 'synonyms': ['painting'], 'id': 748, 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'f', 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'id': 749, 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'id': 750, 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'id': 751, 'def': 'cooking utensil consisting of a wide metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'id': 752, 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'id': 753, 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'id': 754, 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'id': 755, 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'f', 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'id': 756, 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'id': 757, 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'id': 758, 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'id': 759, 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'id': 760, 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'c', 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'id': 761, 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'id': 762, 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'c', 'synset': 'parasol.n.01', 'synonyms': ['parasol', 'sunshade'], 'id': 763, 'def': 'a handheld collapsible source of shade', 'name': 'parasol'}, {'frequency': 'r', 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'id': 764, 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'c', 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'id': 765, 
'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'id': 766, 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'id': 767, 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'id': 768, 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'id': 769, 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'c', 'synset': 'passport.n.02', 'synonyms': ['passport'], 'id': 770, 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home country', 'name': 'passport'}, {'frequency': 'f', 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'id': 771, 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'id': 772, 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'id': 773, 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'synset': 'peach.n.03', 'synonyms': ['peach'], 'id': 774, 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'id': 775, 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'f', 'synset': 'pear.n.01', 'synonyms': ['pear'], 'id': 776, 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'c', 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'id': 777, 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'synset': 'peg.n.04', 'synonyms': ['wooden_leg', 'pegleg'], 'id': 778, 'def': 'a prosthesis that replaces a missing leg', 'name': 'wooden_leg'}, {'frequency': 'r', 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'id': 779, 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'id': 780, 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'synset': 'pen.n.01', 'synonyms': ['pen'], 'id': 781, 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'f', 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'id': 782, 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'id': 783, 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'id': 784, 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'synset': 'pendulum.n.01', 'synonyms': 
['pendulum'], 'id': 785, 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'id': 786, 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'id': 787, 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'id': 788, 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'f', 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'id': 789, 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'id': 790, 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'id': 791, 'def': 'a toiletry that emits and diffuses a fragrant odor', 'name': 'perfume'}, {'frequency': 'r', 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'id': 792, 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'synset': 'person.n.01', 'synonyms': ['person', 'baby', 'child', 'boy', 'girl', 'man', 'woman', 'human'], 'id': 793, 'def': 'a human being', 'name': 'person'}, {'frequency': 'c', 'synset': 'pet.n.01', 'synonyms': ['pet'], 'id': 794, 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'c', 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'id': 795, 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'id': 796, 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'id': 797, 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'f', 'synset': 'piano.n.01', 'synonyms': ['piano'], 'id': 798, 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'id': 799, 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'id': 800, 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'synset': 'pie.n.01', 'synonyms': ['pie'], 'id': 801, 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'id': 802, 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'id': 803, 'def': "a child's coin bank (often shaped like a pig)", 'name': 
'piggy_bank'}, {'frequency': 'f', 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'id': 804, 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'id': 805, 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'id': 806, 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'id': 807, 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'id': 808, 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'id': 809, 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'id': 810, 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'id': 811, 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'id': 812, 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'c', 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'id': 813, 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'id': 814, 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'id': 815, 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'id': 816, 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. 
tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'id': 817, 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'synset': 'plate.n.04', 'synonyms': ['plate'], 'id': 818, 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'synset': 'platter.n.01', 'synonyms': ['platter'], 'id': 819, 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'id': 820, 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'id': 821, 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'id': 822, 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'synset': 'plume.n.02', 'synonyms': ['plume'], 'id': 823, 'def': 'a feather or cluster of feathers worn as an ornament', 'name': 'plume'}, {'frequency': 'r', 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'id': 824, 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'id': 825, 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'id': 826, 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'id': 827, 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'f', 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'id': 828, 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'id': 829, 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'synset': 'pony.n.05', 'synonyms': ['pony'], 'id': 830, 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'id': 831, 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'id': 832, 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'c', 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'id': 833, 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'id': 834, 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'id': 
835, 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'synset': 'pot.n.01', 'synonyms': ['pot'], 'id': 836, 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'id': 837, 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'synset': 'potato.n.01', 'synonyms': ['potato'], 'id': 838, 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'id': 839, 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'id': 840, 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'id': 841, 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'c', 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'id': 842, 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'id': 843, 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'c', 'synset': 'pretzel.n.01', 'synonyms': ['pretzel'], 'id': 844, 'def': 'glazed and salted cracker typically in the shape of a loose knot', 'name': 'pretzel'}, {'frequency': 'f', 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'id': 845, 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'id': 846, 'def': 'a weapon that is forcibly thrown or projected at a target', 'name': 'projectile_(weapon)'}, {'frequency': 'c', 'synset': 'projector.n.02', 'synonyms': ['projector'], 'id': 847, 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'id': 848, 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'synset': 'prune.n.01', 'synonyms': ['prune'], 'id': 849, 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'id': 850, 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'id': 851, 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'id': 852, 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'id': 853, 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'id': 854, 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'id': 855, 'def': 'a tool for
making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'id': 856, 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'c', 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'id': 857, 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'id': 858, 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'id': 859, 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'id': 860, 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'id': 861, 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'id': 862, 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'id': 863, 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'synset': 'radar.n.01', 'synonyms': ['radar'], 'id': 864, 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'f', 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'id': 865, 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, {'frequency': 'c', 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'id': 866, 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'id': 867, 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'synset': 'raft.n.01', 'synonyms': ['raft'], 'id': 868, 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'id': 869, 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'id': 870, 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'id': 871, 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'id': 872, 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'synset': 'rat.n.01', 'synonyms': ['rat'], 'id': 873, 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'id': 874, 'def': 'a blade that has a very sharp edge', 'name':
'razorblade'}, {'frequency': 'c', 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'id': 875, 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'id': 876, 'def': 'vehicle mirror (side or rearview)', 'name': 'rearview_mirror'}, {'frequency': 'c', 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'id': 877, 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'id': 878, 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'c', 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'id': 879, 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'f', 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'id': 880, 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'id': 881, 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'id': 882, 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'id': 883, 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'c', 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'id': 884, 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'synset': 'ring.n.08', 'synonyms': ['ring'], 'id': 885, 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'id': 886, 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'id': 887, 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'synset': 'robe.n.01', 'synonyms': ['robe'], 'id': 888, 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'id': 889, 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'synset': 'rodent.n.01', 'synonyms': ['rodent'], 'id': 890, 'def': 'relatively small placental mammals having a single pair of constantly growing incisor teeth specialized for gnawing', 'name': 'rodent'}, {'frequency': 'r', 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'id': 891, 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'id': 892, 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'id': 893, 'def': 
'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'id': 894, 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'id': 895, 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'id': 896, 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'id': 897, 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'id': 898, 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'id': 899, 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'id': 900, 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'id': 901, 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'id': 902, 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'f', 'synset': 'sail.n.01', 'synonyms': ['sail'], 'id': 903, 'def': 'a large piece of fabric by means of which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'f', 'synset': 'salad.n.01', 'synonyms': ['salad'], 'id': 904, 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'id': 905, 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'c', 'synset': 'salami.n.01', 'synonyms': ['salami'], 'id': 906, 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'c', 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'id': 907, 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'id': 908, 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'c', 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'id': 909, 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'id': 910, 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'id': 911, 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, 
{'frequency': 'f', 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'id': 912, 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'id': 913, 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'id': 914, 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'id': 915, 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'id': 916, 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'id': 917, 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'id': 918, 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'id': 919, 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'id': 920, 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'id': 921, 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'id': 922, 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, {'frequency': 'f', 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'id': 923, 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'f', 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'id': 924, 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'r', 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'id': 925, 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'c', 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'id': 926, 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'f', 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'id': 927, 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'id': 928, 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'c', 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'id': 929, 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'c', 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'id': 930, 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'id': 931, 'def': 
'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'id': 932, 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'c', 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'id': 933, 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'c', 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'id': 934, 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'id': 935, 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'c', 'synset': 'shark.n.01', 'synonyms': ['shark'], 'id': 936, 'def': 'typically large carnivorous fishes with sharp teeth', 'name': 'shark'}, {'frequency': 'r', 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'id': 937, 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'id': 938, 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'synset': 'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'id': 939, 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'id': 940, 'def': 'toiletry that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'id': 941, 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'synset': 'shears.n.01', 'synonyms': ['shears'], 'id': 942, 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'id': 943, 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'id': 944, 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'id': 945, 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'c', 'synset': 'shield.n.02', 'synonyms': ['shield'], 'id': 946, 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'id': 947, 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'id': 948, 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'f', 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'id': 949, 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'id': 950, 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'synset': 'short_pants.n.01', 'synonyms':
['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'id': 951, 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'id': 952, 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'f', 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'id': 953, 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'id': 954, 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'id': 955, 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'r', 'synset': 'shower_cap.n.01', 'synonyms': ['shower_cap'], 'id': 956, 'def': 'a tight cap worn to keep hair dry while showering', 'name': 'shower_cap'}, {'frequency': 'f', 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'id': 957, 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'id': 958, 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'f', 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'id': 959, 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'synset': 'silo.n.01', 'synonyms': ['silo'], 'id': 960, 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'synset': 'sink.n.01', 'synonyms': ['sink'], 'id': 961, 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'id': 962, 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'id': 963, 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'synset': 'ski.n.01', 'synonyms': ['ski'], 'id': 964, 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'id': 965, 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'id': 966, 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'id': 967, 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'id': 968, 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'r', 'synset': 'skullcap.n.01', 'synonyms': ['skullcap'], 'id': 969, 'def': 'rounded brimless cap fitting the crown of the head', 'name': 'skullcap'}, {'frequency': 'c', 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'id': 970, 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'synset': 'sleeping_bag.n.01', 'synonyms': 
['sleeping_bag'], 'id': 971, 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'id': 972, 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'id': 973, 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'id': 974, 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'id': 975, 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'id': 976, 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'id': 977, 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'id': 978, 'def': 'tracked vehicle for travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'synset': 'soap.n.01', 'synonyms': ['soap'], 'id': 979, 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'id': 980, 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'synset': 'sock.n.01', 'synonyms': ['sock'], 'id': 981, 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'f', 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'id': 982, 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'synset': 'softball.n.01', 'synonyms': ['softball'], 'id': 983, 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'id': 984, 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'id': 985, 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'f', 'synset': 'soup.n.01', 'synonyms': ['soup'], 'id': 986, 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'id': 987, 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'id': 988, 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'id': 989, 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 
'soybean_milk', 'soymilk'], 'id': 990, 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'id': 991, 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'id': 992, 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'id': 993, 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'id': 994, 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 'eyeglasses', 'glasses'], 'id': 995, 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'id': 996, 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'c', 'synset': 'spider.n.01', 'synonyms': ['spider'], 'id': 997, 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'r', 'synset': 'spiny_lobster.n.02', 'synonyms': ['crawfish', 'crayfish'], 'id': 998, 'def': 'large edible marine crustacean having a spiny carapace but lacking the large pincers of true lobsters', 'name': 'crawfish'}, {'frequency': 'c', 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'id': 999, 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'id': 1000, 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'id': 1001, 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'id': 1002, 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'synset': 'squid.n.01', 'synonyms': ['squid_(food)', 'calamari', 'calamary'], 'id': 1003, 'def': '(Italian cuisine) squid prepared as food', 'name': 'squid_(food)'}, {'frequency': 'c', 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'id': 1004, 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'r', 'synset': 'stagecoach.n.01', 'synonyms': ['stagecoach'], 'id': 1005, 'def': 'a large coach-and-four formerly used to carry passengers and mail on regular routes between towns', 'name': 'stagecoach'}, {'frequency': 'c', 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'id': 1006, 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'c', 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'id': 1007, 'def': 'echinoderms characterized 
by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'id': 1008, 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'id': 1009, 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'id': 1010, 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'f', 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'id': 1011, 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'id': 1012, 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'id': 1013, 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'id': 1014, 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'synset': 'stew.n.02', 'synonyms': ['stew'], 'id': 1015, 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'id': 1016, 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'id': 1017, 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'f', 'synset': 'stool.n.01', 'synonyms': ['stool'], 'id': 1018, 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'id': 1019, 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'id': 1020, 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'id': 1021, 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'id': 1022, 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'synset': 'strap.n.01', 'synonyms': ['strap'], 'id': 1023, 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'id': 1024, 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'id': 1025, 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'id': 1026, 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'id': 1027, 'def': 'a lamp 
supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'id': 1028, 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'id': 1029, 'def': 'a pointed tool for writing or drawing or engraving, including pens', 'name': 'stylus'}, {'frequency': 'r', 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'id': 1030, 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'id': 1031, 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'id': 1032, 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'f', 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'id': 1033, 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'id': 1034, 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'id': 1035, 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'id': 1036, 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'f', 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'id': 1037, 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'id': 1038, 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'synset': 'swab.n.02', 'synonyms': ['mop'], 'id': 1039, 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'id': 1040, 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'id': 1041, 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'id': 1042, 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'id': 1043, 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'id': 1044, 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'id': 1045, 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 
'c', 'synset': 'sword.n.01', 'synonyms': ['sword'], 'id': 1046, 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'id': 1047, 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'id': 1048, 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'id': 1049, 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'synset': 'table.n.02', 'synonyms': ['table'], 'id': 1050, 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'id': 1051, 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'id': 1052, 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'id': 1053, 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'synset': 'taco.n.02', 'synonyms': ['taco'], 'id': 1054, 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'synset': 'tag.n.02', 'synonyms': ['tag'], 'id': 1055, 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'id': 1056, 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'id': 1057, 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'id': 1058, 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'f', 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'id': 1059, 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'id': 1060, 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'f', 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'id': 1061, 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'id': 1062, 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'id': 1063, 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 
'id': 1064, 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'id': 1065, 'def': 'a cloth having a crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'id': 1066, 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'c', 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'id': 1067, 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'id': 1068, 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'id': 1069, 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'f', 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'id': 1070, 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'id': 1071, 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'id': 1072, 'def': 'electronic device for communicating by voice over long distances (includes wired and wireless/cell phones)', 'name': 'telephone'}, {'frequency': 'c', 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'id': 1073, 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'id': 1074, 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'id': 1075, 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'id': 1076, 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'id': 1077, 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'id': 1078, 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'id': 1079, 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'id': 1080, 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'id': 1081, 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'id': 1082, 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'f', 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'id': 1083, 'def': 'a regulator for automatically regulating 
temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'id': 1084, 'def': 'a small metal cap to protect the finger while sewing; can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'id': 1085, 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'id': 1086, 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'id': 1087, 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'id': 1088, 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'id': 1089, 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'id': 1090, 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'id': 1091, 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'c', 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'id': 1092, 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'id': 1093, 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'id': 1094, 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'id': 1095, 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'f', 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'id': 1096, 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'id': 1097, 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'id': 1098, 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'id': 1099, 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'f', 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'id': 1100, 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'id': 1101, 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'id': 1102, 'def': 'small brush; has 
long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'id': 1103, 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, {'frequency': 'f', 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'id': 1104, 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'f', 'synset': 'top.n.09', 'synonyms': ['cover'], 'id': 1105, 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'id': 1106, 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'id': 1107, 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'synset': 'towel.n.01', 'synonyms': ['towel'], 'id': 1108, 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'id': 1109, 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'synset': 'toy.n.03', 'synonyms': ['toy'], 'id': 1110, 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'id': 1111, 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'id': 1112, 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'c', 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'id': 1113, 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'f', 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'id': 1114, 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'id': 1115, 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'id': 1116, 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'synset': 'tray.n.01', 'synonyms': ['tray'], 'id': 1117, 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'id': 1118, 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'id': 1119, 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'c', 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'id': 
1120, 'def': 'a vehicle with three wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'f', 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'id': 1121, 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'id': 1122, 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'synset': 'truck.n.01', 'synonyms': ['truck'], 'id': 1123, 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'id': 1124, 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'id': 1125, 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'synset': 'tub.n.02', 'synonyms': ['vat'], 'id': 1126, 'def': 'a large vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'synset': 'turban.n.01', 'synonyms': ['turban'], 'id': 1127, 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'c', 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'id': 1128, 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'id': 1129, 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'id': 1130, 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'c', 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'id': 1131, 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'c', 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'id': 1132, 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'id': 1133, 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'f', 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'id': 1134, 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'id': 1135, 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'f', 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'id': 1136, 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'c', 'synset': 'urn.n.01', 'synonyms': ['urn'], 'id': 1137, 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'id': 1138, 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'f', 'synset': 'vase.n.01', 'synonyms': ['vase'], 'id': 1139, 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'synset': 
'vending_machine.n.01', 'synonyms': ['vending_machine'], 'id': 1140, 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'id': 1141, 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'f', 'synset': 'vest.n.01', 'synonyms': ['vest', 'waistcoat'], 'id': 1142, 'def': "a man's sleeveless garment worn underneath a coat", 'name': 'vest'}, {'frequency': 'c', 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'id': 1143, 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'id': 1144, 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'id': 1145, 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'id': 1146, 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'c', 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'id': 1147, 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'id': 1148, 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'id': 1149, 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'id': 1150, 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'id': 1151, 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'id': 1152, 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'id': 1153, 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'id': 1154, 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'id': 1155, 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'f', 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'id': 1156, 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'id': 1157, 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'id': 1158, 'def': 'a tall piece of furniture that provides storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'synset': 'washbasin.n.01', 'synonyms': ['washbasin', 
'basin_(for_washing)', 'washbowl', 'washstand', 'handbasin'], 'id': 1159, 'def': 'a bathroom sink that is permanently installed and connected to a water supply and drainpipe; where you can wash your hands and face', 'name': 'washbasin'}, {'frequency': 'c', 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'id': 1160, 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'id': 1161, 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'id': 1162, 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'id': 1163, 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'id': 1164, 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'id': 1165, 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'c', 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'id': 1166, 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'id': 1167, 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'id': 1168, 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'id': 1169, 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'id': 1170, 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'id': 1171, 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'f', 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'id': 1172, 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'id': 1173, 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'id': 1174, 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'id': 1175, 'def': 'a rich cake with two or more tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'id': 1176, 'def': 'a ring given to the bride and/or groom at 
the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'id': 1177, 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'id': 1178, 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'id': 1179, 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'id': 1180, 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'c', 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'id': 1181, 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'c', 'synset': 'wig.n.01', 'synonyms': ['wig'], 'id': 1182, 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'id': 1183, 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'id': 1184, 'def': 'A mill or turbine that is powered by wind', 'name': 'windmill'}, {'frequency': 'c', 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'id': 1185, 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'id': 1186, 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'id': 1187, 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'id': 1188, 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'c', 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'id': 1189, 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'id': 1190, 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'f', 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 'id': 1191, 'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'synset': 'wok.n.01', 'synonyms': ['wok'], 'id': 1192, 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'id': 1193, 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'id': 1194, 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'id': 1195, 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 
'name': 'wreath'}, {'frequency': 'c', 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'id': 1196, 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'f', 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'id': 1197, 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'id': 1198, 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'c', 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'id': 1199, 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'c', 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'id': 1200, 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'c', 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'id': 1201, 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'id': 1202, 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'id': 1203, 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa -# fmt: on diff --git a/spaces/nomic-ai/IlyaGusev_ru_turbo_alpaca/README.md b/spaces/nomic-ai/IlyaGusev_ru_turbo_alpaca/README.md deleted file mode 100644 index bfb8ee202f32022d7197637a69fc5e702d8565b8..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/IlyaGusev_ru_turbo_alpaca/README.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: IlyaGusev/ru_turbo_alpaca -emoji: 🗺️ -colorFrom: purple -colorTo: red -sdk: static -pinned: false ---- diff --git a/spaces/nsarrazin/chat-ui-idefics/README.md b/spaces/nsarrazin/chat-ui-idefics/README.md deleted file mode 100644 index 5c7ae77951fbdab3eff6be1f55f891d9e0313147..0000000000000000000000000000000000000000 --- a/spaces/nsarrazin/chat-ui-idefics/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: IDEFICS + HuggingChat -emoji: 🚀 -colorFrom: indigo -colorTo: blue -sdk: docker -pinned: false -app_port: 3000 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/metrics/__init__.py b/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/metrics/__init__.py deleted file mode 100644 index c608be9615c145466c14d93e93c77e1a4cce9c5d..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/metrics/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -import numpy as np -from skimage.metrics import peak_signal_noise_ratio as psnr -from skimage.metrics import structural_similarity as ssim -import cvbase -import os - -os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" - - -def calculate_metrics(results_flow, gts_flow): - """ - - Args: - results_flow: inpainted optical flow with shape [b, h, w, c], numpy array - gts_flow: ground truth optical flow with shape [b, h, w, c], numpy array - - Returns: PSNR, SSIM for flow images, and L1/L2 error for flow map - - """ - B, H, W, C = results_flow.shape - psnr_values, ssim_values, L1errors, L2errors = [], [], [], [] - for i in range(B): - result = results_flow[i] - gt = gts_flow[i] - result_img = cvbase.flow2rgb(result) - gt_img = 
cvbase.flow2rgb(gt) - residual = result - gt - L1error = np.mean(np.abs(residual)) - L2error = np.sum(residual ** 2) ** 0.5 / (H * W * C) - psnr_value = psnr(result_img, gt_img) - ssim_value = ssim(result_img, gt_img, multichannel=True) - L1errors.append(L1error) - L2errors.append(L2error) - psnr_values.append(psnr_value) - ssim_values.append(ssim_value) - L1_value = np.mean(L1errors) - L2_value = np.mean(L2errors) - psnr_value = np.mean(psnr_values) - ssim_value = np.mean(ssim_values) - - return {'l1': L1_value, 'l2': L2_value, 'psnr': psnr_value, 'ssim': ssim_value} diff --git a/spaces/oliver2023/chatgpt-on-wechat/common/token_bucket.py b/spaces/oliver2023/chatgpt-on-wechat/common/token_bucket.py deleted file mode 100644 index 23901b67e6e1e902304d21e9eb53790526e43240..0000000000000000000000000000000000000000 --- a/spaces/oliver2023/chatgpt-on-wechat/common/token_bucket.py +++ /dev/null @@ -1,45 +0,0 @@ -import threading -import time - - -class TokenBucket: - def __init__(self, tpm, timeout=None): - self.capacity = int(tpm) # token bucket capacity - self.tokens = 0 # initial token count is 0 - self.rate = int(tpm) / 60 # token generation rate per second - self.timeout = timeout # timeout when waiting for a token - self.cond = threading.Condition() # condition variable - self.is_running = True - # start the token-generation thread - threading.Thread(target=self._generate_tokens).start() - - def _generate_tokens(self): - """Generate tokens.""" - while self.is_running: - with self.cond: - if self.tokens < self.capacity: - self.tokens += 1 - self.cond.notify() # notify threads waiting for a token - time.sleep(1 / self.rate) - - def get_token(self): - """Acquire a token.""" - with self.cond: - while self.tokens <= 0: - flag = self.cond.wait(self.timeout) - if not flag: # timed out - return False - self.tokens -= 1 - return True - - def close(self): - self.is_running = False - - -if __name__ == "__main__": - token_bucket = TokenBucket(20, None) # create a token bucket that produces 20 tokens per minute - # token_bucket = TokenBucket(20, 0.1) - for i in range(3): - if token_bucket.get_token(): - print(f"Request {i+1} succeeded") - token_bucket.close() diff --git a/spaces/p1atdev/waifu_aesthetics/predictor.py b/spaces/p1atdev/waifu_aesthetics/predictor.py deleted file mode 100644 index 5c24337ed456f6ea8ad90cff8d9deeee70563523..0000000000000000000000000000000000000000 --- a/spaces/p1atdev/waifu_aesthetics/predictor.py +++ /dev/null @@ -1,23 +0,0 @@ -from PIL import Image - -from transformers import BeitImageProcessor, BeitForImageClassification - - -class Predictor: - def __init__(self, model_id: str) -> None: - self.processor = BeitImageProcessor.from_pretrained(model_id) - self.model = BeitForImageClassification.from_pretrained(model_id) - - def predict(self, images: list[Image.Image]) -> list[dict[str, float]]: - inputs = self.processor(images, return_tensors="pt") - logits = self.model(**inputs).logits.softmax(1) # apply softmax anyway, just because the output looks nicer - - results = [] - - for scores in logits: - result = {} - for i, score in enumerate(scores): - result[self.model.config.id2label[i]] = score.item() - results.append(result) - - return results diff --git a/spaces/patgpt4/MusicGen/tests/modules/test_seanet.py b/spaces/patgpt4/MusicGen/tests/modules/test_seanet.py deleted file mode 100644 index e5c51b340a2f94fb2828b14daf83d5fad645073d..0000000000000000000000000000000000000000 --- a/spaces/patgpt4/MusicGen/tests/modules/test_seanet.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -from itertools import product - -import pytest -import torch - -from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock -from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d - - -class TestSEANetModel: - - def test_base(self): - encoder = SEANetEncoder() - decoder = SEANetDecoder() - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_causal(self): - encoder = SEANetEncoder(causal=True) - decoder = SEANetDecoder(causal=True) - x = torch.randn(1, 1, 24000) - - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_conv_skip_connection(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False) - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_seanet_encoder_decoder_final_act(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False, final_activation='Tanh') - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in encoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if n_blocks <= n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - # here we add + 1 to n_blocks as we increment n_blocks just after the block - assert resnet_layer.conv.norm_type == 'none' if (n_blocks + 1) <= n_disable_blocks else norm - - def test_encoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_encoder_blocks_norm(encoder, disable_blocks, norm) - - def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in decoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, StreamableConvTranspose1d): - n_blocks += 1 - assert layer.convtr.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - assert resnet_layer.conv.norm_type == 'none' \ - if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - - def test_decoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_decoder_blocks_norm(decoder, disable_blocks, norm) - - def test_disable_norm_raises_exception(self): - # Invalid 
disable_norm_outer_blocks values raise exceptions - with pytest.raises(AssertionError): - SEANetEncoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) - - with pytest.raises(AssertionError): - SEANetDecoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) diff --git a/spaces/pd4solutions/ATLChatbot/app.py b/spaces/pd4solutions/ATLChatbot/app.py deleted file mode 100644 index 7c4b37d79220c4a02bb952025290030683acac65..0000000000000000000000000000000000000000 --- a/spaces/pd4solutions/ATLChatbot/app.py +++ /dev/null @@ -1,83 +0,0 @@ -import time -import gradio as gr -from gpt_index import RefinePrompt -from gpt_index import ( - SimpleWebPageReader, - WikipediaReader, - GPTListIndex, - GPTSimpleVectorIndex, - LLMPredictor, - QuestionAnswerPrompt, - RefinePrompt, - PromptHelper -) - -system_message = {"role": "system", "content": "You are an AI specialized in Atlanta."} - - -with gr.Blocks() as demo: - gr.Markdown( - ''' - # Customized Atlanta Chatbot Demo - This chatbot uses the Atlantaga.gov and ATL311.com websites as its custom knowledge base. - Before starting a new conversation, please refresh the chatbot for the best results. - If the chatbot is giving incorrect answers, please refresh. - ''' - ) - chatbot = gr.Chatbot() - msg = gr.Textbox() - clear = gr.Button("Clear") - - - state = gr.State([]) - - def user(user_message, history): - return "", history + [[user_message, None]] - - def bot(history, messages_history): - user_message = history[-1][0] - bot_message, messages_history = ask_gpt(user_message, messages_history) - messages_history += [{"role": "assistant", "content": bot_message}] - history[-1][1] = bot_message - time.sleep(1) - return history, messages_history - - def ask_gpt(message, messages_history): - messages_history += [{"role": "user", "content": message}] - query_str = '' - QA_PROMPT_TMPL = ( - "You are an conversational AI specialized in Atlanta.\n" - "If a query does not relate to Atlanta, say you can't answer the query.\n"# and make the answer related to Atlanta.\n" - "We have provided context information below. 
\n" - "---------------------\n" - "{context_str}" - "\n---------------------\n" - "Given this information, please give a detailed and conversational answer to the query: {query_str} and cite the url source associated with this answer.\n" - "Use information from previous queries in your response when appropriate.\n" - "Format the answer to the query like this: Answer: .\n" - "\nSource: followed by the source in bold.\n" - "Put the Answer and Source on different lines of the response and the Source is the url source associated with the answer.\n" - ) - QA_PROMPT = QuestionAnswerPrompt(QA_PROMPT_TMPL) - - - # Takes in the input from the user to deliver responses - index = GPTSimpleVectorIndex.load_from_disk('index_demo.json') - message = ' '.join([message['content'] for message in messages_history]) - response = index.query(message, text_qa_template = QA_PROMPT) - return response.response, messages_history - #return response['choices'][0]['message']['content'], messages_history - - - def init_history(messages_history): - messages_history = [] - messages_history += [system_message] - return messages_history - - msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then( - bot, [chatbot, state], [chatbot, state] - ) - - clear.click(lambda: None, None, chatbot, queue=False).success(init_history, [state], [state]) - -demo.launch() \ No newline at end of file diff --git a/spaces/peter2000/E-Coicop-food-classifier/README.md b/spaces/peter2000/E-Coicop-food-classifier/README.md deleted file mode 100644 index 01dbd9db7979211fbd029bc847405f1afa6add9e..0000000000000000000000000000000000000000 --- a/spaces/peter2000/E-Coicop-food-classifier/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: E Coicop Food Classifier -emoji: 🏃 -colorFrom: yellow -colorTo: green -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
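The deleted E-Coicop README above documents the individual Space configuration keys; in practice they are combined into one YAML front-matter block at the top of a Space's README.md. The sketch below is purely illustrative and is not part of any file in this diff: every value (title, emoji, colors, sdk choice, version string) is a hypothetical placeholder, shown only to gather the documented fields in one place.

```yaml
---
# Hypothetical Space front matter; illustrative values only, not taken from this repository.
title: Example Classifier   # display title for the Space
emoji: 🥦                   # emoji-only thumbnail character
colorFrom: yellow           # thumbnail gradient start color
colorTo: green              # thumbnail gradient end color
sdk: streamlit              # either `gradio` or `streamlit`
sdk_version: "1.21.0"       # only applicable to the streamlit SDK; version is a placeholder
app_file: app.py            # main application file, relative to the repository root
pinned: false               # whether the Space stays on top of your list
---
```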
diff --git a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/dataloaders.py b/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/dataloaders.py deleted file mode 100644 index 5eab6201f4f44595c2b279eff15146841a539e46..0000000000000000000000000000000000000000 --- a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/dataloaders.py +++ /dev/null @@ -1,1222 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Dataloaders and dataset utils -""" - -import contextlib -import glob -import hashlib -import json -import math -import os -import random -import shutil -import time -from itertools import repeat -from multiprocessing.pool import Pool, ThreadPool -from pathlib import Path -from threading import Thread -from urllib.parse import urlparse - -import numpy as np -import psutil -import torch -import torch.nn.functional as F -import torchvision -import yaml -from PIL import ExifTags, Image, ImageOps -from torch.utils.data import DataLoader, Dataset, dataloader, distributed -from tqdm import tqdm - -from utils.augmentations_torchscript import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, - letterbox, mixup, random_perspective) -from utils.general_torchscript import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements, - check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, - xywh2xyxy, xywhn2xyxy, xyxy2xywhn) -from utils.torch_utils import torch_distributed_zero_first - -# Parameters -HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data' -IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes -VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders - -# Get orientation exif tag -for orientation in ExifTags.TAGS.keys(): - if ExifTags.TAGS[orientation] == 'Orientation': - break - - -def get_hash(paths): - # Returns a single hash value of a list of paths (files or dirs) - size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes - h = hashlib.sha256(str(size).encode()) # hash sizes - h.update(''.join(paths).encode()) # hash paths - return h.hexdigest() # return hash - - -def exif_size(img): - # Returns exif-corrected PIL size - s = img.size # (width, height) - with contextlib.suppress(Exception): - rotation = dict(img._getexif().items())[orientation] - if rotation in [6, 8]: # rotation 270 or 90 - s = (s[1], s[0]) - return s - - -def exif_transpose(image): - """ - Transpose a PIL image accordingly if it has an EXIF Orientation tag. - Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() - - :param image: The image to transpose. - :return: An image. 
- """ - exif = image.getexif() - orientation = exif.get(0x0112, 1) # default 1 - if orientation > 1: - method = { - 2: Image.FLIP_LEFT_RIGHT, - 3: Image.ROTATE_180, - 4: Image.FLIP_TOP_BOTTOM, - 5: Image.TRANSPOSE, - 6: Image.ROTATE_270, - 7: Image.TRANSVERSE, - 8: Image.ROTATE_90}.get(orientation) - if method is not None: - image = image.transpose(method) - del exif[0x0112] - image.info['exif'] = exif.tobytes() - return image - - -def seed_worker(worker_id): - # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader - worker_seed = torch.initial_seed() % 2 ** 32 - np.random.seed(worker_seed) - random.seed(worker_seed) - - -def create_dataloader(path, - imgsz, - batch_size, - stride, - single_cls=False, - hyp=None, - augment=False, - cache=False, - pad=0.0, - rect=False, - rank=-1, - workers=8, - image_weights=False, - quad=False, - prefix='', - shuffle=False, - seed=0): - if rect and shuffle: - LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') - shuffle = False - with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP - dataset = LoadImagesAndLabels( - path, - imgsz, - batch_size, - augment=augment, # augmentation - hyp=hyp, # hyperparameters - rect=rect, # rectangular batches - cache_images=cache, - single_cls=single_cls, - stride=int(stride), - pad=pad, - image_weights=image_weights, - prefix=prefix) - - batch_size = min(batch_size, len(dataset)) - nd = torch.cuda.device_count() # number of CUDA devices - nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers - sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) - loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates - generator = torch.Generator() - generator.manual_seed(6148914691236517205 + seed + RANK) - return loader(dataset, - batch_size=batch_size, - shuffle=shuffle and sampler is None, - num_workers=nw, - sampler=sampler, - pin_memory=PIN_MEMORY, - collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, - worker_init_fn=seed_worker, - generator=generator), dataset - - -class InfiniteDataLoader(dataloader.DataLoader): - """ Dataloader that reuses workers - - Uses same syntax as vanilla DataLoader - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) - self.iterator = super().__iter__() - - def __len__(self): - return len(self.batch_sampler.sampler) - - def __iter__(self): - for _ in range(len(self)): - yield next(self.iterator) - - -class _RepeatSampler: - """ Sampler that repeats forever - - Args: - sampler (Sampler) - """ - - def __init__(self, sampler): - self.sampler = sampler - - def __iter__(self): - while True: - yield from iter(self.sampler) - - -class LoadScreenshots: - # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source "screen 0 100 100 512 256"` - def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): - # source = [screen_number left top width height] (pixels) - check_requirements('mss') - import mss - - source, *params = source.split() - self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 - if len(params) == 1: - self.screen = int(params[0]) - elif len(params) == 4: - left, top, width, height = (int(x) for x in params) - elif len(params) == 5: - self.screen, left, top, width, height = (int(x) for x in params) - self.img_size = img_size - self.stride = stride - self.transforms = transforms - self.auto = auto - self.mode = 'stream' - self.frame = 0 - self.sct = mss.mss() - - # Parse monitor shape - monitor = self.sct.monitors[self.screen] - self.top = monitor['top'] if top is None else (monitor['top'] + top) - self.left = monitor['left'] if left is None else (monitor['left'] + left) - self.width = width or monitor['width'] - self.height = height or monitor['height'] - self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} - - def __iter__(self): - return self - - def __next__(self): - # mss screen capture: get raw pixels from the screen as np array - im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR - s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' - - if self.transforms: - im = self.transforms(im0) # transforms - else: - im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize - im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - im = np.ascontiguousarray(im) # contiguous - self.frame += 1 - return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s - - -class LoadImages: - # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` - def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): - if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line - path = Path(path).read_text().rsplit() - files = [] - for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: - p = str(Path(p).resolve()) - if '*' in p: - files.extend(sorted(glob.glob(p, recursive=True))) # glob - elif os.path.isdir(p): - files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir - elif os.path.isfile(p): - files.append(p) # files - else: - raise FileNotFoundError(f'{p} does not exist') - - images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] - videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] - ni, nv = len(images), len(videos) - - self.img_size = img_size - self.stride = stride - self.files = images + videos - self.nf = ni + nv # number of files - self.video_flag = [False] * ni + [True] * nv - self.mode = 'image' - self.auto = auto - self.transforms = transforms # optional - self.vid_stride = vid_stride # video frame-rate stride - if any(videos): - self._new_video(videos[0]) # new video - else: - self.cap = None - assert self.nf > 0, f'No images or videos found in {p}. 
' \ - f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' - - def __iter__(self): - self.count = 0 - return self - - def __next__(self): - if self.count == self.nf: - raise StopIteration - path = self.files[self.count] - - if self.video_flag[self.count]: - # Read video - self.mode = 'video' - for _ in range(self.vid_stride): - self.cap.grab() - ret_val, im0 = self.cap.retrieve() - while not ret_val: - self.count += 1 - self.cap.release() - if self.count == self.nf: # last video - raise StopIteration - path = self.files[self.count] - self._new_video(path) - ret_val, im0 = self.cap.read() - - self.frame += 1 - # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False - s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' - - else: - # Read image - self.count += 1 - im0 = cv2.imread(path) # BGR - assert im0 is not None, f'Image Not Found {path}' - s = f'image {self.count}/{self.nf} {path}: ' - - if self.transforms: - im = self.transforms(im0) # transforms - else: - im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize - im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - im = np.ascontiguousarray(im) # contiguous - - return path, im, im0, self.cap, s - - def _new_video(self, path): - # Create a new video capture object - self.frame = 0 - self.cap = cv2.VideoCapture(path) - self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) - self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees - # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 - - def _cv2_rotate(self, im): - # Rotate a cv2 video manually - if self.orientation == 0: - return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) - elif self.orientation == 180: - return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) - elif self.orientation == 90: - return cv2.rotate(im, cv2.ROTATE_180) - return im - - def __len__(self): - return self.nf # number of files - - -class LoadStreams: - # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` - def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): - torch.backends.cudnn.benchmark = True # faster for fixed-size inference - self.mode = 'stream' - self.img_size = img_size - self.stride = stride - self.vid_stride = vid_stride # video frame-rate stride - sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] - n = len(sources) - self.sources = [clean_str(x) for x in sources] # clean source names for later - self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n - for i, s in enumerate(sources): # index, source - # Start thread to read frames from video stream - st = f'{i + 1}/{n}: {s}... ' - if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video - # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' - check_requirements(('pafy', 'youtube_dl==2020.12.2')) - import pafy - s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL - s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam - if s == 0: - assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' - assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' 
- cap = cv2.VideoCapture(s) - assert cap.isOpened(), f'{st}Failed to open {s}' - w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan - self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback - self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback - - _, self.imgs[i] = cap.read() # guarantee first frame - self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) - LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)') - self.threads[i].start() - LOGGER.info('') # newline - - # check for common shapes - s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs]) - self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal - self.auto = auto and self.rect - self.transforms = transforms # optional - if not self.rect: - LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.') - - def update(self, i, cap, stream): - # Read stream `i` frames in daemon thread - n, f = 0, self.frames[i] # frame number, frame array - while cap.isOpened() and n < f: - n += 1 - cap.grab() # .read() = .grab() followed by .retrieve() - if n % self.vid_stride == 0: - success, im = cap.retrieve() - if success: - self.imgs[i] = im - else: - LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') - self.imgs[i] = np.zeros_like(self.imgs[i]) - cap.open(stream) # re-open stream if signal was lost - time.sleep(0.0) # wait time - - def __iter__(self): - self.count = -1 - return self - - def __next__(self): - self.count += 1 - if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit - cv2.destroyAllWindows() - raise StopIteration - - im0 = self.imgs.copy() - if self.transforms: - im = np.stack([self.transforms(x) for x in im0]) # transforms - else: - im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize - im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW - im = np.ascontiguousarray(im) # contiguous - - return self.sources, im, im0, None, '' - - def __len__(self): - return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years - - -def img2label_paths(img_paths): - # Define label paths as a function of image paths - sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings - return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] - - -class LoadImagesAndLabels(Dataset): - # YOLOv5 train_loader/val_loader, loads images and labels for training and validation - cache_version = 0.6 # dataset labels *.cache version - rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] - - def __init__(self, - path, - img_size=640, - batch_size=16, - augment=False, - hyp=None, - rect=False, - image_weights=False, - cache_images=False, - single_cls=False, - stride=32, - pad=0.0, - min_items=0, - prefix=''): - self.img_size = img_size - self.augment = augment - self.hyp = hyp - self.image_weights = image_weights - self.rect = False if image_weights else rect - self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) - self.mosaic_border = [-img_size // 2, -img_size // 2] - self.stride = 
stride - self.path = path - self.albumentations = Albumentations(size=img_size) if augment else None - - try: - f = [] # image files - for p in path if isinstance(path, list) else [path]: - p = Path(p) # os-agnostic - if p.is_dir(): # dir - f += glob.glob(str(p / '**' / '*.*'), recursive=True) - # f = list(p.rglob('*.*')) # pathlib - elif p.is_file(): # file - with open(p) as t: - t = t.read().strip().splitlines() - parent = str(p.parent) + os.sep - f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path - # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib) - else: - raise FileNotFoundError(f'{prefix}{p} does not exist') - self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) - # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib - assert self.im_files, f'{prefix}No images found' - except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e - - # Check cache - self.label_files = img2label_paths(self.im_files) # labels - cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') - try: - cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict - assert cache['version'] == self.cache_version # matches current version - assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash - except Exception: - cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops - - # Display cache - nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total - if exists and LOCAL_RANK in {-1, 0}: - d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt' - tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results - if cache['msgs']: - LOGGER.info('\n'.join(cache['msgs'])) # display warnings - assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' - - # Read cache - [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items - labels, shapes, self.segments = zip(*cache.values()) - nl = len(np.concatenate(labels, 0)) # number of labels - assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. 
{HELP_URL}' - self.labels = list(labels) - self.shapes = np.array(shapes) - self.im_files = list(cache.keys()) # update - self.label_files = img2label_paths(cache.keys()) # update - - # Filter images - if min_items: - include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int) - LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset') - self.im_files = [self.im_files[i] for i in include] - self.label_files = [self.label_files[i] for i in include] - self.labels = [self.labels[i] for i in include] - self.segments = [self.segments[i] for i in include] - self.shapes = self.shapes[include] # wh - - # Create indices - n = len(self.shapes) # number of images - bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index - nb = bi[-1] + 1 # number of batches - self.batch = bi # batch index of image - self.n = n - self.indices = range(n) - - # Update labels - include_class = [] # filter labels to include only these classes (optional) - self.segments = list(self.segments) - include_class_array = np.array(include_class).reshape(1, -1) - for i, (label, segment) in enumerate(zip(self.labels, self.segments)): - if include_class: - j = (label[:, 0:1] == include_class_array).any(1) - self.labels[i] = label[j] - if segment: - self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem] - if single_cls: # single-class training, merge all classes into 0 - self.labels[i][:, 0] = 0 - - # Rectangular Training - if self.rect: - # Sort by aspect ratio - s = self.shapes # wh - ar = s[:, 1] / s[:, 0] # aspect ratio - irect = ar.argsort() - self.im_files = [self.im_files[i] for i in irect] - self.label_files = [self.label_files[i] for i in irect] - self.labels = [self.labels[i] for i in irect] - self.segments = [self.segments[i] for i in irect] - self.shapes = s[irect] # wh - ar = ar[irect] - - # Set training image shapes - shapes = [[1, 1]] * nb - for i in range(nb): - ari = ar[bi == i] - mini, maxi = ari.min(), ari.max() - if maxi < 1: - shapes[i] = [maxi, 1] - elif mini > 1: - shapes[i] = [1, 1 / mini] - - self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride - - # Cache images into RAM/disk for faster training - if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix): - cache_images = False - self.ims = [None] * n - self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] - if cache_images: - b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes - self.im_hw0, self.im_hw = [None] * n, [None] * n - fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image - results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) - pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) - for i, x in pbar: - if cache_images == 'disk': - b += self.npy_files[i].stat().st_size - else: # 'ram' - self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) - b += self.ims[i].nbytes - pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})' - pbar.close() - - def check_cache_ram(self, safety_margin=0.1, prefix=''): - # Check image caching requirements vs available memory - b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes - n = min(self.n, 30) # extrapolate from 30 random images - for _ in range(n): - im = cv2.imread(random.choice(self.im_files)) # sample image - ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio - b += im.nbytes * ratio ** 2 - mem_required = b 
* self.n / n # GB required to cache dataset into RAM - mem = psutil.virtual_memory() - cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question - if not cache: - LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, ' - f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' - f"{'caching images ✅' if cache else 'not caching images ⚠️'}") - return cache - - def cache_labels(self, path=Path('./labels.cache'), prefix=''): - # Cache dataset labels, check images and read shapes - x = {} # dict - nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages - desc = f'{prefix}Scanning {path.parent / path.stem}...' - with Pool(NUM_THREADS) as pool: - pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), - desc=desc, - total=len(self.im_files), - bar_format=TQDM_BAR_FORMAT) - for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: - nm += nm_f - nf += nf_f - ne += ne_f - nc += nc_f - if im_file: - x[im_file] = [lb, shape, segments] - if msg: - msgs.append(msg) - pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' - - pbar.close() - if msgs: - LOGGER.info('\n'.join(msgs)) - if nf == 0: - LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}') - x['hash'] = get_hash(self.label_files + self.im_files) - x['results'] = nf, nm, ne, nc, len(self.im_files) - x['msgs'] = msgs # warnings - x['version'] = self.cache_version # cache version - try: - np.save(path, x) # save cache for next time - path.with_suffix('.cache.npy').rename(path) # remove .npy suffix - LOGGER.info(f'{prefix}New cache created: {path}') - except Exception as e: - LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable - return x - - def __len__(self): - return len(self.im_files) - - # def __iter__(self): - # self.count = -1 - # print('ran dataset iter') - # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) - # return self - - def __getitem__(self, index): - index = self.indices[index] # linear, shuffled, or image_weights - - hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp['mosaic'] - if mosaic: - # Load mosaic - img, labels = self.load_mosaic(index) - shapes = None - - # MixUp augmentation - if random.random() < hyp['mixup']: - img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) - - else: - # Load image - img, (h0, w0), (h, w) = self.load_image(index) - - # Letterbox - shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape - img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) - shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling - - labels = self.labels[index].copy() - if labels.size: # normalized xywh to pixel xyxy format - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) - - if self.augment: - img, labels = random_perspective(img, - labels, - degrees=hyp['degrees'], - translate=hyp['translate'], - scale=hyp['scale'], - shear=hyp['shear'], - perspective=hyp['perspective']) - - nl = len(labels) # number of labels - if nl: - labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) - - if self.augment: - # Albumentations - img, labels = self.albumentations(img, labels) - nl = len(labels) # update after albumentations - - # HSV color-space - 
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) - - # Flip up-down - if random.random() < hyp['flipud']: - img = np.flipud(img) - if nl: - labels[:, 2] = 1 - labels[:, 2] - - # Flip left-right - if random.random() < hyp['fliplr']: - img = np.fliplr(img) - if nl: - labels[:, 1] = 1 - labels[:, 1] - - # Cutouts - # labels = cutout(img, labels, p=0.5) - # nl = len(labels) # update after cutout - - labels_out = torch.zeros((nl, 6)) - if nl: - labels_out[:, 1:] = torch.from_numpy(labels) - - # Convert - img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img = np.ascontiguousarray(img) - - return torch.from_numpy(img), labels_out, self.im_files[index], shapes - - def load_image(self, i): - # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) - im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], - if im is None: # not cached in RAM - if fn.exists(): # load npy - im = np.load(fn) - else: # read image - im = cv2.imread(f) # BGR - assert im is not None, f'Image Not Found {f}' - h0, w0 = im.shape[:2] # orig hw - r = self.img_size / max(h0, w0) # ratio - if r != 1: # if sizes are not equal - interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA - im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp) - return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized - return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized - - def cache_images_to_disk(self, i): - # Saves an image as an *.npy file for faster loading - f = self.npy_files[i] - if not f.exists(): - np.save(f.as_posix(), cv2.imread(self.im_files[i])) - - def load_mosaic(self, index): - # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic - labels4, segments4 = [], [] - s = self.img_size - yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y - indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices - random.shuffle(indices) - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = self.load_image(index) - - # place img in img4 - if i == 0: # top left - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) - elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - padw = x1a - x1b - padh = y1a - y1b - - # Labels - labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padw, padh) for x in segments] - labels4.append(labels) - segments4.extend(segments) - - # Concat/clip labels - labels4 = np.concatenate(labels4, 0) - for x in (labels4[:, 1:], *segments4): - np.clip(x, 0, 2 * s, out=x) # clip when 
using random_perspective() - # img4, labels4 = replicate(img4, labels4) # replicate - - # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) - img4, labels4 = random_perspective(img4, - labels4, - segments4, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img4, labels4 - - def load_mosaic9(self, index): - # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic - labels9, segments9 = [], [] - s = self.img_size - indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices - random.shuffle(indices) - hp, wp = -1, -1 # height, width previous - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = self.load_image(index) - - # place img in img9 - if i == 0: # center - img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - h0, w0 = h, w - c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates - elif i == 1: # top - c = s, s - h, s + w, s - elif i == 2: # top right - c = s + wp, s - h, s + wp + w, s - elif i == 3: # right - c = s + w0, s, s + w0 + w, s + h - elif i == 4: # bottom right - c = s + w0, s + hp, s + w0 + w, s + hp + h - elif i == 5: # bottom - c = s + w0 - w, s + h0, s + w0, s + h0 + h - elif i == 6: # bottom left - c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h - elif i == 7: # left - c = s - w, s + h0 - h, s, s + h0 - elif i == 8: # top left - c = s - w, s + h0 - hp - h, s, s + h0 - hp - - padx, pady = c[:2] - x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords - - # Labels - labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padx, pady) for x in segments] - labels9.append(labels) - segments9.extend(segments) - - # Image - img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] - hp, wp = h, w # height, width previous - - # Offset - yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y - img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] - - # Concat/clip labels - labels9 = np.concatenate(labels9, 0) - labels9[:, [1, 3]] -= xc - labels9[:, [2, 4]] -= yc - c = np.array([xc, yc]) # centers - segments9 = [x - c for x in segments9] - - for x in (labels9[:, 1:], *segments9): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img9, labels9 = replicate(img9, labels9) # replicate - - # Augment - img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste']) - img9, labels9 = random_perspective(img9, - labels9, - segments9, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img9, labels9 - - @staticmethod - def collate_fn(batch): - im, label, path, shapes = zip(*batch) # transposed - for i, lb in enumerate(label): - lb[:, 0] = i # add target image index for build_targets() - return torch.stack(im, 0), torch.cat(label, 0), path, shapes - - @staticmethod - def collate_fn4(batch): - im, label, path, shapes = zip(*batch) # transposed - n = len(shapes) // 4 - im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] - - ho = 
torch.tensor([[0.0, 0, 0, 1, 0, 0]]) - wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) - s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale - for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW - i *= 4 - if random.random() < 0.5: - im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', - align_corners=False)[0].type(im[i].type()) - lb = label[i] - else: - im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2) - lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s - im4.append(im1) - label4.append(lb) - - for i, lb in enumerate(label4): - lb[:, 0] = i # add target image index for build_targets() - - return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4 - - -# Ancillary functions -------------------------------------------------------------------------------------------------- -def flatten_recursive(path=DATASETS_DIR / 'coco128'): - # Flatten a recursive directory by bringing all files to top level - new_path = Path(f'{str(path)}_flat') - if os.path.exists(new_path): - shutil.rmtree(new_path) # delete output folder - os.makedirs(new_path) # make new output folder - for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)): - shutil.copyfile(file, new_path / Path(file).name) - - -def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes() - # Convert detection dataset into classification dataset, with one directory per class - path = Path(path) # images dir - shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing - files = list(path.rglob('*.*')) - n = len(files) # number of files - for im_file in tqdm(files, total=n): - if im_file.suffix[1:] in IMG_FORMATS: - # image - im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB - h, w = im.shape[:2] - - # labels - lb_file = Path(img2label_paths([str(im_file)])[0]) - if Path(lb_file).exists(): - with open(lb_file) as f: - lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels - - for j, x in enumerate(lb): - c = int(x[0]) # class - f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename - if not f.parent.is_dir(): - f.parent.mkdir(parents=True) - - b = x[1:] * [w, h, w, h] # box - # b[2:] = b[2:].max() # rectangle to square - b[2:] = b[2:] * 1.2 + 3 # pad - b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int) - - b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image - b[[1, 3]] = np.clip(b[[1, 3]], 0, h) - assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' - - -def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): - """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files - Usage: from utils.dataloaders import *; autosplit() - Arguments - path: Path to images directory - weights: Train, val, test weights (list, tuple) - annotated_only: Only use images with an annotated txt file - """ - path = Path(path) # images dir - files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only - n = len(files) # number of files - random.seed(0) # for reproducibility - indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split - - txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files - for x in txt: - if (path.parent / x).exists(): - (path.parent / 
x).unlink() # remove existing - - print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) - for i, img in tqdm(zip(indices, files), total=n): - if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label - with open(path.parent / txt[i], 'a') as f: - f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file - - -def verify_image_label(args): - # Verify one image-label pair - im_file, lb_file, prefix = args - nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments - try: - # verify images - im = Image.open(im_file) - im.verify() # PIL verify - shape = exif_size(im) # image size - assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' - assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' - if im.format.lower() in ('jpg', 'jpeg'): - with open(im_file, 'rb') as f: - f.seek(-2, 2) - if f.read() != b'\xff\xd9': # corrupt JPEG - ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) - msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' - - # verify labels - if os.path.isfile(lb_file): - nf = 1 # label found - with open(lb_file) as f: - lb = [x.split() for x in f.read().strip().splitlines() if len(x)] - if any(len(x) > 6 for x in lb): # is segment - classes = np.array([x[0] for x in lb], dtype=np.float32) - segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) - lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) - lb = np.array(lb, dtype=np.float32) - nl = len(lb) - if nl: - assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' - assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' - assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' - _, i = np.unique(lb, axis=0, return_index=True) - if len(i) < nl: # duplicate row check - lb = lb[i] # remove duplicates - if segments: - segments = [segments[x] for x in i] - msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' - else: - ne = 1 # label empty - lb = np.zeros((0, 5), dtype=np.float32) - else: - nm = 1 # label missing - lb = np.zeros((0, 5), dtype=np.float32) - return im_file, lb, shape, segments, nm, nf, ne, nc, msg - except Exception as e: - nc = 1 - msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' - return [None, None, None, None, nm, nf, ne, nc, msg] - - -class HUBDatasetStats(): - """ Class for generating HUB dataset JSON and `-hub` dataset directory - - Arguments - path: Path to data.yaml or data.zip (with data.yaml inside data.zip) - autodownload: Attempt to download dataset if not found locally - - Usage - from utils.dataloaders import HUBDatasetStats - stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1 - stats = HUBDatasetStats('path/to/coco128.zip') # usage 2 - stats.get_json(save=False) - stats.process_images() - """ - - def __init__(self, path='coco128.yaml', autodownload=False): - # Initialize class - zipped, data_dir, yaml_path = self._unzip(Path(path)) - try: - with open(check_yaml(yaml_path), errors='ignore') as f: - data = yaml.safe_load(f) # data dict - if zipped: - data['path'] = data_dir - except Exception as e: - raise Exception('error/HUB/dataset_stats/yaml_load') from e - - check_dataset(data, autodownload) # download dataset if missing - 
self.hub_dir = Path(data['path'] + '-hub') - self.im_dir = self.hub_dir / 'images' - self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images - self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary - self.data = data - - @staticmethod - def _find_yaml(dir): - # Return data.yaml file - files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive - assert files, f'No *.yaml file found in {dir}' - if len(files) > 1: - files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name - assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' - assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' - return files[0] - - def _unzip(self, path): - # Unzip data.zip - if not str(path).endswith('.zip'): # path is data.yaml - return False, None, path - assert Path(path).is_file(), f'Error unzipping {path}, file not found' - unzip_file(path, path=path.parent) - dir = path.with_suffix('') # dataset directory == zip name - assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' - return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path - - def _hub_ops(self, f, max_dim=1920): - # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing - f_new = self.im_dir / Path(f).name # dataset-hub image filename - try: # use PIL - im = Image.open(f) - r = max_dim / max(im.height, im.width) # ratio - if r < 1.0: # image too large - im = im.resize((int(im.width * r), int(im.height * r))) - im.save(f_new, 'JPEG', quality=50, optimize=True) # save - except Exception as e: # use OpenCV - LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') - im = cv2.imread(f) - im_height, im_width = im.shape[:2] - r = max_dim / max(im_height, im_width) # ratio - if r < 1.0: # image too large - im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) - cv2.imwrite(str(f_new), im) - - def get_json(self, save=False, verbose=False): - # Return dataset JSON for Ultralytics HUB - def _round(labels): - # Update labels to integer class and 6 decimal place floats - return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] - - for split in 'train', 'val', 'test': - if self.data.get(split) is None: - self.stats[split] = None # i.e. 
no test set - continue - dataset = LoadImagesAndLabels(self.data[split]) # load dataset - x = np.array([ - np.bincount(label[:, 0].astype(int), minlength=self.data['nc']) - for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80) - self.stats[split] = { - 'instance_stats': { - 'total': int(x.sum()), - 'per_class': x.sum(0).tolist()}, - 'image_stats': { - 'total': dataset.n, - 'unlabelled': int(np.all(x == 0, 1).sum()), - 'per_class': (x > 0).sum(0).tolist()}, - 'labels': [{ - str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} - - # Save, print and return - if save: - stats_path = self.hub_dir / 'stats.json' - print(f'Saving {stats_path.resolve()}...') - with open(stats_path, 'w') as f: - json.dump(self.stats, f) # save stats.json - if verbose: - print(json.dumps(self.stats, indent=2, sort_keys=False)) - return self.stats - - def process_images(self): - # Compress images for Ultralytics HUB - for split in 'train', 'val', 'test': - if self.data.get(split) is None: - continue - dataset = LoadImagesAndLabels(self.data[split]) # load dataset - desc = f'{split} images' - for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc): - pass - print(f'Done. All images saved to {self.im_dir}') - return self.im_dir - - -# Classification dataloaders ------------------------------------------------------------------------------------------- -class ClassificationDataset(torchvision.datasets.ImageFolder): - """ - YOLOv5 Classification Dataset. - Arguments - root: Dataset path - transform: torchvision transforms, used by default - album_transform: Albumentations transforms, used if installed - """ - - def __init__(self, root, augment, imgsz, cache=False): - super().__init__(root=root) - self.torch_transforms = classify_transforms(imgsz) - self.album_transforms = classify_albumentations(augment, imgsz) if augment else None - self.cache_ram = cache is True or cache == 'ram' - self.cache_disk = cache == 'disk' - self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im - - def __getitem__(self, i): - f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image - if self.cache_ram and im is None: - im = self.samples[i][3] = cv2.imread(f) - elif self.cache_disk: - if not fn.exists(): # load npy - np.save(fn.as_posix(), cv2.imread(f)) - im = np.load(fn) - else: # read image - im = cv2.imread(f) # BGR - if self.album_transforms: - sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] - else: - sample = self.torch_transforms(im) - return sample, j - - -def create_classification_dataloader(path, - imgsz=224, - batch_size=16, - augment=True, - cache=False, - rank=-1, - workers=8, - shuffle=True): - # Returns Dataloader object to be used with YOLOv5 Classifier - with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP - dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) - batch_size = min(batch_size, len(dataset)) - nd = torch.cuda.device_count() - nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) - sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) - generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) - return InfiniteDataLoader(dataset, - batch_size=batch_size, - shuffle=shuffle and sampler is None, - num_workers=nw, - 
sampler=sampler, - pin_memory=PIN_MEMORY, - worker_init_fn=seed_worker, - generator=generator) # or DataLoader(persistent_workers=True) \ No newline at end of file diff --git a/spaces/pixiou/bingo/src/pages/api/blob.ts b/spaces/pixiou/bingo/src/pages/api/blob.ts deleted file mode 100644 index fecd48031916b2284b8958892196e0a1ad420421..0000000000000000000000000000000000000000 --- a/spaces/pixiou/bingo/src/pages/api/blob.ts +++ /dev/null @@ -1,40 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { Readable } from 'node:stream' -import { fetch } from '@/lib/isomorphic' - -const API_DOMAIN = 'https://www.bing.com' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { bcid } = req.query - - const { headers, body } = await fetch(`${API_DOMAIN}/images/blob?bcid=${bcid}`, - { - method: 'GET', - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referrer-Policy": "origin-when-cross-origin", - }, - }, - ) - - res.writeHead(200, { - 'Content-Length': headers.get('content-length')!, - 'Content-Type': headers.get('content-type')!, - }) - // @ts-ignore - return Readable.fromWeb(body!).pipe(res) - } catch (e) { - console.log('Error', e) - return res.json({ - result: { - value: 'UploadFailed', - message: `${e}` - } - }) - } -} diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/enums.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/enums.py deleted file mode 100644 index 5e3e198233698f2b007489dd299cecb87d971067..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/enums.py +++ /dev/null @@ -1,85 +0,0 @@ -""" -All of the Enums that are used throughout the chardet package. - -:author: Dan Blanchard (dan.blanchard@gmail.com) -""" - -from enum import Enum, Flag - - -class InputState: - """ - This enum represents the different states a universal detector can be in. - """ - - PURE_ASCII = 0 - ESC_ASCII = 1 - HIGH_BYTE = 2 - - -class LanguageFilter(Flag): - """ - This enum represents the different language filters we can apply to a - ``UniversalDetector``. - """ - - NONE = 0x00 - CHINESE_SIMPLIFIED = 0x01 - CHINESE_TRADITIONAL = 0x02 - JAPANESE = 0x04 - KOREAN = 0x08 - NON_CJK = 0x10 - ALL = 0x1F - CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL - CJK = CHINESE | JAPANESE | KOREAN - - -class ProbingState(Enum): - """ - This enum represents the different states a prober can be in. - """ - - DETECTING = 0 - FOUND_IT = 1 - NOT_ME = 2 - - -class MachineState: - """ - This enum represents the different states a state machine can be in. - """ - - START = 0 - ERROR = 1 - ITS_ME = 2 - - -class SequenceLikelihood: - """ - This enum represents the likelihood of a character following the previous one. - """ - - NEGATIVE = 0 - UNLIKELY = 1 - LIKELY = 2 - POSITIVE = 3 - - @classmethod - def get_num_categories(cls) -> int: - """:returns: The number of likelihood categories in the enum.""" - return 4 - - -class CharacterCategory: - """ - This enum represents the different categories language models for - ``SingleByteCharsetProber`` put characters into. - - Anything less than CONTROL is considered a letter. 
- """ - - UNDEFINED = 255 - LINE_BREAK = 254 - SYMBOL = 253 - DIGIT = 252 - CONTROL = 251 diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/mbcharsetprober.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/mbcharsetprober.py deleted file mode 100644 index 666307e8fe0608c69f2b6578a49794e1e20a139a..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/mbcharsetprober.py +++ /dev/null @@ -1,95 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# Proofpoint, Inc. -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from typing import Optional, Union - -from .chardistribution import CharDistributionAnalysis -from .charsetprober import CharSetProber -from .codingstatemachine import CodingStateMachine -from .enums import LanguageFilter, MachineState, ProbingState - - -class MultiByteCharSetProber(CharSetProber): - """ - MultiByteCharSetProber - """ - - def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None: - super().__init__(lang_filter=lang_filter) - self.distribution_analyzer: Optional[CharDistributionAnalysis] = None - self.coding_sm: Optional[CodingStateMachine] = None - self._last_char = bytearray(b"\0\0") - - def reset(self) -> None: - super().reset() - if self.coding_sm: - self.coding_sm.reset() - if self.distribution_analyzer: - self.distribution_analyzer.reset() - self._last_char = bytearray(b"\0\0") - - def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: - assert self.coding_sm is not None - assert self.distribution_analyzer is not None - - for i, byte in enumerate(byte_str): - coding_state = self.coding_sm.next_state(byte) - if coding_state == MachineState.ERROR: - self.logger.debug( - "%s %s prober hit error at byte %s", - self.charset_name, - self.language, - i, - ) - self._state = ProbingState.NOT_ME - break - if coding_state == MachineState.ITS_ME: - self._state = ProbingState.FOUND_IT - break - if coding_state == MachineState.START: - char_len = self.coding_sm.get_current_charlen() - if i == 0: - self._last_char[1] = byte - self.distribution_analyzer.feed(self._last_char, char_len) - else: - self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len) - - self._last_char[0] = byte_str[-1] - - if self.state == ProbingState.DETECTING: - if 
self.distribution_analyzer.got_enough_data() and ( - self.get_confidence() > self.SHORTCUT_THRESHOLD - ): - self._state = ProbingState.FOUND_IT - - return self.state - - def get_confidence(self) -> float: - assert self.distribution_analyzer is not None - return self.distribution_analyzer.get_confidence() diff --git a/spaces/prerna9811/Chord/portaudio/src/common/pa_allocation.h b/spaces/prerna9811/Chord/portaudio/src/common/pa_allocation.h deleted file mode 100644 index 5c3cf5309cd9f367d948a4003e8f111a7f304063..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/src/common/pa_allocation.h +++ /dev/null @@ -1,104 +0,0 @@ -#ifndef PA_ALLOCATION_H -#define PA_ALLOCATION_H -/* - * $Id$ - * Portable Audio I/O Library allocation context header - * memory allocation context for tracking allocation groups - * - * Based on the Open Source API proposed by Ross Bencina - * Copyright (c) 1999-2008 Ross Bencina, Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -/** @file - @ingroup common_src - - @brief Allocation Group prototypes. An Allocation Group makes it easy to - allocate multiple blocks of memory and free them all at once. - - An allocation group is useful for keeping track of multiple blocks - of memory which are allocated at the same time (such as during initialization) - and need to be deallocated at the same time. The allocation group maintains - a list of allocated blocks, and can free all allocations at once. This - can be useful for cleaning up after a partially initialized object fails. - - The allocation group implementation is built on top of the lower - level allocation functions defined in pa_util.h -*/ - - -#ifdef __cplusplus -extern "C" -{ -#endif /* __cplusplus */ - - -typedef struct -{ - long linkCount; - struct PaUtilAllocationGroupLink *linkBlocks; - struct PaUtilAllocationGroupLink *spareLinks; - struct PaUtilAllocationGroupLink *allocations; -}PaUtilAllocationGroup; - - - -/** Create an allocation group. 
-*/ -PaUtilAllocationGroup* PaUtil_CreateAllocationGroup( void ); - -/** Destroy an allocation group, but not the memory allocated through the group. -*/ -void PaUtil_DestroyAllocationGroup( PaUtilAllocationGroup* group ); - -/** Allocate a block of memory though an allocation group. -*/ -void* PaUtil_GroupAllocateMemory( PaUtilAllocationGroup* group, long size ); - -/** Free a block of memory that was previously allocated though an allocation - group. Calling this function is a relatively time consuming operation. - Under normal circumstances clients should call PaUtil_FreeAllAllocations to - free all allocated blocks simultaneously. - @see PaUtil_FreeAllAllocations -*/ -void PaUtil_GroupFreeMemory( PaUtilAllocationGroup* group, void *buffer ); - -/** Free all blocks of memory which have been allocated through the allocation - group. This function doesn't destroy the group itself. -*/ -void PaUtil_FreeAllAllocations( PaUtilAllocationGroup* group ); - - -#ifdef __cplusplus -} -#endif /* __cplusplus */ -#endif /* PA_ALLOCATION_H */ diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_table.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_table.py deleted file mode 100644 index 328fd020c1ee7255f7c6fa8c827042605513575f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_table.py +++ /dev/null @@ -1,231 +0,0 @@ -import numpy as np -import pytest - -import matplotlib.pyplot as plt -import matplotlib as mpl -from matplotlib.path import Path -from matplotlib.table import CustomCell, Table -from matplotlib.testing.decorators import image_comparison, check_figures_equal -from matplotlib.transforms import Bbox - - -def test_non_square(): - # Check that creating a non-square table works - cellcolors = ['b', 'r'] - plt.table(cellColours=cellcolors) - - -@image_comparison(['table_zorder.png'], remove_text=True) -def test_zorder(): - data = [[66386, 174296], - [58230, 381139]] - - colLabels = ('Freeze', 'Wind') - rowLabels = ['%d year' % x for x in (100, 50)] - - cellText = [] - yoff = np.zeros(len(colLabels)) - for row in reversed(data): - yoff += row - cellText.append(['%1.1f' % (x/1000.0) for x in yoff]) - - t = np.linspace(0, 2*np.pi, 100) - plt.plot(t, np.cos(t), lw=4, zorder=2) - - plt.table(cellText=cellText, - rowLabels=rowLabels, - colLabels=colLabels, - loc='center', - zorder=-2, - ) - - plt.table(cellText=cellText, - rowLabels=rowLabels, - colLabels=colLabels, - loc='upper center', - zorder=4, - ) - plt.yticks([]) - - -@image_comparison(['table_labels.png']) -def test_label_colours(): - dim = 3 - - c = np.linspace(0, 1, dim) - colours = plt.cm.RdYlGn(c) - cellText = [['1'] * dim] * dim - - fig = plt.figure() - - ax1 = fig.add_subplot(4, 1, 1) - ax1.axis('off') - ax1.table(cellText=cellText, - rowColours=colours, - loc='best') - - ax2 = fig.add_subplot(4, 1, 2) - ax2.axis('off') - ax2.table(cellText=cellText, - rowColours=colours, - rowLabels=['Header'] * dim, - loc='best') - - ax3 = fig.add_subplot(4, 1, 3) - ax3.axis('off') - ax3.table(cellText=cellText, - colColours=colours, - loc='best') - - ax4 = fig.add_subplot(4, 1, 4) - ax4.axis('off') - ax4.table(cellText=cellText, - colColours=colours, - colLabels=['Header'] * dim, - loc='best') - - -@image_comparison(['table_cell_manipulation.png'], remove_text=True) -def test_diff_cell_table(): - cells = ('horizontal', 'vertical', 'open', 'closed', 'T', 'R', 'B', 'L') - cellText = 
[['1'] * len(cells)] * 2 - colWidths = [0.1] * len(cells) - - _, axs = plt.subplots(nrows=len(cells), figsize=(4, len(cells)+1)) - for ax, cell in zip(axs, cells): - ax.table( - colWidths=colWidths, - cellText=cellText, - loc='center', - edges=cell, - ) - ax.axis('off') - plt.tight_layout() - - -def test_customcell(): - types = ('horizontal', 'vertical', 'open', 'closed', 'T', 'R', 'B', 'L') - codes = ( - (Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO, Path.MOVETO), - (Path.MOVETO, Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO), - (Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.MOVETO), - (Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY), - (Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.LINETO, Path.MOVETO), - (Path.MOVETO, Path.MOVETO, Path.LINETO, Path.MOVETO, Path.MOVETO), - (Path.MOVETO, Path.LINETO, Path.MOVETO, Path.MOVETO, Path.MOVETO), - (Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.LINETO), - ) - - for t, c in zip(types, codes): - cell = CustomCell((0, 0), visible_edges=t, width=1, height=1) - code = tuple(s for _, s in cell.get_path().iter_segments()) - assert c == code - - -@image_comparison(['table_auto_column.png']) -def test_auto_column(): - fig = plt.figure() - - # iterable list input - ax1 = fig.add_subplot(4, 1, 1) - ax1.axis('off') - tb1 = ax1.table( - cellText=[['Fit Text', 2], - ['very long long text, Longer text than default', 1]], - rowLabels=["A", "B"], - colLabels=["Col1", "Col2"], - loc="center") - tb1.auto_set_font_size(False) - tb1.set_fontsize(12) - tb1.auto_set_column_width([-1, 0, 1]) - - # iterable tuple input - ax2 = fig.add_subplot(4, 1, 2) - ax2.axis('off') - tb2 = ax2.table( - cellText=[['Fit Text', 2], - ['very long long text, Longer text than default', 1]], - rowLabels=["A", "B"], - colLabels=["Col1", "Col2"], - loc="center") - tb2.auto_set_font_size(False) - tb2.set_fontsize(12) - tb2.auto_set_column_width((-1, 0, 1)) - - # 3 single inputs - ax3 = fig.add_subplot(4, 1, 3) - ax3.axis('off') - tb3 = ax3.table( - cellText=[['Fit Text', 2], - ['very long long text, Longer text than default', 1]], - rowLabels=["A", "B"], - colLabels=["Col1", "Col2"], - loc="center") - tb3.auto_set_font_size(False) - tb3.set_fontsize(12) - tb3.auto_set_column_width(-1) - tb3.auto_set_column_width(0) - tb3.auto_set_column_width(1) - - # 4 non integer iterable input - ax4 = fig.add_subplot(4, 1, 4) - ax4.axis('off') - tb4 = ax4.table( - cellText=[['Fit Text', 2], - ['very long long text, Longer text than default', 1]], - rowLabels=["A", "B"], - colLabels=["Col1", "Col2"], - loc="center") - tb4.auto_set_font_size(False) - tb4.set_fontsize(12) - with pytest.warns(mpl.MatplotlibDeprecationWarning, - match="'col' must be an int or sequence of ints"): - tb4.auto_set_column_width("-101") # type: ignore [arg-type] - with pytest.warns(mpl.MatplotlibDeprecationWarning, - match="'col' must be an int or sequence of ints"): - tb4.auto_set_column_width(["-101"]) # type: ignore [list-item] - - -def test_table_cells(): - fig, ax = plt.subplots() - table = Table(ax) - - cell = table.add_cell(1, 2, 1, 1) - assert isinstance(cell, CustomCell) - assert cell is table[1, 2] - - cell2 = CustomCell((0, 0), 1, 2, visible_edges=None) - table[2, 1] = cell2 - assert table[2, 1] is cell2 - - # make sure getitem support has not broken - # properties and setp - table.properties() - plt.setp(table) - - -@check_figures_equal(extensions=["png"]) -def test_table_bbox(fig_test, fig_ref): - data = [[2, 3], - [4, 5]] - - col_labels = ('Foo', 'Bar') - row_labels = 
('Ada', 'Bob') - - cell_text = [[f"{x}" for x in row] for row in data] - - ax_list = fig_test.subplots() - ax_list.table(cellText=cell_text, - rowLabels=row_labels, - colLabels=col_labels, - loc='center', - bbox=[0.1, 0.2, 0.8, 0.6] - ) - - ax_bbox = fig_ref.subplots() - ax_bbox.table(cellText=cell_text, - rowLabels=row_labels, - colLabels=col_labels, - loc='center', - bbox=Bbox.from_extents(0.1, 0.2, 0.9, 0.8) - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/capi_maps.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/capi_maps.py deleted file mode 100644 index 32b6db5c59359da5977cde2dd8d8c2ed85c29a47..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/capi_maps.py +++ /dev/null @@ -1,838 +0,0 @@ -#!/usr/bin/env python3 -""" - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson <pearu@ioc.ee> -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 10:57:33 $ -Pearu Peterson - -""" -from . import __version__ -f2py_version = __version__.version - -import copy -import re -import os -from .crackfortran import markoutercomma -from . import cb_rules -from ._isocbind import iso_c_binding_map - -# The environment provided by auxfuncs.py is needed for some calls to eval. -# As the needed functions cannot be determined by static inspection of the -# code, it is safest to use import * pending a major refactoring of f2py. -from .auxfuncs import * - -__all__ = [ - 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', - 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map', - 'cb_sign2map', 'cb_routsign2map', 'common_sign2map' -] - - -depargs = [] -lcb_map = {} -lcb2_map = {} -# forced casting: mainly caused by the fact that Python or Numeric -# C/APIs do not support the corresponding C types. 
-c2py_map = {'double': 'float', - 'float': 'float', # forced casting - 'long_double': 'float', # forced casting - 'char': 'int', # forced casting - 'signed_char': 'int', # forced casting - 'unsigned_char': 'int', # forced casting - 'short': 'int', # forced casting - 'unsigned_short': 'int', # forced casting - 'int': 'int', # forced casting - 'long': 'int', - 'long_long': 'long', - 'unsigned': 'int', # forced casting - 'complex_float': 'complex', # forced casting - 'complex_double': 'complex', - 'complex_long_double': 'complex', # forced casting - 'string': 'string', - 'character': 'bytes', - } - -c2capi_map = {'double': 'NPY_DOUBLE', - 'float': 'NPY_FLOAT', - 'long_double': 'NPY_LONGDOUBLE', - 'char': 'NPY_BYTE', - 'unsigned_char': 'NPY_UBYTE', - 'signed_char': 'NPY_BYTE', - 'short': 'NPY_SHORT', - 'unsigned_short': 'NPY_USHORT', - 'int': 'NPY_INT', - 'unsigned': 'NPY_UINT', - 'long': 'NPY_LONG', - 'unsigned_long': 'NPY_ULONG', - 'long_long': 'NPY_LONGLONG', - 'unsigned_long_long': 'NPY_ULONGLONG', - 'complex_float': 'NPY_CFLOAT', - 'complex_double': 'NPY_CDOUBLE', - 'complex_long_double': 'NPY_CDOUBLE', - 'string': 'NPY_STRING', - 'character': 'NPY_STRING'} - -c2pycode_map = {'double': 'd', - 'float': 'f', - 'long_double': 'g', - 'char': 'b', - 'unsigned_char': 'B', - 'signed_char': 'b', - 'short': 'h', - 'unsigned_short': 'H', - 'int': 'i', - 'unsigned': 'I', - 'long': 'l', - 'unsigned_long': 'L', - 'long_long': 'q', - 'unsigned_long_long': 'Q', - 'complex_float': 'F', - 'complex_double': 'D', - 'complex_long_double': 'G', - 'string': 'S', - 'character': 'c'} - -# https://docs.python.org/3/c-api/arg.html#building-values -c2buildvalue_map = {'double': 'd', - 'float': 'f', - 'char': 'b', - 'signed_char': 'b', - 'short': 'h', - 'int': 'i', - 'long': 'l', - 'long_long': 'L', - 'complex_float': 'N', - 'complex_double': 'N', - 'complex_long_double': 'N', - 'string': 'y', - 'character': 'c'} - -f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', - '12': 'long_double', '16': 'long_double'}, - 'integer': {'': 'int', '1': 'signed_char', '2': 'short', - '4': 'int', '8': 'long_long', - '-1': 'unsigned_char', '-2': 'unsigned_short', - '-4': 'unsigned', '-8': 'unsigned_long_long'}, - 'complex': {'': 'complex_float', '8': 'complex_float', - '16': 'complex_double', '24': 'complex_long_double', - '32': 'complex_long_double'}, - 'complexkind': {'': 'complex_float', '4': 'complex_float', - '8': 'complex_double', '12': 'complex_long_double', - '16': 'complex_long_double'}, - 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', - '8': 'long_long'}, - 'double complex': {'': 'complex_double'}, - 'double precision': {'': 'double'}, - 'byte': {'': 'char'}, - } - -f2cmap_all = deep_merge(f2cmap_all, iso_c_binding_map) -f2cmap_default = copy.deepcopy(f2cmap_all) - -f2cmap_mapped = [] - -def load_f2cmap_file(f2cmap_file): - global f2cmap_all - - f2cmap_all = copy.deepcopy(f2cmap_default) - - if f2cmap_file is None: - # Default value - f2cmap_file = '.f2py_f2cmap' - if not os.path.isfile(f2cmap_file): - return - - # User defined additions to f2cmap_all. - # f2cmap_file must contain a dictionary of dictionaries, only. For - # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is - # interpreted as C 'float'. This feature is useful for F90/95 users if - # they use PARAMETERS in type specifications. 
- try: - outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file)) - with open(f2cmap_file) as f: - d = eval(f.read().lower(), {}, {}) - for k, d1 in d.items(): - for k1 in d1.keys(): - d1[k1.lower()] = d1[k1] - d[k.lower()] = d[k] - for k in d.keys(): - if k not in f2cmap_all: - f2cmap_all[k] = {} - for k1 in d[k].keys(): - if d[k][k1] in c2py_map: - if k1 in f2cmap_all[k]: - outmess( - "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" % (k, k1, f2cmap_all[k][k1], d[k][k1])) - f2cmap_all[k][k1] = d[k][k1] - outmess('\tMapping "%s(kind=%s)" to "%s"\n' % - (k, k1, d[k][k1])) - f2cmap_mapped.append(d[k][k1]) - else: - errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % ( - k, k1, d[k][k1], d[k][k1], list(c2py_map.keys()))) - outmess('Successfully applied user defined f2cmap changes\n') - except Exception as msg: - errmess( - 'Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg)) - -cformat_map = {'double': '%g', - 'float': '%g', - 'long_double': '%Lg', - 'char': '%d', - 'signed_char': '%d', - 'unsigned_char': '%hhu', - 'short': '%hd', - 'unsigned_short': '%hu', - 'int': '%d', - 'unsigned': '%u', - 'long': '%ld', - 'unsigned_long': '%lu', - 'long_long': '%ld', - 'complex_float': '(%g,%g)', - 'complex_double': '(%g,%g)', - 'complex_long_double': '(%Lg,%Lg)', - 'string': '\\"%s\\"', - 'character': "'%c'", - } - -# Auxiliary functions - - -def getctype(var): - """ - Determines C type - """ - ctype = 'void' - if isfunction(var): - if 'result' in var: - a = var['result'] - else: - a = var['name'] - if a in var['vars']: - return getctype(var['vars'][a]) - else: - errmess('getctype: function %s has no return value?!\n' % a) - elif issubroutine(var): - return ctype - elif ischaracter_or_characterarray(var): - return 'character' - elif isstring_or_stringarray(var): - return 'string' - elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: - typespec = var['typespec'].lower() - f2cmap = f2cmap_all[typespec] - ctype = f2cmap[''] # default type - if 'kindselector' in var: - if '*' in var['kindselector']: - try: - ctype = f2cmap[var['kindselector']['*']] - except KeyError: - errmess('getctype: "%s %s %s" not supported.\n' % - (var['typespec'], '*', var['kindselector']['*'])) - elif 'kind' in var['kindselector']: - if typespec + 'kind' in f2cmap_all: - f2cmap = f2cmap_all[typespec + 'kind'] - try: - ctype = f2cmap[var['kindselector']['kind']] - except KeyError: - if typespec in f2cmap_all: - f2cmap = f2cmap_all[typespec] - try: - ctype = f2cmap[str(var['kindselector']['kind'])] - except KeyError: - errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="<C typespec>")) in %s/.f2py_f2cmap file).\n' - % (typespec, var['kindselector']['kind'], ctype, - typespec, var['kindselector']['kind'], os.getcwd())) - else: - if not isexternal(var): - errmess('getctype: No C-type found in "%s", assuming void.\n' % var) - return ctype - - -def f2cexpr(expr): - """Rewrite Fortran expression as f2py supported C expression. - - Due to the lack of a proper expression parser in f2py, this - function uses a heuristic approach that assumes that Fortran - arithmetic expressions are valid C arithmetic expressions when - mapping Fortran function calls to the corresponding C function/CPP - macros calls. 
- - """ - # TODO: support Fortran `len` function with optional kind parameter - expr = re.sub(r'\blen\b', 'f2py_slen', expr) - return expr - - -def getstrlength(var): - if isstringfunction(var): - if 'result' in var: - a = var['result'] - else: - a = var['name'] - if a in var['vars']: - return getstrlength(var['vars'][a]) - else: - errmess('getstrlength: function %s has no return value?!\n' % a) - if not isstring(var): - errmess( - 'getstrlength: expected a signature of a string but got: %s\n' % (repr(var))) - len = '1' - if 'charselector' in var: - a = var['charselector'] - if '*' in a: - len = a['*'] - elif 'len' in a: - len = f2cexpr(a['len']) - if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len): - if isintent_hide(var): - errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( - repr(var))) - len = '-1' - return len - - -def getarrdims(a, var, verbose=0): - ret = {} - if isstring(var) and not isarray(var): - ret['size'] = getstrlength(var) - ret['rank'] = '0' - ret['dims'] = '' - elif isscalar(var): - ret['size'] = '1' - ret['rank'] = '0' - ret['dims'] = '' - elif isarray(var): - dim = copy.copy(var['dimension']) - ret['size'] = '*'.join(dim) - try: - ret['size'] = repr(eval(ret['size'])) - except Exception: - pass - ret['dims'] = ','.join(dim) - ret['rank'] = repr(len(dim)) - ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1] - for i in range(len(dim)): # solve dim for dependencies - v = [] - if dim[i] in depargs: - v = [dim[i]] - else: - for va in depargs: - if re.match(r'.*?\b%s\b.*' % va, dim[i]): - v.append(va) - for va in v: - if depargs.index(va) > depargs.index(a): - dim[i] = '*' - break - ret['setdims'], i = '', -1 - for d in dim: - i = i + 1 - if d not in ['*', ':', '(*)', '(:)']: - ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['setdims'], i, d) - if ret['setdims']: - ret['setdims'] = ret['setdims'][:-1] - ret['cbsetdims'], i = '', -1 - for d in var['dimension']: - i = i + 1 - if d not in ['*', ':', '(*)', '(:)']: - ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['cbsetdims'], i, d) - elif isintent_in(var): - outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' - % (d)) - ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['cbsetdims'], i, 0) - elif verbose: - errmess( - 'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d))) - if ret['cbsetdims']: - ret['cbsetdims'] = ret['cbsetdims'][:-1] -# if not isintent_c(var): -# var['dimension'].reverse() - return ret - - -def getpydocsign(a, var): - global lcb_map - if isfunction(var): - if 'result' in var: - af = var['result'] - else: - af = var['name'] - if af in var['vars']: - return getpydocsign(af, var['vars'][af]) - else: - errmess('getctype: function %s has no return value?!\n' % af) - return '', '' - sig, sigout = a, a - opt = '' - if isintent_in(var): - opt = 'input' - elif isintent_inout(var): - opt = 'in/output' - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4] == 'out=': - out_a = k[4:] - break - init = '' - ctype = getctype(var) - - if hasinitvalue(var): - init, showinit = getinit(a, var) - init = ', optional\\n Default: %s' % showinit - if isscalar(var): - if isintent_inout(var): - sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], - c2pycode_map[ctype], init) - else: - sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init) - sigout = '%s : %s' % (out_a, c2py_map[ctype]) - elif isstring(var): - if isintent_inout(var): - sig = 
'%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( - a, opt, getstrlength(var), init) - else: - sig = '%s : %s string(len=%s)%s' % ( - a, opt, getstrlength(var), init) - sigout = '%s : string(len=%s)' % (out_a, getstrlength(var)) - elif isarray(var): - dim = var['dimension'] - rank = repr(len(dim)) - sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, - c2pycode_map[ - ctype], - ','.join(dim), init) - if a == out_a: - sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ - % (a, rank, c2pycode_map[ctype], ','.join(dim)) - else: - sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ - % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) - elif isexternal(var): - ua = '' - if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: - ua = lcb2_map[lcb_map[a]]['argname'] - if not ua == a: - ua = ' => %s' % ua - else: - ua = '' - sig = '%s : call-back function%s' % (a, ua) - sigout = sig - else: - errmess( - 'getpydocsign: Could not resolve docsignature for "%s".\n' % a) - return sig, sigout - - -def getarrdocsign(a, var): - ctype = getctype(var) - if isstring(var) and (not isarray(var)): - sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a, - getstrlength(var)) - elif isscalar(var): - sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype], - c2pycode_map[ctype],) - elif isarray(var): - dim = var['dimension'] - rank = repr(len(dim)) - sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, - c2pycode_map[ - ctype], - ','.join(dim)) - return sig - - -def getinit(a, var): - if isstring(var): - init, showinit = '""', "''" - else: - init, showinit = '', '' - if hasinitvalue(var): - init = var['='] - showinit = init - if iscomplex(var) or iscomplexarray(var): - ret = {} - - try: - v = var["="] - if ',' in v: - ret['init.r'], ret['init.i'] = markoutercomma( - v[1:-1]).split('@,@') - else: - v = eval(v, {}, {}) - ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) - except Exception: - raise ValueError( - 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) - if isarray(var): - init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % ( - ret['init.r'], ret['init.i']) - elif isstring(var): - if not init: - init, showinit = '""', "''" - if init[0] == "'": - init = '"%s"' % (init[1:-1].replace('"', '\\"')) - if init[0] == '"': - showinit = "'%s'" % (init[1:-1]) - return init, showinit - - -def get_elsize(var): - if isstring(var) or isstringarray(var): - elsize = getstrlength(var) - # override with user-specified length when available: - elsize = var['charselector'].get('f2py_len', elsize) - return elsize - if ischaracter(var) or ischaracterarray(var): - return '1' - # for numerical types, PyArray_New* functions ignore specified - # elsize, so we just return 1 and let elsize be determined at - # runtime, see fortranobject.c - return '1' - - -def sign2map(a, var): - """ - varname,ctype,atype - init,init.r,init.i,pytype - vardebuginfo,vardebugshowvalue,varshowvalue - varrformat - - intent - """ - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4] == 'out=': - out_a = k[4:] - break - ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} - intent_flags = [] - for f, s in isintent_dict.items(): - if f(var): - intent_flags.append('F2PY_%s' % s) - if intent_flags: - # TODO: Evaluate intent_flags here. 
- ret['intent'] = '|'.join(intent_flags) - else: - ret['intent'] = 'F2PY_INTENT_IN' - if isarray(var): - ret['varrformat'] = 'N' - elif ret['ctype'] in c2buildvalue_map: - ret['varrformat'] = c2buildvalue_map[ret['ctype']] - else: - ret['varrformat'] = 'O' - ret['init'], ret['showinit'] = getinit(a, var) - if hasinitvalue(var) and iscomplex(var) and not isarray(var): - ret['init.r'], ret['init.i'] = markoutercomma( - ret['init'][1:-1]).split('@,@') - if isexternal(var): - ret['cbnamekey'] = a - if a in lcb_map: - ret['cbname'] = lcb_map[a] - ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs'] - ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs'] - ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr'] - ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] - else: - ret['cbname'] = a - errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( - a, list(lcb_map.keys()))) - if isstring(var): - ret['length'] = getstrlength(var) - if isarray(var): - ret = dictappend(ret, getarrdims(a, var)) - dim = copy.copy(var['dimension']) - if ret['ctype'] in c2capi_map: - ret['atype'] = c2capi_map[ret['ctype']] - ret['elsize'] = get_elsize(var) - # Debug info - if debugcapi(var): - il = [isintent_in, 'input', isintent_out, 'output', - isintent_inout, 'inoutput', isrequired, 'required', - isoptional, 'optional', isintent_hide, 'hidden', - iscomplex, 'complex scalar', - l_and(isscalar, l_not(iscomplex)), 'scalar', - isstring, 'string', isarray, 'array', - iscomplexarray, 'complex array', isstringarray, 'string array', - iscomplexfunction, 'complex function', - l_and(isfunction, l_not(iscomplexfunction)), 'function', - isexternal, 'callback', - isintent_callback, 'callback', - isintent_aux, 'auxiliary', - ] - rl = [] - for i in range(0, len(il), 2): - if il[i](var): - rl.append(il[i + 1]) - if isstring(var): - rl.append('slen(%s)=%s' % (a, ret['length'])) - if isarray(var): - ddim = ','.join( - map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim)) - rl.append('dims(%s)' % ddim) - if isexternal(var): - ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % ( - a, ret['cbname'], ','.join(rl)) - else: - ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( - ret['ctype'], a, ret['showinit'], ','.join(rl)) - if isscalar(var): - if ret['ctype'] in cformat_map: - ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % ( - a, cformat_map[ret['ctype']]) - if isstring(var): - ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( - a, a) - if isexternal(var): - ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a) - if ret['ctype'] in cformat_map: - ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']]) - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isstring(var): - ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) - if hasnote(var): - ret['note'] = var['note'] - return ret - - -def routsign2map(rout): - """ - name,NAME,begintitle,endtitle - rname,ctype,rformat - routdebugshowvalue - """ - global lcb_map - name = rout['name'] - fname = getfortranname(rout) - ret = {'name': name, - 'texname': name.replace('_', '\\_'), - 'name_lower': name.lower(), - 'NAME': name.upper(), - 'begintitle': gentitle(name), - 'endtitle': gentitle('end of %s' % name), - 'fortranname': fname, - 'FORTRANNAME': fname.upper(), - 'callstatement': getcallstatement(rout) or '', - 'usercode': getusercode(rout) or '', - 'usercode1': getusercode1(rout) or '', - } - if '_' in fname: - ret['F_FUNC'] = 'F_FUNC_US' - else: - 
ret['F_FUNC'] = 'F_FUNC' - if '_' in name: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' - else: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' - lcb_map = {} - if 'use' in rout: - for u in rout['use'].keys(): - if u in cb_rules.cb_map: - for un in cb_rules.cb_map[u]: - ln = un[0] - if 'map' in rout['use'][u]: - for k in rout['use'][u]['map'].keys(): - if rout['use'][u]['map'][k] == un[0]: - ln = k - break - lcb_map[ln] = un[1] - elif 'externals' in rout and rout['externals']: - errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( - ret['name'], repr(rout['externals']))) - ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' - if isfunction(rout): - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - ret['rname'] = a - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) - ret['ctype'] = getctype(rout['vars'][a]) - if hasresultnote(rout): - ret['resultnote'] = rout['vars'][a]['note'] - rout['vars'][a]['note'] = ['See elsewhere.'] - if ret['ctype'] in c2buildvalue_map: - ret['rformat'] = c2buildvalue_map[ret['ctype']] - else: - ret['rformat'] = 'O' - errmess('routsign2map: no c2buildvalue key for type %s\n' % - (repr(ret['ctype']))) - if debugcapi(rout): - if ret['ctype'] in cformat_map: - ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( - a, cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( - a, a) - if isstringfunction(rout): - ret['rlength'] = getstrlength(rout['vars'][a]) - if ret['rlength'] == '-1': - errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( - repr(rout['name']))) - ret['rlength'] = '10' - if hasnote(rout): - ret['note'] = rout['note'] - rout['note'] = ['See elsewhere.'] - return ret - - -def modsign2map(m): - """ - modulename - """ - if ismodule(m): - ret = {'f90modulename': m['name'], - 'F90MODULENAME': m['name'].upper(), - 'texf90modulename': m['name'].replace('_', '\\_')} - else: - ret = {'modulename': m['name'], - 'MODULENAME': m['name'].upper(), - 'texmodulename': m['name'].replace('_', '\\_')} - ret['restdoc'] = getrestdoc(m) or [] - if hasnote(m): - ret['note'] = m['note'] - ret['usercode'] = getusercode(m) or '' - ret['usercode1'] = getusercode1(m) or '' - if m['body']: - ret['interface_usercode'] = getusercode(m['body'][0]) or '' - else: - ret['interface_usercode'] = '' - ret['pymethoddef'] = getpymethoddef(m) or '' - if 'coutput' in m: - ret['coutput'] = m['coutput'] - if 'f2py_wrapper_output' in m: - ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] - return ret - - -def cb_sign2map(a, var, index=None): - ret = {'varname': a} - ret['varname_i'] = ret['varname'] - ret['ctype'] = getctype(var) - if ret['ctype'] in c2capi_map: - ret['atype'] = c2capi_map[ret['ctype']] - ret['elsize'] = get_elsize(var) - if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isarray(var): - ret = dictappend(ret, getarrdims(a, var)) - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) - if hasnote(var): - ret['note'] = var['note'] - var['note'] = ['See elsewhere.'] - return ret - - -def cb_routsign2map(rout, um): - """ - name,begintitle,endtitle,argname - ctype,rctype,maxnofargs,nofoptargs,returncptr - """ - ret = {'name': 'cb_%s_in_%s' % (rout['name'], um), - 'returncptr': ''} - if isintent_callback(rout): - if '_' in rout['name']: - F_FUNC = 'F_FUNC_US' - else: - F_FUNC = 'F_FUNC' - 
ret['callbackname'] = '%s(%s,%s)' \ - % (F_FUNC, - rout['name'].lower(), - rout['name'].upper(), - ) - ret['static'] = 'extern' - else: - ret['callbackname'] = ret['name'] - ret['static'] = 'static' - ret['argname'] = rout['name'] - ret['begintitle'] = gentitle(ret['name']) - ret['endtitle'] = gentitle('end of %s' % ret['name']) - ret['ctype'] = getctype(rout) - ret['rctype'] = 'void' - if ret['ctype'] == 'string': - ret['rctype'] = 'void' - else: - ret['rctype'] = ret['ctype'] - if ret['rctype'] != 'void': - if iscomplexfunction(rout): - ret['returncptr'] = """ -#ifdef F2PY_CB_RETURNCOMPLEX -return_value= -#endif -""" - else: - ret['returncptr'] = 'return_value=' - if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['strlength'] = getstrlength(rout) - if isfunction(rout): - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if hasnote(rout['vars'][a]): - ret['note'] = rout['vars'][a]['note'] - rout['vars'][a]['note'] = ['See elsewhere.'] - ret['rname'] = a - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) - if iscomplexfunction(rout): - ret['rctype'] = """ -#ifdef F2PY_CB_RETURNCOMPLEX -#ctype# -#else -void -#endif -""" - else: - if hasnote(rout): - ret['note'] = rout['note'] - rout['note'] = ['See elsewhere.'] - nofargs = 0 - nofoptargs = 0 - if 'args' in rout and 'vars' in rout: - for a in rout['args']: - var = rout['vars'][a] - if l_or(isintent_in, isintent_inout)(var): - nofargs = nofargs + 1 - if isoptional(var): - nofoptargs = nofoptargs + 1 - ret['maxnofargs'] = repr(nofargs) - ret['nofoptargs'] = repr(nofoptargs) - if hasnote(rout) and isfunction(rout) and 'result' in rout: - ret['routnote'] = rout['note'] - rout['note'] = ['See elsewhere.'] - return ret - - -def common_sign2map(a, var): # obsolute - ret = {'varname': a, 'ctype': getctype(var)} - if isstringarray(var): - ret['ctype'] = 'char' - if ret['ctype'] in c2capi_map: - ret['atype'] = c2capi_map[ret['ctype']] - ret['elsize'] = get_elsize(var) - if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isarray(var): - ret = dictappend(ret, getarrdims(a, var)) - elif isstring(var): - ret['size'] = getstrlength(var) - ret['rank'] = '1' - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) - if hasnote(var): - ret['note'] = var['note'] - var['note'] = ['See elsewhere.'] - # for strings this returns 0-rank but actually is 1-rank - ret['arrdocstr'] = getarrdocsign(a, var) - return ret diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/datalib/common.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/datalib/common.py deleted file mode 100644 index 96f9908a18ab7b3b3e2064c0dab856090364f266..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/datalib/common.py +++ /dev/null @@ -1,17 +0,0 @@ -INSTRUCTIONS = """ - -OpenAI error: - - missing `{library}` - -This feature requires additional dependencies: - - $ pip install openai[datalib] - -""" - -NUMPY_INSTRUCTIONS = INSTRUCTIONS.format(library="numpy") - - -class MissingDependencyError(Exception): - pass diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/groupby/transform/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/groupby/transform/__init__.py deleted file mode 100644 index 
e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_monotonic.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_monotonic.py deleted file mode 100644 index 2b0b3f7cb36d72abedc538eda9e6a85eb45067e2..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_monotonic.py +++ /dev/null @@ -1,188 +0,0 @@ -import numpy as np -import pytest - -from pandas import ( - Index, - MultiIndex, -) - - -def test_is_monotonic_increasing_lexsorted(lexsorted_two_level_string_multiindex): - # string ordering - mi = lexsorted_two_level_string_multiindex - assert mi.is_monotonic_increasing is False - assert Index(mi.values).is_monotonic_increasing is False - assert mi._is_strictly_monotonic_increasing is False - assert Index(mi.values)._is_strictly_monotonic_increasing is False - - -def test_is_monotonic_increasing(): - i = MultiIndex.from_product([np.arange(10), np.arange(10)], names=["one", "two"]) - assert i.is_monotonic_increasing is True - assert i._is_strictly_monotonic_increasing is True - assert Index(i.values).is_monotonic_increasing is True - assert i._is_strictly_monotonic_increasing is True - - i = MultiIndex.from_product( - [np.arange(10, 0, -1), np.arange(10)], names=["one", "two"] - ) - assert i.is_monotonic_increasing is False - assert i._is_strictly_monotonic_increasing is False - assert Index(i.values).is_monotonic_increasing is False - assert Index(i.values)._is_strictly_monotonic_increasing is False - - i = MultiIndex.from_product( - [np.arange(10), np.arange(10, 0, -1)], names=["one", "two"] - ) - assert i.is_monotonic_increasing is False - assert i._is_strictly_monotonic_increasing is False - assert Index(i.values).is_monotonic_increasing is False - assert Index(i.values)._is_strictly_monotonic_increasing is False - - i = MultiIndex.from_product([[1.0, np.nan, 2.0], ["a", "b", "c"]]) - assert i.is_monotonic_increasing is False - assert i._is_strictly_monotonic_increasing is False - assert Index(i.values).is_monotonic_increasing is False - assert Index(i.values)._is_strictly_monotonic_increasing is False - - i = MultiIndex( - levels=[["bar", "baz", "foo", "qux"], ["mom", "next", "zenith"]], - codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=["first", "second"], - ) - assert i.is_monotonic_increasing is True - assert Index(i.values).is_monotonic_increasing is True - assert i._is_strictly_monotonic_increasing is True - assert Index(i.values)._is_strictly_monotonic_increasing is True - - # mixed levels, hits the TypeError - i = MultiIndex( - levels=[ - [1, 2, 3, 4], - [ - "gb00b03mlx29", - "lu0197800237", - "nl0000289783", - "nl0000289965", - "nl0000301109", - ], - ], - codes=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]], - names=["household_id", "asset_id"], - ) - - assert i.is_monotonic_increasing is False - assert i._is_strictly_monotonic_increasing is False - - # empty - i = MultiIndex.from_arrays([[], []]) - assert i.is_monotonic_increasing is True - assert Index(i.values).is_monotonic_increasing is True - assert i._is_strictly_monotonic_increasing is True - assert Index(i.values)._is_strictly_monotonic_increasing is True - - -def test_is_monotonic_decreasing(): - i = MultiIndex.from_product( - [np.arange(9, -1, -1), np.arange(9, -1, -1)], names=["one", "two"] - ) - assert 
i.is_monotonic_decreasing is True - assert i._is_strictly_monotonic_decreasing is True - assert Index(i.values).is_monotonic_decreasing is True - assert i._is_strictly_monotonic_decreasing is True - - i = MultiIndex.from_product( - [np.arange(10), np.arange(10, 0, -1)], names=["one", "two"] - ) - assert i.is_monotonic_decreasing is False - assert i._is_strictly_monotonic_decreasing is False - assert Index(i.values).is_monotonic_decreasing is False - assert Index(i.values)._is_strictly_monotonic_decreasing is False - - i = MultiIndex.from_product( - [np.arange(10, 0, -1), np.arange(10)], names=["one", "two"] - ) - assert i.is_monotonic_decreasing is False - assert i._is_strictly_monotonic_decreasing is False - assert Index(i.values).is_monotonic_decreasing is False - assert Index(i.values)._is_strictly_monotonic_decreasing is False - - i = MultiIndex.from_product([[2.0, np.nan, 1.0], ["c", "b", "a"]]) - assert i.is_monotonic_decreasing is False - assert i._is_strictly_monotonic_decreasing is False - assert Index(i.values).is_monotonic_decreasing is False - assert Index(i.values)._is_strictly_monotonic_decreasing is False - - # string ordering - i = MultiIndex( - levels=[["qux", "foo", "baz", "bar"], ["three", "two", "one"]], - codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=["first", "second"], - ) - assert i.is_monotonic_decreasing is False - assert Index(i.values).is_monotonic_decreasing is False - assert i._is_strictly_monotonic_decreasing is False - assert Index(i.values)._is_strictly_monotonic_decreasing is False - - i = MultiIndex( - levels=[["qux", "foo", "baz", "bar"], ["zenith", "next", "mom"]], - codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=["first", "second"], - ) - assert i.is_monotonic_decreasing is True - assert Index(i.values).is_monotonic_decreasing is True - assert i._is_strictly_monotonic_decreasing is True - assert Index(i.values)._is_strictly_monotonic_decreasing is True - - # mixed levels, hits the TypeError - i = MultiIndex( - levels=[ - [4, 3, 2, 1], - [ - "nl0000301109", - "nl0000289965", - "nl0000289783", - "lu0197800237", - "gb00b03mlx29", - ], - ], - codes=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]], - names=["household_id", "asset_id"], - ) - - assert i.is_monotonic_decreasing is False - assert i._is_strictly_monotonic_decreasing is False - - # empty - i = MultiIndex.from_arrays([[], []]) - assert i.is_monotonic_decreasing is True - assert Index(i.values).is_monotonic_decreasing is True - assert i._is_strictly_monotonic_decreasing is True - assert Index(i.values)._is_strictly_monotonic_decreasing is True - - -def test_is_strictly_monotonic_increasing(): - idx = MultiIndex( - levels=[["bar", "baz"], ["mom", "next"]], codes=[[0, 0, 1, 1], [0, 0, 0, 1]] - ) - assert idx.is_monotonic_increasing is True - assert idx._is_strictly_monotonic_increasing is False - - -def test_is_strictly_monotonic_decreasing(): - idx = MultiIndex( - levels=[["baz", "bar"], ["next", "mom"]], codes=[[0, 0, 1, 1], [0, 0, 0, 1]] - ) - assert idx.is_monotonic_decreasing is True - assert idx._is_strictly_monotonic_decreasing is False - - -@pytest.mark.parametrize("attr", ["is_monotonic_increasing", "is_monotonic_decreasing"]) -@pytest.mark.parametrize( - "values", - [[(np.nan,), (1,), (2,)], [(1,), (np.nan,), (2,)], [(1,), (2,), (np.nan,)]], -) -def test_is_monotonic_with_nans(values, attr): - # GH: 37220 - idx = MultiIndex.from_tuples(values, names=["test"]) - assert getattr(idx, attr) is False diff --git 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_tooltip.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_tooltip.py deleted file mode 100644 index c49a0e05c67002ab0b6eebd1ffd3bda554622f4d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_tooltip.py +++ /dev/null @@ -1,85 +0,0 @@ -import numpy as np -import pytest - -from pandas import DataFrame - -pytest.importorskip("jinja2") -from pandas.io.formats.style import Styler - - -@pytest.fixture -def df(): - return DataFrame( - data=[[0, 1, 2], [3, 4, 5], [6, 7, 8]], - columns=["A", "B", "C"], - index=["x", "y", "z"], - ) - - -@pytest.fixture -def styler(df): - return Styler(df, uuid_len=0) - - -@pytest.mark.parametrize( - "ttips", - [ - DataFrame( # Test basic reindex and ignoring blank - data=[["Min", "Max"], [np.nan, ""]], - columns=["A", "C"], - index=["x", "y"], - ), - DataFrame( # Test non-referenced columns, reversed col names, short index - data=[["Max", "Min", "Bad-Col"]], columns=["C", "A", "D"], index=["x"] - ), - ], -) -def test_tooltip_render(ttips, styler): - # GH 21266 - result = styler.set_tooltips(ttips).to_html() - - # test tooltip table level class - assert "#T_ .pd-t {\n visibility: hidden;\n" in result - - # test 'Min' tooltip added - assert "#T_ #T__row0_col0:hover .pd-t {\n visibility: visible;\n}" in result - assert '#T_ #T__row0_col0 .pd-t::after {\n content: "Min";\n}' in result - assert 'class="data row0 col0" >0<span class="pd-t"></span></td>' in result - - # test 'Max' tooltip added - assert "#T_ #T__row0_col2:hover .pd-t {\n visibility: visible;\n}" in result - assert '#T_ #T__row0_col2 .pd-t::after {\n content: "Max";\n}' in result - assert 'class="data row0 col2" >2<span class="pd-t"></span></td>' in result - - # test Nan, empty string and bad column ignored - assert "#T_ #T__row1_col0:hover .pd-t {\n visibility: visible;\n}" not in result - assert "#T_ #T__row1_col1:hover .pd-t {\n visibility: visible;\n}" not in result - assert "#T_ #T__row0_col1:hover .pd-t {\n visibility: visible;\n}" not in result - assert "#T_ #T__row1_col2:hover .pd-t {\n visibility: visible;\n}" not in result - assert "Bad-Col" not in result - - -def test_tooltip_ignored(styler): - # GH 21266 - result = styler.to_html() # no set_tooltips() creates no <span> - assert '<style type="text/css">\n</style>' in result - assert '<span class="pd-t"></span>' not in result - - -def test_tooltip_css_class(styler): - # GH 21266 - result = styler.set_tooltips( - DataFrame([["tooltip"]], index=["x"], columns=["A"]), - css_class="other-class", - props=[("color", "green")], - ).to_html() - assert "#T_ .other-class {\n color: green;\n" in result - assert '#T_ #T__row0_col0 .other-class::after {\n content: "tooltip";\n' in result - - # GH 39563 - result = styler.set_tooltips( # set_tooltips overwrites previous - DataFrame([["tooltip"]], index=["x"], columns=["A"]), - css_class="another-class", - props="color:green;color:red;", - ).to_html() - assert "#T_ .another-class {\n color: green;\n color: red;\n}" in result diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/response.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/response.py deleted file mode 100644 index 38693f4fc6e33766f7a6b4f1227867ae86d2da32..0000000000000000000000000000000000000000 --- 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/response.py +++ /dev/null @@ -1,821 +0,0 @@ -from __future__ import absolute_import - -import io -import logging -import zlib -from contextlib import contextmanager -from socket import error as SocketError -from socket import timeout as SocketTimeout - -try: - import brotli -except ImportError: - brotli = None - -from ._collections import HTTPHeaderDict -from .connection import BaseSSLError, HTTPException -from .exceptions import ( - BodyNotHttplibCompatible, - DecodeError, - HTTPError, - IncompleteRead, - InvalidChunkLength, - InvalidHeader, - ProtocolError, - ReadTimeoutError, - ResponseNotChunked, - SSLError, -) -from .packages import six -from .util.response import is_fp_closed, is_response_to_head - -log = logging.getLogger(__name__) - - -class DeflateDecoder(object): - def __init__(self): - self._first_try = True - self._data = b"" - self._obj = zlib.decompressobj() - - def __getattr__(self, name): - return getattr(self._obj, name) - - def decompress(self, data): - if not data: - return data - - if not self._first_try: - return self._obj.decompress(data) - - self._data += data - try: - decompressed = self._obj.decompress(data) - if decompressed: - self._first_try = False - self._data = None - return decompressed - except zlib.error: - self._first_try = False - self._obj = zlib.decompressobj(-zlib.MAX_WBITS) - try: - return self.decompress(self._data) - finally: - self._data = None - - -class GzipDecoderState(object): - - FIRST_MEMBER = 0 - OTHER_MEMBERS = 1 - SWALLOW_DATA = 2 - - -class GzipDecoder(object): - def __init__(self): - self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) - self._state = GzipDecoderState.FIRST_MEMBER - - def __getattr__(self, name): - return getattr(self._obj, name) - - def decompress(self, data): - ret = bytearray() - if self._state == GzipDecoderState.SWALLOW_DATA or not data: - return bytes(ret) - while True: - try: - ret += self._obj.decompress(data) - except zlib.error: - previous_state = self._state - # Ignore data after the first error - self._state = GzipDecoderState.SWALLOW_DATA - if previous_state == GzipDecoderState.OTHER_MEMBERS: - # Allow trailing garbage acceptable in other gzip clients - return bytes(ret) - raise - data = self._obj.unused_data - if not data: - return bytes(ret) - self._state = GzipDecoderState.OTHER_MEMBERS - self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) - - -if brotli is not None: - - class BrotliDecoder(object): - # Supports both 'brotlipy' and 'Brotli' packages - # since they share an import name. The top branches - # are for 'brotlipy' and bottom branches for 'Brotli' - def __init__(self): - self._obj = brotli.Decompressor() - if hasattr(self._obj, "decompress"): - self.decompress = self._obj.decompress - else: - self.decompress = self._obj.process - - def flush(self): - if hasattr(self._obj, "flush"): - return self._obj.flush() - return b"" - - -class MultiDecoder(object): - """ - From RFC7231: - If one or more encodings have been applied to a representation, the - sender that applied the encodings MUST generate a Content-Encoding - header field that lists the content codings in the order in which - they were applied. 
- """ - - def __init__(self, modes): - self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")] - - def flush(self): - return self._decoders[0].flush() - - def decompress(self, data): - for d in reversed(self._decoders): - data = d.decompress(data) - return data - - -def _get_decoder(mode): - if "," in mode: - return MultiDecoder(mode) - - if mode == "gzip": - return GzipDecoder() - - if brotli is not None and mode == "br": - return BrotliDecoder() - - return DeflateDecoder() - - -class HTTPResponse(io.IOBase): - """ - HTTP Response container. - - Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is - loaded and decoded on-demand when the ``data`` property is accessed. This - class is also compatible with the Python standard library's :mod:`io` - module, and can hence be treated as a readable object in the context of that - framework. - - Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`: - - :param preload_content: - If True, the response's body will be preloaded during construction. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - - :param original_response: - When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse` - object, it's convenient to include the original for debug purposes. It's - otherwise unused. - - :param retries: - The retries contains the last :class:`~urllib3.util.retry.Retry` that - was used during the request. - - :param enforce_content_length: - Enforce content length checking. Body returned by server must match - value of Content-Length header, if present. Otherwise, raise error. - """ - - CONTENT_DECODERS = ["gzip", "deflate"] - if brotli is not None: - CONTENT_DECODERS += ["br"] - REDIRECT_STATUSES = [301, 302, 303, 307, 308] - - def __init__( - self, - body="", - headers=None, - status=0, - version=0, - reason=None, - strict=0, - preload_content=True, - decode_content=True, - original_response=None, - pool=None, - connection=None, - msg=None, - retries=None, - enforce_content_length=False, - request_method=None, - request_url=None, - auto_close=True, - ): - - if isinstance(headers, HTTPHeaderDict): - self.headers = headers - else: - self.headers = HTTPHeaderDict(headers) - self.status = status - self.version = version - self.reason = reason - self.strict = strict - self.decode_content = decode_content - self.retries = retries - self.enforce_content_length = enforce_content_length - self.auto_close = auto_close - - self._decoder = None - self._body = None - self._fp = None - self._original_response = original_response - self._fp_bytes_read = 0 - self.msg = msg - self._request_url = request_url - - if body and isinstance(body, (six.string_types, bytes)): - self._body = body - - self._pool = pool - self._connection = connection - - if hasattr(body, "read"): - self._fp = body - - # Are we using the chunked-style of transfer encoding? - self.chunked = False - self.chunk_left = None - tr_enc = self.headers.get("transfer-encoding", "").lower() - # Don't incur the penalty of creating a list and then discarding it - encodings = (enc.strip() for enc in tr_enc.split(",")) - if "chunked" in encodings: - self.chunked = True - - # Determine length of response - self.length_remaining = self._init_length(request_method) - - # If requested, preload the body. 
- if preload_content and not self._body: - self._body = self.read(decode_content=decode_content) - - def get_redirect_location(self): - """ - Should we redirect and where to? - - :returns: Truthy redirect location string if we got a redirect status - code and valid location. ``None`` if redirect status and no - location. ``False`` if not a redirect status code. - """ - if self.status in self.REDIRECT_STATUSES: - return self.headers.get("location") - - return False - - def release_conn(self): - if not self._pool or not self._connection: - return - - self._pool._put_conn(self._connection) - self._connection = None - - def drain_conn(self): - """ - Read and discard any remaining HTTP response data in the response connection. - - Unread data in the HTTPResponse connection blocks the connection from being released back to the pool. - """ - try: - self.read() - except (HTTPError, SocketError, BaseSSLError, HTTPException): - pass - - @property - def data(self): - # For backwards-compat with earlier urllib3 0.4 and earlier. - if self._body: - return self._body - - if self._fp: - return self.read(cache_content=True) - - @property - def connection(self): - return self._connection - - def isclosed(self): - return is_fp_closed(self._fp) - - def tell(self): - """ - Obtain the number of bytes pulled over the wire so far. May differ from - the amount of content returned by :meth:``urllib3.response.HTTPResponse.read`` - if bytes are encoded on the wire (e.g, compressed). - """ - return self._fp_bytes_read - - def _init_length(self, request_method): - """ - Set initial length value for Response content if available. - """ - length = self.headers.get("content-length") - - if length is not None: - if self.chunked: - # This Response will fail with an IncompleteRead if it can't be - # received as chunked. This method falls back to attempt reading - # the response before raising an exception. - log.warning( - "Received response with both Content-Length and " - "Transfer-Encoding set. This is expressly forbidden " - "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " - "attempting to process response as Transfer-Encoding: " - "chunked." - ) - return None - - try: - # RFC 7230 section 3.3.2 specifies multiple content lengths can - # be sent in a single Content-Length header - # (e.g. Content-Length: 42, 42). This line ensures the values - # are all valid ints and that as long as the `set` length is 1, - # all values are the same. Otherwise, the header is invalid. - lengths = set([int(val) for val in length.split(",")]) - if len(lengths) > 1: - raise InvalidHeader( - "Content-Length contained multiple " - "unmatching values (%s)" % length - ) - length = lengths.pop() - except ValueError: - length = None - else: - if length < 0: - length = None - - # Convert status to int for comparison - # In some cases, httplib returns a status of "_UNKNOWN" - try: - status = int(self.status) - except ValueError: - status = 0 - - # Check for responses that shouldn't include a body - if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD": - length = 0 - - return length - - def _init_decoder(self): - """ - Set-up the _decoder attribute if necessary. 
- """ - # Note: content-encoding value should be case-insensitive, per RFC 7230 - # Section 3.2 - content_encoding = self.headers.get("content-encoding", "").lower() - if self._decoder is None: - if content_encoding in self.CONTENT_DECODERS: - self._decoder = _get_decoder(content_encoding) - elif "," in content_encoding: - encodings = [ - e.strip() - for e in content_encoding.split(",") - if e.strip() in self.CONTENT_DECODERS - ] - if len(encodings): - self._decoder = _get_decoder(content_encoding) - - DECODER_ERROR_CLASSES = (IOError, zlib.error) - if brotli is not None: - DECODER_ERROR_CLASSES += (brotli.error,) - - def _decode(self, data, decode_content, flush_decoder): - """ - Decode the data passed in and potentially flush the decoder. - """ - if not decode_content: - return data - - try: - if self._decoder: - data = self._decoder.decompress(data) - except self.DECODER_ERROR_CLASSES as e: - content_encoding = self.headers.get("content-encoding", "").lower() - raise DecodeError( - "Received response with content-encoding: %s, but " - "failed to decode it." % content_encoding, - e, - ) - if flush_decoder: - data += self._flush_decoder() - - return data - - def _flush_decoder(self): - """ - Flushes the decoder. Should only be called if the decoder is actually - being used. - """ - if self._decoder: - buf = self._decoder.decompress(b"") - return buf + self._decoder.flush() - - return b"" - - @contextmanager - def _error_catcher(self): - """ - Catch low-level python exceptions, instead re-raising urllib3 - variants, so that low-level exceptions are not leaked in the - high-level api. - - On exit, release the connection back to the pool. - """ - clean_exit = False - - try: - try: - yield - - except SocketTimeout: - # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but - # there is yet no clean way to get at it from this context. - raise ReadTimeoutError(self._pool, None, "Read timed out.") - - except BaseSSLError as e: - # FIXME: Is there a better way to differentiate between SSLErrors? - if "read operation timed out" not in str(e): - # SSL errors related to framing/MAC get wrapped and reraised here - raise SSLError(e) - - raise ReadTimeoutError(self._pool, None, "Read timed out.") - - except (HTTPException, SocketError) as e: - # This includes IncompleteRead. - raise ProtocolError("Connection broken: %r" % e, e) - - # If no exception is thrown, we should avoid cleaning up - # unnecessarily. - clean_exit = True - finally: - # If we didn't terminate cleanly, we need to throw away our - # connection. - if not clean_exit: - # The response may not be closed but we're not going to use it - # anymore so close it now to ensure that the connection is - # released back to the pool. - if self._original_response: - self._original_response.close() - - # Closing the response may not actually be sufficient to close - # everything, so if we have a hold of the connection close that - # too. - if self._connection: - self._connection.close() - - # If we hold the original response but it's closed now, we should - # return the connection back to the pool. - if self._original_response and self._original_response.isclosed(): - self.release_conn() - - def read(self, amt=None, decode_content=None, cache_content=False): - """ - Similar to :meth:`http.client.HTTPResponse.read`, but with two additional - parameters: ``decode_content`` and ``cache_content``. - - :param amt: - How much of the content to read. 
If specified, caching is skipped - because it doesn't make sense to cache partial content as the full - response. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - - :param cache_content: - If True, will save the returned data such that the same result is - returned despite of the state of the underlying file object. This - is useful if you want the ``.data`` property to continue working - after having ``.read()`` the file object. (Overridden if ``amt`` is - set.) - """ - self._init_decoder() - if decode_content is None: - decode_content = self.decode_content - - if self._fp is None: - return - - flush_decoder = False - fp_closed = getattr(self._fp, "closed", False) - - with self._error_catcher(): - if amt is None: - # cStringIO doesn't like amt=None - data = self._fp.read() if not fp_closed else b"" - flush_decoder = True - else: - cache_content = False - data = self._fp.read(amt) if not fp_closed else b"" - if ( - amt != 0 and not data - ): # Platform-specific: Buggy versions of Python. - # Close the connection when no data is returned - # - # This is redundant to what httplib/http.client _should_ - # already do. However, versions of python released before - # December 15, 2012 (http://bugs.python.org/issue16298) do - # not properly close the connection in all cases. There is - # no harm in redundantly calling close. - self._fp.close() - flush_decoder = True - if self.enforce_content_length and self.length_remaining not in ( - 0, - None, - ): - # This is an edge case that httplib failed to cover due - # to concerns of backward compatibility. We're - # addressing it here to make sure IncompleteRead is - # raised during streaming, so all calls with incorrect - # Content-Length are caught. - raise IncompleteRead(self._fp_bytes_read, self.length_remaining) - - if data: - self._fp_bytes_read += len(data) - if self.length_remaining is not None: - self.length_remaining -= len(data) - - data = self._decode(data, decode_content, flush_decoder) - - if cache_content: - self._body = data - - return data - - def stream(self, amt=2 ** 16, decode_content=None): - """ - A generator wrapper for the read() method. A call will block until - ``amt`` bytes have been read from the connection or until the - connection is closed. - - :param amt: - How much of the content to read. The generator will return up to - much data per iteration, but may return less. This is particularly - likely when using compressed data. However, the empty string will - never be returned. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - """ - if self.chunked and self.supports_chunked_reads(): - for line in self.read_chunked(amt, decode_content=decode_content): - yield line - else: - while not is_fp_closed(self._fp): - data = self.read(amt=amt, decode_content=decode_content) - - if data: - yield data - - @classmethod - def from_httplib(ResponseCls, r, **response_kw): - """ - Given an :class:`http.client.HTTPResponse` instance ``r``, return a - corresponding :class:`urllib3.response.HTTPResponse` object. - - Remaining parameters are passed to the HTTPResponse constructor, along - with ``original_response=r``. 
- """ - headers = r.msg - - if not isinstance(headers, HTTPHeaderDict): - if six.PY2: - # Python 2.7 - headers = HTTPHeaderDict.from_httplib(headers) - else: - headers = HTTPHeaderDict(headers.items()) - - # HTTPResponse objects in Python 3 don't have a .strict attribute - strict = getattr(r, "strict", 0) - resp = ResponseCls( - body=r, - headers=headers, - status=r.status, - version=r.version, - reason=r.reason, - strict=strict, - original_response=r, - **response_kw - ) - return resp - - # Backwards-compatibility methods for http.client.HTTPResponse - def getheaders(self): - return self.headers - - def getheader(self, name, default=None): - return self.headers.get(name, default) - - # Backwards compatibility for http.cookiejar - def info(self): - return self.headers - - # Overrides from io.IOBase - def close(self): - if not self.closed: - self._fp.close() - - if self._connection: - self._connection.close() - - if not self.auto_close: - io.IOBase.close(self) - - @property - def closed(self): - if not self.auto_close: - return io.IOBase.closed.__get__(self) - elif self._fp is None: - return True - elif hasattr(self._fp, "isclosed"): - return self._fp.isclosed() - elif hasattr(self._fp, "closed"): - return self._fp.closed - else: - return True - - def fileno(self): - if self._fp is None: - raise IOError("HTTPResponse has no file to get a fileno from") - elif hasattr(self._fp, "fileno"): - return self._fp.fileno() - else: - raise IOError( - "The file-like object this HTTPResponse is wrapped " - "around has no file descriptor" - ) - - def flush(self): - if ( - self._fp is not None - and hasattr(self._fp, "flush") - and not getattr(self._fp, "closed", False) - ): - return self._fp.flush() - - def readable(self): - # This method is required for `io` module compatibility. - return True - - def readinto(self, b): - # This method is required for `io` module compatibility. - temp = self.read(len(b)) - if len(temp) == 0: - return 0 - else: - b[: len(temp)] = temp - return len(temp) - - def supports_chunked_reads(self): - """ - Checks if the underlying file-like object looks like a - :class:`http.client.HTTPResponse` object. We do this by testing for - the fp attribute. If it is present we assume it returns raw chunks as - processed by read_chunked(). - """ - return hasattr(self._fp, "fp") - - def _update_chunk_length(self): - # First, we'll figure out length of a chunk and then - # we'll try to read it from socket. - if self.chunk_left is not None: - return - line = self._fp.fp.readline() - line = line.split(b";", 1)[0] - try: - self.chunk_left = int(line, 16) - except ValueError: - # Invalid chunked protocol response, abort. - self.close() - raise InvalidChunkLength(self, line) - - def _handle_chunk(self, amt): - returned_chunk = None - if amt is None: - chunk = self._fp._safe_read(self.chunk_left) - returned_chunk = chunk - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. - self.chunk_left = None - elif amt < self.chunk_left: - value = self._fp._safe_read(amt) - self.chunk_left = self.chunk_left - amt - returned_chunk = value - elif amt == self.chunk_left: - value = self._fp._safe_read(amt) - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. - self.chunk_left = None - returned_chunk = value - else: # amt > self.chunk_left - returned_chunk = self._fp._safe_read(self.chunk_left) - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. 
- self.chunk_left = None - return returned_chunk - - def read_chunked(self, amt=None, decode_content=None): - """ - Similar to :meth:`HTTPResponse.read`, but with an additional - parameter: ``decode_content``. - - :param amt: - How much of the content to read. If specified, caching is skipped - because it doesn't make sense to cache partial content as the full - response. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - """ - self._init_decoder() - # FIXME: Rewrite this method and make it a class with a better structured logic. - if not self.chunked: - raise ResponseNotChunked( - "Response is not chunked. " - "Header 'transfer-encoding: chunked' is missing." - ) - if not self.supports_chunked_reads(): - raise BodyNotHttplibCompatible( - "Body should be http.client.HTTPResponse like. " - "It should have have an fp attribute which returns raw chunks." - ) - - with self._error_catcher(): - # Don't bother reading the body of a HEAD request. - if self._original_response and is_response_to_head(self._original_response): - self._original_response.close() - return - - # If a response is already read and closed - # then return immediately. - if self._fp.fp is None: - return - - while True: - self._update_chunk_length() - if self.chunk_left == 0: - break - chunk = self._handle_chunk(amt) - decoded = self._decode( - chunk, decode_content=decode_content, flush_decoder=False - ) - if decoded: - yield decoded - - if decode_content: - # On CPython and PyPy, we should never need to flush the - # decoder. However, on Jython we *might* need to, so - # lets defensively do it anyway. - decoded = self._flush_decoder() - if decoded: # Platform-specific: Jython. - yield decoded - - # Chunk content ends with \r\n: discard it. - while True: - line = self._fp.fp.readline() - if not line: - # Some sites may not end with '\r\n'. - break - if line == b"\r\n": - break - - # We read everything; close the "file". - if self._original_response: - self._original_response.close() - - def geturl(self): - """ - Returns the URL that was the source of this response. - If the request that generated this response redirected, this method - will return the final redirect location. 
- """ - if self.retries is not None and len(self.retries.history): - return self.retries.history[-1].redirect_location - else: - return self._request_url - - def __iter__(self): - buffer = [] - for chunk in self.stream(decode_content=True): - if b"\n" in chunk: - chunk = chunk.split(b"\n") - yield b"".join(buffer) + chunk[0] + b"\n" - for x in chunk[1:-1]: - yield x + b"\n" - if chunk[-1]: - buffer = [chunk[-1]] - else: - buffer = [] - else: - buffer.append(chunk) - if buffer: - yield b"".join(buffer) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/_internal/_core_utils.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/_internal/_core_utils.py deleted file mode 100644 index ebf12ec0fa17fe8a79a8c0ee0e4d79460a66633f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/_internal/_core_utils.py +++ /dev/null @@ -1,580 +0,0 @@ -from __future__ import annotations - -import os -from collections import defaultdict -from typing import ( - Any, - Callable, - Hashable, - TypeVar, - Union, - _GenericAlias, # type: ignore - cast, -) - -from pydantic_core import CoreSchema, core_schema -from pydantic_core import validate_core_schema as _validate_core_schema -from typing_extensions import TypeAliasType, TypeGuard, get_args - -from . import _repr - -AnyFunctionSchema = Union[ - core_schema.AfterValidatorFunctionSchema, - core_schema.BeforeValidatorFunctionSchema, - core_schema.WrapValidatorFunctionSchema, - core_schema.PlainValidatorFunctionSchema, -] - - -FunctionSchemaWithInnerSchema = Union[ - core_schema.AfterValidatorFunctionSchema, - core_schema.BeforeValidatorFunctionSchema, - core_schema.WrapValidatorFunctionSchema, -] - -CoreSchemaField = Union[ - core_schema.ModelField, core_schema.DataclassField, core_schema.TypedDictField, core_schema.ComputedField -] -CoreSchemaOrField = Union[core_schema.CoreSchema, CoreSchemaField] - -_CORE_SCHEMA_FIELD_TYPES = {'typed-dict-field', 'dataclass-field', 'model-field', 'computed-field'} -_FUNCTION_WITH_INNER_SCHEMA_TYPES = {'function-before', 'function-after', 'function-wrap'} -_LIST_LIKE_SCHEMA_WITH_ITEMS_TYPES = {'list', 'tuple-variable', 'set', 'frozenset'} - -_DEFINITIONS_CACHE_METADATA_KEY = 'pydantic.definitions_cache' - -NEEDS_APPLY_DISCRIMINATED_UNION_METADATA_KEY = 'pydantic.internal.needs_apply_discriminated_union' -"""Used to mark a schema that has a discriminated union that needs to be checked for validity at the end of -schema building because one of it's members refers to a definition that was not yet defined when the union -was first encountered. -""" -HAS_INVALID_SCHEMAS_METADATA_KEY = 'pydantic.internal.invalid' -"""Used to mark a schema that is invalid because it refers to a definition that was not yet defined when the -schema was first encountered. 
-""" - - -def is_core_schema( - schema: CoreSchemaOrField, -) -> TypeGuard[CoreSchema]: - return schema['type'] not in _CORE_SCHEMA_FIELD_TYPES - - -def is_core_schema_field( - schema: CoreSchemaOrField, -) -> TypeGuard[CoreSchemaField]: - return schema['type'] in _CORE_SCHEMA_FIELD_TYPES - - -def is_function_with_inner_schema( - schema: CoreSchemaOrField, -) -> TypeGuard[FunctionSchemaWithInnerSchema]: - return schema['type'] in _FUNCTION_WITH_INNER_SCHEMA_TYPES - - -def is_list_like_schema_with_items_schema( - schema: CoreSchema, -) -> TypeGuard[ - core_schema.ListSchema | core_schema.TupleVariableSchema | core_schema.SetSchema | core_schema.FrozenSetSchema -]: - return schema['type'] in _LIST_LIKE_SCHEMA_WITH_ITEMS_TYPES - - -def get_type_ref(type_: type[Any], args_override: tuple[type[Any], ...] | None = None) -> str: - """Produces the ref to be used for this type by pydantic_core's core schemas. - - This `args_override` argument was added for the purpose of creating valid recursive references - when creating generic models without needing to create a concrete class. - """ - origin = type_ - args = get_args(type_) if isinstance(type_, _GenericAlias) else (args_override or ()) - generic_metadata = getattr(type_, '__pydantic_generic_metadata__', None) - if generic_metadata: - origin = generic_metadata['origin'] or origin - args = generic_metadata['args'] or args - - module_name = getattr(origin, '__module__', '<No __module__>') - if isinstance(origin, TypeAliasType): - type_ref = f'{module_name}.{origin.__name__}' - else: - try: - qualname = getattr(origin, '__qualname__', f'<No __qualname__: {origin}>') - except Exception: - qualname = getattr(origin, '__qualname__', '<No __qualname__>') - type_ref = f'{module_name}.{qualname}:{id(origin)}' - - arg_refs: list[str] = [] - for arg in args: - if isinstance(arg, str): - # Handle string literals as a special case; we may be able to remove this special handling if we - # wrap them in a ForwardRef at some point. - arg_ref = f'{arg}:str-{id(arg)}' - else: - arg_ref = f'{_repr.display_as_type(arg)}:{id(arg)}' - arg_refs.append(arg_ref) - if arg_refs: - type_ref = f'{type_ref}[{",".join(arg_refs)}]' - return type_ref - - -def get_ref(s: core_schema.CoreSchema) -> None | str: - """Get the ref from the schema if it has one. - This exists just for type checking to work correctly. 
- """ - return s.get('ref', None) - - -def collect_definitions(schema: core_schema.CoreSchema) -> dict[str, core_schema.CoreSchema]: - defs: dict[str, CoreSchema] = {} - - def _record_valid_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema: - ref = get_ref(s) - if ref: - defs[ref] = s - return recurse(s, _record_valid_refs) - - walk_core_schema(schema, _record_valid_refs) - - return defs - - -def define_expected_missing_refs( - schema: core_schema.CoreSchema, allowed_missing_refs: set[str] -) -> core_schema.CoreSchema | None: - if not allowed_missing_refs: - # in this case, there are no missing refs to potentially substitute, so there's no need to walk the schema - # this is a common case (will be hit for all non-generic models), so it's worth optimizing for - return None - - refs = collect_definitions(schema).keys() - - expected_missing_refs = allowed_missing_refs.difference(refs) - if expected_missing_refs: - definitions: list[core_schema.CoreSchema] = [ - # TODO: Replace this with a (new) CoreSchema that, if present at any level, makes validation fail - # Issue: https://github.com/pydantic/pydantic-core/issues/619 - core_schema.none_schema(ref=ref, metadata={HAS_INVALID_SCHEMAS_METADATA_KEY: True}) - for ref in expected_missing_refs - ] - return core_schema.definitions_schema(schema, definitions) - return None - - -def collect_invalid_schemas(schema: core_schema.CoreSchema) -> bool: - invalid = False - - def _is_schema_valid(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema: - nonlocal invalid - if 'metadata' in s: - metadata = s['metadata'] - if HAS_INVALID_SCHEMAS_METADATA_KEY in metadata: - invalid = metadata[HAS_INVALID_SCHEMAS_METADATA_KEY] - return s - return recurse(s, _is_schema_valid) - - walk_core_schema(schema, _is_schema_valid) - return invalid - - -T = TypeVar('T') - - -Recurse = Callable[[core_schema.CoreSchema, 'Walk'], core_schema.CoreSchema] -Walk = Callable[[core_schema.CoreSchema, Recurse], core_schema.CoreSchema] - -# TODO: Should we move _WalkCoreSchema into pydantic_core proper? 
-# Issue: https://github.com/pydantic/pydantic-core/issues/615 - - -class _WalkCoreSchema: - def __init__(self): - self._schema_type_to_method = self._build_schema_type_to_method() - - def _build_schema_type_to_method(self) -> dict[core_schema.CoreSchemaType, Recurse]: - mapping: dict[core_schema.CoreSchemaType, Recurse] = {} - key: core_schema.CoreSchemaType - for key in get_args(core_schema.CoreSchemaType): - method_name = f"handle_{key.replace('-', '_')}_schema" - mapping[key] = getattr(self, method_name, self._handle_other_schemas) - return mapping - - def walk(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema: - return f(schema, self._walk) - - def _walk(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema: - schema = self._schema_type_to_method[schema['type']](schema.copy(), f) - ser_schema: core_schema.SerSchema | None = schema.get('serialization') # type: ignore - if ser_schema: - schema['serialization'] = self._handle_ser_schemas(ser_schema, f) - return schema - - def _handle_other_schemas(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema: - sub_schema = schema.get('schema', None) - if sub_schema is not None: - schema['schema'] = self.walk(sub_schema, f) # type: ignore - return schema - - def _handle_ser_schemas(self, ser_schema: core_schema.SerSchema, f: Walk) -> core_schema.SerSchema: - schema: core_schema.CoreSchema | None = ser_schema.get('schema', None) - if schema is not None: - ser_schema['schema'] = self.walk(schema, f) # type: ignore - return_schema: core_schema.CoreSchema | None = ser_schema.get('return_schema', None) - if return_schema is not None: - ser_schema['return_schema'] = self.walk(return_schema, f) # type: ignore - return ser_schema - - def handle_definitions_schema(self, schema: core_schema.DefinitionsSchema, f: Walk) -> core_schema.CoreSchema: - new_definitions: list[core_schema.CoreSchema] = [] - for definition in schema['definitions']: - updated_definition = self.walk(definition, f) - if 'ref' in updated_definition: - # If the updated definition schema doesn't have a 'ref', it shouldn't go in the definitions - # This is most likely to happen due to replacing something with a definition reference, in - # which case it should certainly not go in the definitions list - new_definitions.append(updated_definition) - new_inner_schema = self.walk(schema['schema'], f) - - if not new_definitions and len(schema) == 3: - # This means we'd be returning a "trivial" definitions schema that just wrapped the inner schema - return new_inner_schema - - new_schema = schema.copy() - new_schema['schema'] = new_inner_schema - new_schema['definitions'] = new_definitions - return new_schema - - def handle_list_schema(self, schema: core_schema.ListSchema, f: Walk) -> core_schema.CoreSchema: - items_schema = schema.get('items_schema') - if items_schema is not None: - schema['items_schema'] = self.walk(items_schema, f) - return schema - - def handle_set_schema(self, schema: core_schema.SetSchema, f: Walk) -> core_schema.CoreSchema: - items_schema = schema.get('items_schema') - if items_schema is not None: - schema['items_schema'] = self.walk(items_schema, f) - return schema - - def handle_frozenset_schema(self, schema: core_schema.FrozenSetSchema, f: Walk) -> core_schema.CoreSchema: - items_schema = schema.get('items_schema') - if items_schema is not None: - schema['items_schema'] = self.walk(items_schema, f) - return schema - - def handle_generator_schema(self, schema: core_schema.GeneratorSchema, f: Walk) -> 
core_schema.CoreSchema: - items_schema = schema.get('items_schema') - if items_schema is not None: - schema['items_schema'] = self.walk(items_schema, f) - return schema - - def handle_tuple_variable_schema( - self, schema: core_schema.TupleVariableSchema | core_schema.TuplePositionalSchema, f: Walk - ) -> core_schema.CoreSchema: - schema = cast(core_schema.TupleVariableSchema, schema) - items_schema = schema.get('items_schema') - if items_schema is not None: - schema['items_schema'] = self.walk(items_schema, f) - return schema - - def handle_tuple_positional_schema( - self, schema: core_schema.TupleVariableSchema | core_schema.TuplePositionalSchema, f: Walk - ) -> core_schema.CoreSchema: - schema = cast(core_schema.TuplePositionalSchema, schema) - schema['items_schema'] = [self.walk(v, f) for v in schema['items_schema']] - extras_schema = schema.get('extras_schema') - if extras_schema is not None: - schema['extras_schema'] = self.walk(extras_schema, f) - return schema - - def handle_dict_schema(self, schema: core_schema.DictSchema, f: Walk) -> core_schema.CoreSchema: - keys_schema = schema.get('keys_schema') - if keys_schema is not None: - schema['keys_schema'] = self.walk(keys_schema, f) - values_schema = schema.get('values_schema') - if values_schema: - schema['values_schema'] = self.walk(values_schema, f) - return schema - - def handle_function_schema(self, schema: AnyFunctionSchema, f: Walk) -> core_schema.CoreSchema: - if not is_function_with_inner_schema(schema): - return schema - schema['schema'] = self.walk(schema['schema'], f) - return schema - - def handle_union_schema(self, schema: core_schema.UnionSchema, f: Walk) -> core_schema.CoreSchema: - new_choices: list[CoreSchema | tuple[CoreSchema, str]] = [] - for v in schema['choices']: - if isinstance(v, tuple): - new_choices.append((self.walk(v[0], f), v[1])) - else: - new_choices.append(self.walk(v, f)) - schema['choices'] = new_choices - return schema - - def handle_tagged_union_schema(self, schema: core_schema.TaggedUnionSchema, f: Walk) -> core_schema.CoreSchema: - new_choices: dict[Hashable, core_schema.CoreSchema] = {} - for k, v in schema['choices'].items(): - new_choices[k] = v if isinstance(v, (str, int)) else self.walk(v, f) - schema['choices'] = new_choices - return schema - - def handle_chain_schema(self, schema: core_schema.ChainSchema, f: Walk) -> core_schema.CoreSchema: - schema['steps'] = [self.walk(v, f) for v in schema['steps']] - return schema - - def handle_lax_or_strict_schema(self, schema: core_schema.LaxOrStrictSchema, f: Walk) -> core_schema.CoreSchema: - schema['lax_schema'] = self.walk(schema['lax_schema'], f) - schema['strict_schema'] = self.walk(schema['strict_schema'], f) - return schema - - def handle_json_or_python_schema(self, schema: core_schema.JsonOrPythonSchema, f: Walk) -> core_schema.CoreSchema: - schema['json_schema'] = self.walk(schema['json_schema'], f) - schema['python_schema'] = self.walk(schema['python_schema'], f) - return schema - - def handle_model_fields_schema(self, schema: core_schema.ModelFieldsSchema, f: Walk) -> core_schema.CoreSchema: - extras_schema = schema.get('extras_schema') - if extras_schema is not None: - schema['extras_schema'] = self.walk(extras_schema, f) - replaced_fields: dict[str, core_schema.ModelField] = {} - replaced_computed_fields: list[core_schema.ComputedField] = [] - for computed_field in schema.get('computed_fields', ()): - replaced_field = computed_field.copy() - replaced_field['return_schema'] = self.walk(computed_field['return_schema'], f) - 
replaced_computed_fields.append(replaced_field) - if replaced_computed_fields: - schema['computed_fields'] = replaced_computed_fields - for k, v in schema['fields'].items(): - replaced_field = v.copy() - replaced_field['schema'] = self.walk(v['schema'], f) - replaced_fields[k] = replaced_field - schema['fields'] = replaced_fields - return schema - - def handle_typed_dict_schema(self, schema: core_schema.TypedDictSchema, f: Walk) -> core_schema.CoreSchema: - extras_schema = schema.get('extras_schema') - if extras_schema is not None: - schema['extras_schema'] = self.walk(extras_schema, f) - replaced_computed_fields: list[core_schema.ComputedField] = [] - for computed_field in schema.get('computed_fields', ()): - replaced_field = computed_field.copy() - replaced_field['return_schema'] = self.walk(computed_field['return_schema'], f) - replaced_computed_fields.append(replaced_field) - if replaced_computed_fields: - schema['computed_fields'] = replaced_computed_fields - replaced_fields: dict[str, core_schema.TypedDictField] = {} - for k, v in schema['fields'].items(): - replaced_field = v.copy() - replaced_field['schema'] = self.walk(v['schema'], f) - replaced_fields[k] = replaced_field - schema['fields'] = replaced_fields - return schema - - def handle_dataclass_args_schema(self, schema: core_schema.DataclassArgsSchema, f: Walk) -> core_schema.CoreSchema: - replaced_fields: list[core_schema.DataclassField] = [] - replaced_computed_fields: list[core_schema.ComputedField] = [] - for computed_field in schema.get('computed_fields', ()): - replaced_field = computed_field.copy() - replaced_field['return_schema'] = self.walk(computed_field['return_schema'], f) - replaced_computed_fields.append(replaced_field) - if replaced_computed_fields: - schema['computed_fields'] = replaced_computed_fields - for field in schema['fields']: - replaced_field = field.copy() - replaced_field['schema'] = self.walk(field['schema'], f) - replaced_fields.append(replaced_field) - schema['fields'] = replaced_fields - return schema - - def handle_arguments_schema(self, schema: core_schema.ArgumentsSchema, f: Walk) -> core_schema.CoreSchema: - replaced_arguments_schema: list[core_schema.ArgumentsParameter] = [] - for param in schema['arguments_schema']: - replaced_param = param.copy() - replaced_param['schema'] = self.walk(param['schema'], f) - replaced_arguments_schema.append(replaced_param) - schema['arguments_schema'] = replaced_arguments_schema - if 'var_args_schema' in schema: - schema['var_args_schema'] = self.walk(schema['var_args_schema'], f) - if 'var_kwargs_schema' in schema: - schema['var_kwargs_schema'] = self.walk(schema['var_kwargs_schema'], f) - return schema - - def handle_call_schema(self, schema: core_schema.CallSchema, f: Walk) -> core_schema.CoreSchema: - schema['arguments_schema'] = self.walk(schema['arguments_schema'], f) - if 'return_schema' in schema: - schema['return_schema'] = self.walk(schema['return_schema'], f) - return schema - - -_dispatch = _WalkCoreSchema().walk - - -def walk_core_schema(schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema: - """Recursively traverse a CoreSchema. - - Args: - schema (core_schema.CoreSchema): The CoreSchema to process, it will not be modified. - f (Walk): A function to apply. This function takes two arguments: - 1. The current CoreSchema that is being processed - (not the same one you passed into this function, one level down). - 2. The "next" `f` to call. 
This lets you for example use `f=functools.partial(some_method, some_context)` - to pass data down the recursive calls without using globals or other mutable state. - - Returns: - core_schema.CoreSchema: A processed CoreSchema. - """ - return f(schema.copy(), _dispatch) - - -def simplify_schema_references(schema: core_schema.CoreSchema) -> core_schema.CoreSchema: # noqa: C901 - definitions: dict[str, core_schema.CoreSchema] = {} - ref_counts: dict[str, int] = defaultdict(int) - involved_in_recursion: dict[str, bool] = {} - current_recursion_ref_count: dict[str, int] = defaultdict(int) - - def collect_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema: - if s['type'] == 'definitions': - for definition in s['definitions']: - ref = get_ref(definition) - assert ref is not None - if ref not in definitions: - definitions[ref] = definition - recurse(definition, collect_refs) - return recurse(s['schema'], collect_refs) - else: - ref = get_ref(s) - if ref is not None: - new = recurse(s, collect_refs) - new_ref = get_ref(new) - if new_ref: - definitions[new_ref] = new - return core_schema.definition_reference_schema(schema_ref=ref) - else: - return recurse(s, collect_refs) - - schema = walk_core_schema(schema, collect_refs) - - def count_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema: - if s['type'] != 'definition-ref': - return recurse(s, count_refs) - ref = s['schema_ref'] - ref_counts[ref] += 1 - - if ref_counts[ref] >= 2: - # If this model is involved in a recursion this should be detected - # on its second encounter, we can safely stop the walk here. - if current_recursion_ref_count[ref] != 0: - involved_in_recursion[ref] = True - return s - - current_recursion_ref_count[ref] += 1 - recurse(definitions[ref], count_refs) - current_recursion_ref_count[ref] -= 1 - return s - - schema = walk_core_schema(schema, count_refs) - - assert all(c == 0 for c in current_recursion_ref_count.values()), 'this is a bug! please report it' - - def can_be_inlined(s: core_schema.DefinitionReferenceSchema, ref: str) -> bool: - if ref_counts[ref] > 1: - return False - if involved_in_recursion.get(ref, False): - return False - if 'serialization' in s: - return False - if 'metadata' in s: - metadata = s['metadata'] - for k in ( - 'pydantic_js_functions', - 'pydantic_js_annotation_functions', - 'pydantic.internal.union_discriminator', - ): - if k in metadata: - # we need to keep this as a ref - return False - return True - - def inline_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema: - if s['type'] == 'definition-ref': - ref = s['schema_ref'] - # Check if the reference is only used once, not involved in recursion and does not have - # any extra keys (like 'serialization') - if can_be_inlined(s, ref): - # Inline the reference by replacing the reference with the actual schema - new = definitions.pop(ref) - ref_counts[ref] -= 1 # because we just replaced it! 
- # put all other keys that were on the def-ref schema into the inlined version - # in particular this is needed for `serialization` - if 'serialization' in s: - new['serialization'] = s['serialization'] - s = recurse(new, inline_refs) - return s - else: - return recurse(s, inline_refs) - else: - return recurse(s, inline_refs) - - schema = walk_core_schema(schema, inline_refs) - - def_values = [v for v in definitions.values() if ref_counts[v['ref']] > 0] # type: ignore - - if def_values: - schema = core_schema.definitions_schema(schema=schema, definitions=def_values) - return schema - - -def _strip_metadata(schema: CoreSchema) -> CoreSchema: - def strip_metadata(s: CoreSchema, recurse: Recurse) -> CoreSchema: - s = s.copy() - s.pop('metadata', None) - if s['type'] == 'model-fields': - s = s.copy() - s['fields'] = {k: v.copy() for k, v in s['fields'].items()} - for field_name, field_schema in s['fields'].items(): - field_schema.pop('metadata', None) - s['fields'][field_name] = field_schema - computed_fields = s.get('computed_fields', None) - if computed_fields: - s['computed_fields'] = [cf.copy() for cf in computed_fields] - for cf in computed_fields: - cf.pop('metadata', None) - else: - s.pop('computed_fields', None) - elif s['type'] == 'model': - # remove some defaults - if s.get('custom_init', True) is False: - s.pop('custom_init') - if s.get('root_model', True) is False: - s.pop('root_model') - if {'title'}.issuperset(s.get('config', {}).keys()): - s.pop('config', None) - - return recurse(s, strip_metadata) - - return walk_core_schema(schema, strip_metadata) - - -def pretty_print_core_schema( - schema: CoreSchema, - include_metadata: bool = False, -) -> None: - """Pretty print a CoreSchema using rich. - This is intended for debugging purposes. - - Args: - schema: The CoreSchema to print. - include_metadata: Whether to include metadata in the output. Defaults to `False`. - """ - from rich import print # type: ignore # install it manually in your dev env - - if not include_metadata: - schema = _strip_metadata(schema) - - return print(schema) - - -def validate_core_schema(schema: CoreSchema) -> CoreSchema: - if 'PYDANTIC_SKIP_VALIDATING_CORE_SCHEMAS' in os.environ: - return schema - return _validate_core_schema(schema) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/cells.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/cells.py deleted file mode 100644 index 9354f9e3140999702ec8c140636c511d71c340b2..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/cells.py +++ /dev/null @@ -1,154 +0,0 @@ -import re -from functools import lru_cache -from typing import Callable, List - -from ._cell_widths import CELL_WIDTHS - -# Regex to match sequence of the most common character ranges -_is_single_cell_widths = re.compile("^[\u0020-\u006f\u00a0\u02ff\u0370-\u0482]*$").match - - -@lru_cache(4096) -def cached_cell_len(text: str) -> int: - """Get the number of cells required to display text. - - This method always caches, which may use up a lot of memory. It is recommended to use - `cell_len` over this method. - - Args: - text (str): Text to display. - - Returns: - int: Get the number of cells required to display text. - """ - _get_size = get_character_cell_size - total_size = sum(_get_size(character) for character in text) - return total_size - - -def cell_len(text: str, _cell_len: Callable[[str], int] = cached_cell_len) -> int: - """Get the number of cells required to display text. 
- - Args: - text (str): Text to display. - - Returns: - int: Get the number of cells required to display text. - """ - if len(text) < 512: - return _cell_len(text) - _get_size = get_character_cell_size - total_size = sum(_get_size(character) for character in text) - return total_size - - -@lru_cache(maxsize=4096) -def get_character_cell_size(character: str) -> int: - """Get the cell size of a character. - - Args: - character (str): A single character. - - Returns: - int: Number of cells (0, 1 or 2) occupied by that character. - """ - return _get_codepoint_cell_size(ord(character)) - - -@lru_cache(maxsize=4096) -def _get_codepoint_cell_size(codepoint: int) -> int: - """Get the cell size of a character. - - Args: - codepoint (int): Codepoint of a character. - - Returns: - int: Number of cells (0, 1 or 2) occupied by that character. - """ - - _table = CELL_WIDTHS - lower_bound = 0 - upper_bound = len(_table) - 1 - index = (lower_bound + upper_bound) // 2 - while True: - start, end, width = _table[index] - if codepoint < start: - upper_bound = index - 1 - elif codepoint > end: - lower_bound = index + 1 - else: - return 0 if width == -1 else width - if upper_bound < lower_bound: - break - index = (lower_bound + upper_bound) // 2 - return 1 - - -def set_cell_size(text: str, total: int) -> str: - """Set the length of a string to fit within given number of cells.""" - - if _is_single_cell_widths(text): - size = len(text) - if size < total: - return text + " " * (total - size) - return text[:total] - - if total <= 0: - return "" - cell_size = cell_len(text) - if cell_size == total: - return text - if cell_size < total: - return text + " " * (total - cell_size) - - start = 0 - end = len(text) - - # Binary search until we find the right size - while True: - pos = (start + end) // 2 - before = text[: pos + 1] - before_len = cell_len(before) - if before_len == total + 1 and cell_len(before[-1]) == 2: - return before[:-1] + " " - if before_len == total: - return before - if before_len > total: - end = pos - else: - start = pos - - -# TODO: This is inefficient -# TODO: This might not work with CWJ type characters -def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]: - """Break text in to equal (cell) length strings, returning the characters in reverse - order""" - _get_character_cell_size = get_character_cell_size - characters = [ - (character, _get_character_cell_size(character)) for character in text - ] - total_size = position - lines: List[List[str]] = [[]] - append = lines[-1].append - - for character, size in reversed(characters): - if total_size + size > max_size: - lines.append([character]) - append = lines[-1].append - total_size = size - else: - total_size += size - append(character) - - return ["".join(line) for line in lines] - - -if __name__ == "__main__": # pragma: no cover - - print(get_character_cell_size("😽")) - for line in chop_cells("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", 8): - print(line) - for n in range(80, 1, -1): - print(set_cell_size("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", n) + "|") - print("x" * n) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/measure.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/measure.py deleted file mode 100644 index a508ffa80bd715b47c190ed9d747dbc388fa5b19..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/measure.py +++ /dev/null @@ -1,151 +0,0 @@ -from operator import itemgetter -from typing import 
TYPE_CHECKING, Callable, NamedTuple, Optional, Sequence - -from . import errors -from .protocol import is_renderable, rich_cast - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderableType - - -class Measurement(NamedTuple): - """Stores the minimum and maximum widths (in characters) required to render an object.""" - - minimum: int - """Minimum number of cells required to render.""" - maximum: int - """Maximum number of cells required to render.""" - - @property - def span(self) -> int: - """Get difference between maximum and minimum.""" - return self.maximum - self.minimum - - def normalize(self) -> "Measurement": - """Get measurement that ensures that minimum <= maximum and minimum >= 0 - - Returns: - Measurement: A normalized measurement. - """ - minimum, maximum = self - minimum = min(max(0, minimum), maximum) - return Measurement(max(0, minimum), max(0, max(minimum, maximum))) - - def with_maximum(self, width: int) -> "Measurement": - """Get a RenderableWith where the widths are <= width. - - Args: - width (int): Maximum desired width. - - Returns: - Measurement: New Measurement object. - """ - minimum, maximum = self - return Measurement(min(minimum, width), min(maximum, width)) - - def with_minimum(self, width: int) -> "Measurement": - """Get a RenderableWith where the widths are >= width. - - Args: - width (int): Minimum desired width. - - Returns: - Measurement: New Measurement object. - """ - minimum, maximum = self - width = max(0, width) - return Measurement(max(minimum, width), max(maximum, width)) - - def clamp( - self, min_width: Optional[int] = None, max_width: Optional[int] = None - ) -> "Measurement": - """Clamp a measurement within the specified range. - - Args: - min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None. - max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None. - - Returns: - Measurement: New Measurement object. - """ - measurement = self - if min_width is not None: - measurement = measurement.with_minimum(min_width) - if max_width is not None: - measurement = measurement.with_maximum(max_width) - return measurement - - @classmethod - def get( - cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType" - ) -> "Measurement": - """Get a measurement for a renderable. - - Args: - console (~rich.console.Console): Console instance. - options (~rich.console.ConsoleOptions): Console options. - renderable (RenderableType): An object that may be rendered with Rich. - - Raises: - errors.NotRenderableError: If the object is not renderable. - - Returns: - Measurement: Measurement object containing range of character widths required to render the object. 
- """ - _max_width = options.max_width - if _max_width < 1: - return Measurement(0, 0) - if isinstance(renderable, str): - renderable = console.render_str( - renderable, markup=options.markup, highlight=False - ) - renderable = rich_cast(renderable) - if is_renderable(renderable): - get_console_width: Optional[ - Callable[["Console", "ConsoleOptions"], "Measurement"] - ] = getattr(renderable, "__rich_measure__", None) - if get_console_width is not None: - render_width = ( - get_console_width(console, options) - .normalize() - .with_maximum(_max_width) - ) - if render_width.maximum < 1: - return Measurement(0, 0) - return render_width.normalize() - else: - return Measurement(0, _max_width) - else: - raise errors.NotRenderableError( - f"Unable to get render width for {renderable!r}; " - "a str, Segment, or object with __rich_console__ method is required" - ) - - -def measure_renderables( - console: "Console", - options: "ConsoleOptions", - renderables: Sequence["RenderableType"], -) -> "Measurement": - """Get a measurement that would fit a number of renderables. - - Args: - console (~rich.console.Console): Console instance. - options (~rich.console.ConsoleOptions): Console options. - renderables (Iterable[RenderableType]): One or more renderable objects. - - Returns: - Measurement: Measurement object containing range of character widths required to - contain all given renderables. - """ - if not renderables: - return Measurement(0, 0) - get_measurement = Measurement.get - measurements = [ - get_measurement(console, options, renderable) for renderable in renderables - ] - measured_width = Measurement( - max(measurements, key=itemgetter(0)).minimum, - max(measurements, key=itemgetter(1)).maximum, - ) - return measured_width diff --git a/spaces/pycui/RealChar/alembic/versions/ead242c61258_added_user_table.py b/spaces/pycui/RealChar/alembic/versions/ead242c61258_added_user_table.py deleted file mode 100644 index 100bf96f2e8487a4e3376672472b6dda52ab0ce1..0000000000000000000000000000000000000000 --- a/spaces/pycui/RealChar/alembic/versions/ead242c61258_added_user_table.py +++ /dev/null @@ -1,32 +0,0 @@ -"""Added user table - -Revision ID: ead242c61258 -Revises: -Create Date: 2023-06-26 16:25:00.614978 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. 
-revision = 'ead242c61258' -down_revision = None -branch_labels = None -depends_on = None - - -def upgrade() -> None: - op.create_table('users', - sa.Column('id', sa.Integer(), primary_key=True), - sa.Column('name', sa.String(), nullable=True), - sa.Column('email', sa.String(), - nullable=False, unique=True), - sa.PrimaryKeyConstraint('id') - ) - op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True) - - -def downgrade() -> None: - op.drop_index(op.f('ix_users_email'), table_name='users') - op.drop_table('users') diff --git a/spaces/qinzhu/diy-girlfriend/README.md b/spaces/qinzhu/diy-girlfriend/README.md deleted file mode 100644 index 60ff61148b8b7293d213471126b19d4a67911088..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/diy-girlfriend/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Moe TTS -emoji: 😊🎙️ -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false -license: mit -duplicated_from: AsakuraMizu/moe-tts ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference -netsh advfirewall firewall add rule name="diy_chat_girl_web" dir=in action=allow protocol=TCP localport=7860,7870 -netsh advfirewall firewall add rule name="diy_chat_girl_web" dir=out action=allow protocol=TCP localport=7860,7870 \ No newline at end of file diff --git a/spaces/qiufenge/bingo/README.md b/spaces/qiufenge/bingo/README.md deleted file mode 100644 index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000 --- a/spaces/qiufenge/bingo/README.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: bingo -emoji: 😊 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -<div align="center"> - -# Bingo - -Bingo,一个让你呼吸顺畅 New Bing。 - -高度还原 New Bing 网页版的主要操作,国内可用,兼容绝大多数微软 Bing AI 的功能,可自行部署使用。 - -![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![Gthub issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - -问题反馈请前往 https://github.com/weaigc/bingo/issues -</div> - - diff --git a/spaces/radames/ComfyUI-data-index/README.md b/spaces/radames/ComfyUI-data-index/README.md deleted file mode 100644 index 13c90018b150caef60c18c46260f2b2b5663d9fb..0000000000000000000000000000000000000000 --- a/spaces/radames/ComfyUI-data-index/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ComfyUI + Data Index -emoji: 📈 -colorFrom: green -colorTo: pink -sdk: docker -pinned: false -app_port: 4444 -duplicated_from: SpacesExamples/ComfyUI ---- - -model: https://huggingface.co/stabilityai/control-lora \ No newline at end of file diff --git a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/demo/analyze/__init__.py b/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/demo/analyze/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/radames/gradio-request-get-client-ip/app.py b/spaces/radames/gradio-request-get-client-ip/app.py deleted file mode 100644 index 
fa9e6ee3b6f602373b33e7c24094f6bbf7159348..0000000000000000000000000000000000000000 --- a/spaces/radames/gradio-request-get-client-ip/app.py +++ /dev/null @@ -1,27 +0,0 @@ -import gradio as gr -import socket - -def predict(text, request: gr.Request): - client_ip = request.client.host - local_ip = socket.gethostbyname(socket.gethostbyname("")) - x_forwarded_for = request.headers.get('x-forwarded-for') - headers = request.headers - if x_forwarded_for: - client_ip = x_forwarded_for - print("client_ip", client_ip, text) - print("x_forwarded_for", x_forwarded_for) - return text, {"client_ip": client_ip, - "local_ip": local_ip, - "headers": headers} - - -with gr.Blocks() as block: - gr.Markdown("## Gradio get client IP") - text = gr.Textbox(label="dummy input") - output = gr.JSON({}) - btn = gr.Button("Test") - - btn.click(predict, inputs=[text], outputs=[text, output]) - -block.queue() -block.launch(share=False, server_name='0.0.0.0', show_api=True) diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Algoriddim Djay V4.0.1 Pc FREE Full Software Download Free.rar Mix Scratch and Remix Your Music with Ease.md b/spaces/raedeXanto/academic-chatgpt-beta/Algoriddim Djay V4.0.1 Pc FREE Full Software Download Free.rar Mix Scratch and Remix Your Music with Ease.md deleted file mode 100644 index f13af52d54e1aea567ba2e69d6c5308d01fda48b..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Algoriddim Djay V4.0.1 Pc FREE Full Software Download Free.rar Mix Scratch and Remix Your Music with Ease.md +++ /dev/null @@ -1,129 +0,0 @@ - -<h1>Free Acme ID Card Maker 5.0 Serial Keygen: What You Need to Know</h1> -<p>If you are looking for a simple and effective way to create professional-looking ID cards for yourself or your business, you might have heard of Acme ID Card Maker. This is a software program that allows you to design and print your own ID cards with ease. You can choose from hundreds of templates or create your own custom design with various fonts, colors, shapes, logos, and images.</p> -<h2>free acme id card maker 5.0 serial keygen</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://tinourl.com/2uL1zd">https://tinourl.com/2uL1zd</a></b></p><br /><br /> -<p>However, there is one catch: Acme ID Card Maker is not free. You have to pay a license fee to use the full version of the program, which costs $49.95 for a single user or $99.95 for a multi-user license. If you don't want to pay that much, you might be tempted to look for a free serial keygen that can generate a valid activation code for you.</p> -<p>A serial keygen is a software tool that can create random serial numbers that can be used to register or unlock a program. Some people use serial keygens to bypass the license verification process and use paid software for free. However, this practice is not only illegal but also risky, as you might end up downloading malware or violating the terms of service of the software developer.</p> -<p>In this article, we will tell you everything you need to know about free acme id card maker 5.0 serial keygen, including how to download and install the program, how to use it, how to get a free serial keygen, what are the pros and cons of using one, and what are some alternatives to Acme ID Card Maker.</p> - <h2>How to Download and Install Acme ID Card Maker 5.0</h2> -<p>The first step to use Acme ID Card Maker is to download and install it on your computer. 
Here are the steps you need to follow:</p> -<p>free acme id card maker 5.0 serial keygen download<br /> -free acme id card maker 5.0 serial keygen crack<br /> -free acme id card maker 5.0 serial keygen online<br /> -free acme id card maker 5.0 serial keygen generator<br /> -free acme id card maker 5.0 serial keygen software<br /> -free acme id card maker 5.0 serial keygen npm<br /> -free acme id card maker 5.0 serial keygen pdf<br /> -free acme id card maker 5.0 serial keygen full<br /> -free acme id card maker 5.0 serial keygen version<br /> -free acme id card maker 5.0 serial keygen windows<br /> -free acme id card maker 5.0 serial keygen mac<br /> -free acme id card maker 5.0 serial keygen linux<br /> -free acme id card maker 5.0 serial keygen review<br /> -free acme id card maker 5.0 serial keygen tutorial<br /> -free acme id card maker 5.0 serial keygen video<br /> -free acme id card maker 5.0 serial keygen youtube<br /> -free acme id card maker 5.0 serial keygen reddit<br /> -free acme id card maker 5.0 serial keygen quora<br /> -free acme id card maker 5.0 serial keygen blog<br /> -free acme id card maker 5.0 serial keygen website<br /> -free acme id card maker 5.0 serial keygen app<br /> -free acme id card maker 5.0 serial keygen tool<br /> -free acme id card maker 5.0 serial keygen program<br /> -free acme id card maker 5.0 serial keygen code<br /> -free acme id card maker 5.0 serial keygen license<br /> -free acme id card maker 5.0 serial keygen activation<br /> -free acme id card maker 5.0 serial keygen registration<br /> -free acme id card maker 5.0 serial keygen patch<br /> -free acme id card maker 5.0 serial keygen update<br /> -free acme id card maker 5.0 serial keygen upgrade<br /> -free acme id card maker 5.0 serial keygen install<br /> -free acme id card maker 5.0 serial keygen uninstall<br /> -free acme id card maker 5.0 serial keygen support<br /> -free acme id card maker 5.0 serial keygen help<br /> -free acme id card maker 5.0 serial keygen tips<br /> -free acme id card maker 5.0 serial keygen tricks<br /> -free acme id card maker 5.0 serial keygen hacks<br /> -free acme id card maker 5.0 serial keygen cheats<br /> -free acme id card maker 5.0 serial keygen mods<br /> -free acme id card maker 5.0 serial keygen features<br /> -free acme id card maker 5.0 serial keygen benefits<br /> -free acme id card maker 5.0 serial keygen pros and cons<br /> -free acme id card maker 5.0 serial keygen comparison<br /> -free acme id card maker 5.0 serial keygen alternatives<br /> -free acme id card maker 5.0 serial keygen competitors<br /> -free acme id card maker 5.0 serial keygen reviews and ratings<br /> -free acme id card maker 5.0 serial keygen testimonials and feedbacks<br /> -free acme id card maker 5.0 serial keygen coupons and discounts<br /> -free acme id card maker 5.0 serial keygen offers and deals</p> -<ol> -<li>Go to the official website of Acme ID Card Maker at <a href="https://www.acmesoftwares.com/id-card-maker.html">https://www.acmesoftwares.com/id-card-maker.html</a> or a trusted source that offers the program for download.</li> -<li>Choose the right version for your system (Windows or Mac) and click on the download button.</li> -<li>Once the download is complete, open the setup file and follow the installation instructions.</li> -</ol> -<p>Note that you can download and install Acme ID Card Maker for free, but you will only be able to use it for a limited time (30 days) and with limited features (watermark on output, no batch printing, no export options). 
To use the full version of the program, you will need to purchase a license or use a serial keygen.</p> - <h2>How to Use Acme ID Card Maker 5.0</h2> -<p>Once you have installed Acme ID Card Maker on your computer, you can start using it to create your own ID cards. Here are the steps you need to follow:</p> -<ol> -<li>Launch the program and choose a template or create your own design from scratch.</li> -<li>Add your personal or business information and customize the design with various fonts, colors, shapes, logos, and images.</li> -<li>Print or save your ID card as a PDF or image file.</li> -</ol> -<p>You can also use Acme ID Card Maker to create other types of cards, such as business cards, membership cards, loyalty cards, student cards, employee cards, etc.</p> - <h2>How to Get a Free Serial Keygen for Acme ID Card Maker 5.0</h2> -<p>If you don't want to pay for a license or use the trial version of Acme ID Card Maker, you might be looking for a free serial keygen that can generate a valid activation code for you. There are several ways to get one:</p> - <h3>Option 1: Use a free online generator</h3> -<p>One of the easiest ways to get a free serial keygen for Acme ID Card Maker is to use an online generator that can create random codes for you. There are many websites that offer this service, such as <a href="https://serialkeygenerator.com/">https://serialkeygenerator.com/</a>, <a href="https://keygenninja.com/">https://keygenninja.com/</a>, <a href="https://keygens.pro/">https://keygens.pro/</a>, etc.</p> -<p>All you have to do is enter the name of the program (Acme ID Card Maker) and click on the generate button. You will get a list of possible serial numbers that you can copy and paste into the registration window of Acme ID Card Maker.</p> - <h3>Option 2: Use a free npm package</h3> -<p>If you are familiar with Node.js and npm (Node Package Manager), you can also use a free npm package that can generate serial keygens for various programs. One example is <a href="https://www.npmjs.com/package/free_acme_id_card_maker_5_0_serial_keygen_yu20">free_acme_id_card_maker_5_0_serial_keygen_yu20</a>, which claims to be able to create valid codes for Acme ID Card Maker.</p> -<p>To use this package, you need to install Node.js on your computer and run the following command in your terminal:</p> - ```bash npm i free_acme_id_card_maker_5_0_serial_keygen_yu20 ``` <p>This will install the package in your project folder. Then, you can run the following command to generate a serial keygen:</p> - ```bash node index.js ``` <p>You will get an output like this:</p> - ```text Free Acme Id Card Maker 5.0 Serial Keygen Click Here >> https://bytlly.com/2tfYTw ``` <p>You can then follow the link and copy the code into the registration window of Acme ID Card Maker.</p> - <h3>Option 3: Use a free crack file</h3> -<p>Another way to get a free serial keygen for Acme ID Card Maker is to use a crack file that can modify or bypass the license verification process of the program. A crack file is usually an executable file that can replace or patch the original file of the program.</p> -<p>There are many websites that offer crack files for various programs, such as <a href="https://crack4windows.com/">https://crack4windows.com/</a>, <a href="https://crackzsoft.me/">https://crackzsoft.me/</a>, <a href="https://crackhomes.com/">https://crackhomes.com/</a>, etc.</p> -<p>To use a crack file for Acme ID Card Maker, you need to download it from one of these websites and follow the instructions provided by them. 
Usually, you have to copy and paste the crack file into the installation folder of Acme ID Card Maker and replace it with the cracked one. You will then be able to use Acme ID Card Maker without entering a serial number.</p> - <h2>Pros and Cons of Using a Free Serial Keygen for Acme ID Card Maker 5.0</h2> -<p>Using a free serial keygen for Acme ID Card Maker might seem like a good idea, but it also comes with some drawbacks. Here are some of the pros and cons of using one:</p> - <h3>Pros:</h3> -<ul> -<li>You can save money by not paying for a license.</li> -<li>You can unlock all the features of the program and use it without limitations.</li> -<li>You can use the program as long as you want without worrying about expiration dates.</li> -</ul> - <h3>Cons:</h3> -<ul> -<li>You risk downloading malware that can infect your computer and steal your data.</li> -<li>You violate the copyright law and the terms of service of the software developer, which can result in legal consequences or penalties.</li> -<li>You compromise the quality and security of the software, which might not work properly or cause errors.</li> -<li>You deprive the software developer of their rightful income, which can affect their ability to maintain and improve the software.</li> -<li>You act unethically by using someone else's work without permission or compensation.</li> -</ul> - <h2>Alternatives to Acme ID Card Maker 5.0</h2> -<p>If you are not comfortable with using a free serial keygen for Acme ID Card Maker, or you want to try other options, there are some alternatives to consider. Here are some of them:</p> - <h3>Business Card Maker</h3> -<p>This is another software program that allows you to create and print various types of cards, including ID cards, business cards, membership cards, loyalty cards, etc. It has a user-friendly interface and a large collection of templates and design elements. You can also import your own images and logos and customize the layout and colors. You can print your cards on any paper or cardstock, or save them as PDF or image files.</p> -<p>Business Card Maker is not free, but it offers a 30-day trial version that you can use to test its features. The full version costs $27 for a single user license or $47 for a multi-user license. You can download it from <a href="https://business-card-maker.com/">https://business-card-maker.com/</a>.</p> - <h3>Easy Bulk ID Card Creator</h3> -<p>This is a web-based tool that lets you create and print multiple ID cards at once. You can upload your own data and photos from a CSV file or an Excel spreadsheet, or enter them manually. You can also choose from various templates or create your own design with different fonts, colors, shapes, backgrounds, etc. You can preview your cards before printing them on any printer or saving them as PDF files.</p> -<p>Easy Bulk ID Card Creator is free to use for up to 10 cards per month. If you need more cards, you can upgrade to a premium plan that costs $9.99 per month for up to 100 cards, $19.99 per month for up to 500 cards, or $29.99 per month for unlimited cards. You can access it from <a href="https://easybulkidcardcreator.com/">https://easybulkidcardcreator.com/</a>.</p> - <h3>Advanced ID Creator Personal</h3> -<p>This is another software program that enables you to design and print your own ID cards with ease. You can select from over 200 templates or create your own design with various fonts, colors, shapes, logos, images, barcodes, etc. 
You can also import your own data and photos from a database or a text file, or enter them manually. You can print your cards on any printer or save them as PDF files.</p> -<p>Advanced ID Creator Personal is free to use for personal purposes only. If you want to use it for commercial purposes, you need to purchase a license that costs $54.95 for a single user license or $99.95 for a multi-user license. You can download it from <a href="https://www.advancedidcreator.com/">https://www.advancedidcreator.com/</a>.</p> - <h2>Conclusion</h2> -<p>In conclusion, Acme ID Card Maker is a software program that allows you to create and print professional-looking ID cards for yourself or your business. However, it is not free and requires a license fee to use the full version of the program. If you don't want to pay for a license, you might be tempted to look for a free serial keygen that can generate a valid activation code for you.</p> -<p>However, using a free serial keygen for Acme ID Card Maker is not only illegal but also risky, as you might end up downloading malware, violating the law, compromising the software quality and security, depriving the software developer of their income, and acting unethically. Therefore, we do not recommend using a free serial keygen for Acme ID Card Maker.</p> -<p>If you are looking for alternatives to Acme ID Card Maker, you can try other software programs such as Business Card Maker, Easy Bulk ID Card Creator, or Advanced ID Creator Personal. These programs offer similar features and functionality as Acme ID Card Maker but have different pricing plans and terms of use.</p> - <h2>Frequently Asked Questions</h2> -<ol> -<li><b>What is Acme ID Card Maker?</b><br>Acme ID Card Maker is a software program that allows you to design and print your own ID cards with ease.</li> -<li><b>What is a serial keygen?</b><br>A serial keygen is a software tool that can create random serial numbers that can be used to register or unlock a program.</li> -<li><b>How can I get a free serial keygen for Acme ID Card Maker?</b><br>You can get a free serial keygen for Acme ID Card Maker by using an online generator, an npm package, or a crack file.</li> -<li><b>What are the pros and cons of using a free serial keygen for Acme ID Card Maker?</b><br>The pros are that you can save money, unlock all features, and use the program without limitations. The cons are that you risk malware infections, legal issues, ethical concerns, software quality and security problems.</li> -<li><b>What are some alternatives to Acme ID Card Maker?</b><br>Some alternatives to Acme ID Card Maker are Business Card Maker, Easy Bulk ID Card Creator, and Advanced ID Creator Personal.</li> -</ol> - </p> 0a6ba089eb<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Cash Register Express Version 12.5 Crack Easy to Use and Reliable.md b/spaces/raedeXanto/academic-chatgpt-beta/Cash Register Express Version 12.5 Crack Easy to Use and Reliable.md deleted file mode 100644 index ee065bd4650066dcfb5699d55f80fe0f7a107b1a..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Cash Register Express Version 12.5 Crack Easy to Use and Reliable.md +++ /dev/null @@ -1,150 +0,0 @@ - -<h1>Cash Register Express Version 12.5 Crack: What You Need to Know</h1> -<p>If you are looking for a way to use Cash Register Express version 12.5 without paying for it, you might have come across some websites that offer a crack for this software. 
But before you download and install it, you should know what a crack is, why people use it, and what are the risks and consequences of using it. In this article, we will explain everything you need to know about Cash Register Express version 12.5 crack and provide some alternatives to using it.</p> -<h2>cash register express version 12.5 crack</h2><br /><p><b><b>DOWNLOAD</b> &#8230; <a href="https://tinourl.com/2uL3Op">https://tinourl.com/2uL3Op</a></b></p><br /><br /> - <h2>Introduction</h2> -<h3>What is Cash Register Express?</h3> -<p>Cash Register Express (CRE) is a retail point of sale (POS) software system that helps you manage your inventory, sales, customers, employees, and reports. It is designed for various types of retail stores, such as convenience stores, grocery stores, liquor stores, gift shops, car washes, and more. It can work with various hardware devices, such as barcode scanners, receipt printers, cash drawers, scales, and credit card readers. It can also integrate with other software applications, such as QuickBooks, Sage 50, and pcAmerica Restaurant Pro Express.</p> -<p>Cash Register Express is developed by pcAmerica, a company that has been providing POS solutions since 1985. The latest version of Cash Register Express is 12.5, which was released in 2012. It has many features and improvements over the previous versions, such as:</p> -<ul> -<li>A new user interface that is more intuitive and user-friendly</li> -<li>A new labor scheduler that helps you plan your staff shifts and payroll</li> -<li>A new customer loyalty program that allows you to reward your customers with points and discounts</li> -<li>A new gift card module that lets you sell and redeem gift cards</li> -<li>A new inventory matrix that allows you to track items with multiple attributes, such as size, color, style, etc.</li> -<li>A new multi-store module that allows you to manage multiple locations from one central database</li> -<li>A new e-commerce module that allows you to sell your products online through your own website or third-party platforms</li> -<li>And many more...</li> -</ul> -<p>Cash Register Express is a powerful and versatile software system that can help you run your retail business more efficiently and profitably. However, it is not a cheap software system. The price of Cash Register Express depends on the number of licenses, modules, and hardware devices you need. According to the official website of pcAmerica, the price range of Cash Register Express is from $599 to $4,999 per license.</p> - <h3>What is a crack?</h3> -<p>A crack is a type of software modification that bypasses or removes the copy protection or activation mechanism of another software program. A crack can be a file that replaces or modifies the original executable file of the program, or a code that generates a valid serial number or license key for the program. A crack can also be a combination of both.</p> -<p>The purpose of a crack is to allow the user to use the program without paying for it or without following the terms and conditions of the license agreement. 
A crack can also enable the user to access some features or functions that are otherwise restricted or disabled by the program.</p> - <h3>Why do people use cracks?</h3> -<p>People use cracks for various reasons, but the most common ones are:</p> -<ul> -<li>To save money: Some people use cracks because they cannot afford or do not want to pay for the program they want to use.</li> -<li>To test the program: Some people use cracks because they want to try the program before buying it or because they want to compare it with other similar programs.</li> -<li>To bypass restrictions: Some people use cracks because they want to use the program in a way that is not allowed or supported by the program itself or by the law.</li> -</ul> - <h2>Risks and Consequences of Using Cracks</h2> -<h3>Legal issues</h3> -<p>Using cracks is illegal in most countries and jurisdictions. It violates the intellectual property rights of the software developers and publishers who own the program. It also breaches the license agreement that you agree to when you install the program.</p> -<p>Using cracks can expose you to legal actions from the software owners or authorities. You can face civil lawsuits that can result in fines or damages. You can also face criminal charges that can result in imprisonment or community service.</p> -<p>cash register express 12.5 serials generator<br /> -cash register express 12.5 keygen<br /> -cash register express 12.5 activation code<br /> -cash register express 12.5 free trial download<br /> -cash register express 12.5 full version crack<br /> -cash register express 12.5 patch<br /> -cash register express 12.5 license key<br /> -cash register express 12.5 retail pos software<br /> -cash register express 12.5 inventory management<br /> -cash register express 12.5 review<br /> -cash register express 12.5 manual<br /> -cash register express 12.5 support<br /> -cash register express 12.5 update<br /> -cash register express 12.5 features<br /> -cash register express 12.5 system requirements<br /> -cash register express 12.5 price<br /> -cash register express 12.5 demo<br /> -cash register express 12.5 tutorial<br /> -cash register express 12.5 installation guide<br /> -cash register express 12.5 user guide<br /> -cash register express 12.5 error codes<br /> -cash register express 12.5 database error<br /> -cash register express 12.5 kitchen printer setup<br /> -cash register express 12.5 barcode scanner setup<br /> -cash register express 12.5 credit card processing<br /> -cash register express 12.5 gift card setup<br /> -cash register express 12.5 loyalty program setup<br /> -cash register express 12.5 coupon code setup<br /> -cash register express 12.5 discount setup<br /> -cash register express 12.5 tax setup<br /> -cash register express 12.5 refund policy setup<br /> -cash register express 12.5 customer display setup<br /> -cash register express 12.5 employee management<br /> -cash register express 12.5 labor scheduler<br /> -cash register express 12.5 time clock<br /> -cash register express 12.5 payroll reports<br /> -cash register express 12.5 sales reports<br /> -cash register express 12.5 inventory reports<br /> -cash register express 12.5 profit and loss reports<br /> -cash register express 12.5 security settings<br /> -cash register express 12.5 backup and restore data<br /> -cash register express 12.5 network setup<br /> -cash register express 12.5 multi-store setup<br /> -cash register express 12.5 cloud-based access<br /> -cash register express 12.5 integration with other 
software<br /> -cash register express 12.5 hardware compatibility list<br /> -cash register express 12.5 troubleshooting tips<br /> -cash register express version comparison chart</p> - <h3>Security threats</h3> -<p>Using cracks can expose you to security risks from malicious software or hackers. Many cracks are infected with viruses, malware, spyware, ransomware, trojans, worms, keyloggers, rootkits, or other harmful programs that can damage your computer system or steal your personal information.</p> -<p>Using cracks can also expose you to hacking attacks from other users who have access to your computer network or online accounts. They can exploit the vulnerabilities or backdoors created by the cracks to gain unauthorized access to your data or resources.</p> - <h3>Performance problems</h3> -<p>Using cracks can affect the performance and functionality of your computer system or software program. Many cracks are poorly designed or incompatible with your system specifications or software updates. They can cause errors, crashes, freezes, glitches, bugs, conflicts, or other problems that can interfere with your work or enjoyment.</p> -<p>Using cracks can also prevent you from receiving technical support or customer service from the software developers or providers. They can detect if you are using a cracked version of their program and deny you any assistance or warranty.</p> - <h3>Ethical concerns</h3> -<p>Using cracks can raise ethical questions about your integrity and responsibility as a user and consumer. By using cracks, you are depriving the software developers and publishers of their rightful income and recognition for their work and investment. You are also undermining their incentive and ability to improve their products and services.</p> -<p>Using cracks can also affect other users and consumers who pay for their software programs legitimately. You are creating an unfair competition and market distortion that can reduce the quality and availability of software products and services.</p> - <h2>Alternatives to Using Cracks</h2> -<h3>Buy a legitimate license</h3> -<p>The best alternative to using cracks is to buy a legitimate license for Cash Register Express version 12.5 from pcAmerica or its authorized resellers. This way, you can enjoy all the benefits and features of this software system without any risks or consequences.</p> -<p>You can also save money by choosing a license option that suits your needs and budget. For example:</p> -<ul> -<li>You can buy a single-user license if you only need one computer station for your store.</li> -<li>You can buy a multi-user license if you need more than one computer station for your store.</li> -<li>You can buy a multi-store license if you need to manage multiple locations from one central database.</li> -<li>You can buy additional modules if you need more functionality for your store.</li> -<li>You can buy hardware devices if you need them for your store.</li> -</ul> - <h3>Use a free or open source software</h3> -<p>If you cannot afford or do not want to buy Cash Register Express version 12.5 license, you can look for other free or open source software programs that offer similar features and functions for retail point of sale systems.</p> - <p>Free software programs are those that are available at no cost for anyone to download and use without any restrictions. Open source software programs are those that have their source code available for anyone to inspect, modify, or distribute without any restrictions. 
Some examples of free or open source software programs for retail point of sale systems are:</p> - <ul><li><a href="https://www.chromis.co.uk/">Chromis POS</a>: A free POS system based on Java that works with Windows, Linux, or Mac OS X. It supports multiple languages, currencies, taxes, and payment methods. It also integrates with barcode scanners, receipt printers, cash drawers, <h3>Use a trial or demo version</h3> -<p>If you want to try Cash Register Express version 12.5 before buying it, you can download and install a trial or demo version from the official website of pcAmerica. The trial or demo version allows you to use the software system for a limited period of time or with limited features and functions.</p> -<p>The trial or demo version can help you evaluate the software system and decide if it meets your needs and expectations. However, you should not use the trial or demo version for commercial purposes or beyond the allowed period or scope. You should also uninstall the trial or demo version after you finish using it.</p> - <h2>Conclusion</h2> -<p>Cash Register Express version 12.5 is a retail point of sale software system that can help you manage your inventory, sales, customers, employees, and reports. It is a powerful and versatile software system that can work with various types of retail stores and hardware devices. It is also a costly software system that requires a license to use legally and properly.</p> -<p>A crack is a type of software modification that bypasses or removes the copy protection or activation mechanism of another software program. A crack can allow you to use Cash Register Express version 12.5 without paying for it or without following the terms and conditions of the license agreement. However, using a crack can also expose you to legal issues, security threats, performance problems, and ethical concerns.</p> -<p>Therefore, using cracks is not a good idea and not worth the risks and consequences. Instead, you should consider some alternatives to using cracks, such as buying a legitimate license, using a free or open source software, or using a trial or demo version. These alternatives can help you use Cash Register Express version 12.5 or similar software systems in a safe and legal way.</p> - <h2>FAQs</h2> -<ul> -<li><b>Q: Where can I buy Cash Register Express version 12.5 license?</b></li> -<li>A: You can buy Cash Register Express version 12.5 license from pcAmerica or its authorized resellers. You can visit their official website at <a href="https://www.pcamerica.com/">https://www.pcamerica.com/</a> for more information.</li> -<li><b>Q: How much does Cash Register Express version 12.5 license cost?</b></li> -<li>A: The price of Cash Register Express version 12.5 license depends on the number of licenses, modules, and hardware devices you need. 
According to the official website of pcAmerica, the price range of Cash Register Express is from $599 to $4,999 per license.</li> -<li><b>Q: What are the system requirements for Cash Register Express version 12.5?</b></li> -<li>A: The system requirements for Cash Register Express version 12.5 are:</li> -<ul> -<li>Operating system: Windows XP SP3, Windows Vista SP1, Windows 7 SP1, Windows 8/8.1, Windows 10</li> -<li>Processor: Intel Pentium 4 2 GHz or higher</li> -<li>Memory: 2 GB RAM or higher</li> -<li>Hard disk space: 20 GB free space or higher</li> -<li>Display: 1024 x 768 resolution or higher</li> -<li>Internet connection: Required for activation and updates</li> -</ul> -<li><b>Q: What are some features of Cash Register Express version 12.5?</b></li> -<li>A: Some features of Cash Register Express version 12.5 are:</li> -<ul> -<li>A new user interface that is more intuitive and user-friendly</li> -<li>A new labor scheduler that helps you plan your staff shifts and payroll</li> -<li>A new customer loyalty program that allows you to reward your customers with points and discounts</li> -<li>A new gift card module that lets you sell and redeem gift cards</li> -<li>A new inventory matrix that allows you to track items with multiple attributes, such as size, color, style, etc.</li> -<li>A new multi-store module that allows you to manage multiple locations from one central database</li> -<li>A new e-commerce module that allows you to sell your products online through your own website or third-party platforms</li> -<li>And many more...</li> -</ul> -<li><b>Q: What are some risks and consequences of using cracks?</b></li> -<li>A: Some risks and consequences of using cracks are:</li> -<ul> -<li>Legal issues: You can face civil lawsuits or criminal charges from the software owners or authorities for violating their intellectual property rights or license agreement.</li> -<li>Security threats: You can get infected with viruses, malware, spyware, ransomware, trojans, worms, keyloggers, rootkits, or other harmful programs that can damage your computer system or steal your personal information.</li> -<li>Performance problems: You can experience errors, crashes, freezes, glitches, bugs, conflicts, or other problems that can interfere with your work or enjoyment.</li> -<li>Ethical concerns: You can raise ethical questions about your integrity and responsibility as a user and consumer by depriving the software developers and publishers of their rightful income and recognition for their work and investment.</li> -</ul> - </p> 0a6ba089eb<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Descarga Gratis el PDF de Analisis E Interpretacion De Estados Financieros por Abraham Perdomo Moreno.md b/spaces/raedeXanto/academic-chatgpt-beta/Descarga Gratis el PDF de Analisis E Interpretacion De Estados Financieros por Abraham Perdomo Moreno.md deleted file mode 100644 index 1ddf9da53b8c71e826c06af03e630b5611aa90fe..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Descarga Gratis el PDF de Analisis E Interpretacion De Estados Financieros por Abraham Perdomo Moreno.md +++ /dev/null @@ -1,152 +0,0 @@ - -<h1>Analisis E Interpretacion De Estados Financieros Abraham Perdomo Moreno PDF</h1> - <p>Los estados financieros son documentos que reflejan la situacion economica y financiera de una empresa en un periodo determinado. 
Son herramientas indispensables para la toma de decisiones de los administradores, inversionistas, acreedores y demas interesados en el desempeno de la organizacion.</p> - <p>Pero para que los estados financieros sean utiles, no basta con elaborarlos y presentarlos. Es necesario analizarlos e interpretarlos correctamente, aplicando tecnicas y metodos que permitan extraer informacion relevante y valiosa sobre la rentabilidad, liquidez, solvencia, eficiencia y crecimiento de la empresa.</p> -<h2>Analisis E Interpretacion De Estados Financieros Abraham Perdomo Moreno PDF</h2><br /><p><b><b>Download Zip</b> &#9999; <a href="https://tinourl.com/2uL3Ta">https://tinourl.com/2uL3Ta</a></b></p><br /><br /> - <p>En este articulo, te presentamos un libro que te ayudara a dominar el analisis e interpretacion de estados financieros de forma practica y sencilla. Se trata del libro "Analisis e Interpretacion de Estados Financieros" de Abraham Perdomo Moreno, un reconocido autor y experto en el tema. Te contaremos quien es Abraham Perdomo Moreno, que contiene su libro, como puedes descargarlo en PDF gratis y que beneficios te ofrece. Sigue leyendo y descubre todo lo que este libro puede hacer por ti.</p> - <h2>Introduccion</h2> - <h3>Que son los estados financieros y para que sirven</h3> - <p>Los estados financieros son reportes que muestran la situacion economica y financiera de una empresa en un periodo determinado. Los principales estados financieros son el balance general, el estado de resultados, el estado de cambios en el patrimonio, el estado de flujos de efectivo y las notas a los estados financieros.</p> -<p>Libro de Analisis E Interpretacion de Estados Financieros de Abraham Perdomo Moreno<br /> -Descargar Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno PDF Gratis<br /> -Resumen de Analisis E Interpretacion de Estados Financieros por Abraham Perdomo Moreno<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno 7ma Edicion<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Google Books<br /> -Solucionario de Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno<br /> -Ejercicios Resueltos de Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Editorial Thomson<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno ISBN 9706862633<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno 288 Paginas<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Economico Administrativas<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno VI Ciclo<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Tecnicas de Analisis<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Estructura Financiera<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Razones o Ratios Financieros<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Valores Absolutos y Relativos<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Rendimiento de los Recursos<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Cuadro de Mando de Dupont<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Activos o Inversiones Circulantes<br /> -Analisis E Interpretacion de Estados 
Financieros Abraham Perdomo Moreno Inmovilizaciones o Activos Fijos<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Deudas a Corto Plazo<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Capitales Permanentes<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Análisis Horizontal y Vertical<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Utilización de Razones Financieras<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Partes Interesadas<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Tipos de Comparaciones<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Razones Financieras Básicas<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Caso de Estudio<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Análisis y Discusión<br /> -Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Resultado de las Operaciones y Situación Financiera<br /> -Comprar Libro Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Online<br /> -Leer Libro Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno en Linea<br /> -Opiniones sobre Libro Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno<br /> -Reseña del Libro Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno<br /> -Indice del Libro Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno<br /> -Contenido del Libro Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno<br /> -Bibliografia del Libro Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno<br /> -Autor del Libro Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Biografia<br /> -Editorial del Libro Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Historia<br /> -Año del Libro Analisis E Interpretacion de Estados Financieros Abraham Perdomo Moreno Publicación</p> - <p>Los estados financieros sirven para:</p> - <ul> -<li>Mostrar la realidad financiera de la empresa a los administradores, inversionistas, acreedores y demas interesados.</li> -<li>Evaluar el desempeno historico y actual de la empresa en terminos de rentabilidad, liquidez, solvencia, eficiencia y crecimiento.</li> -<li>Proyectar el futuro financiero de la empresa y establecer metas y planes de accion.</li> -<li>Comparar la situacion financiera de la empresa con la de otras empresas del mismo sector o del mercado en general.</li> -<li>Cumplir con las obligaciones legales y tributarias de la empresa.</li> -</ul> - <h3>Que es el analisis e interpretacion de estados financieros y por que es importante</h3> - <p>El analisis e interpretacion de estados financieros es el proceso de examinar los datos contenidos en los estados financieros con el fin de extraer informacion relevante y valiosa sobre la situacion financiera de la empresa. 
El analisis e interpretacion de estados financieros se basa en la aplicacion de tecnicas y metodos como:</p> - <ul> -<li>El analisis vertical, que consiste en expresar cada partida del estado financiero como un porcentaje del total o del subtotal correspondiente.</li> -<li>El analisis horizontal, que consiste en comparar las partidas del estado financiero entre dos o mas periodos consecutivos o no consecutivos.</li> -<li>El analisis por ratios o razones financieras, que consiste en calcular indicadores que relacionan dos o mas partidas del estado financiero para medir aspectos como la rentabilidad, liquidez, solvencia, eficiencia y crecimiento.</li> -<li>El analisis por tendencias, que consiste en identificar patrones o comportamientos recurrentes o cambiantes en las partidas del estado financiero a lo largo del tiempo.</li> -<li>El analisis por comparacion, que consiste en contrastar las partidas del estado financiero con las de otras empresas del mismo sector o del mercado en general.</li> -</ul> - <p>El analisis e interpretacion de estados financieros es importante porque:</p> - <ul> -<li>Permite conocer la situacion financiera real de la empresa y detectar sus fortalezas y debilidades.</li> -<li>Ayuda a tomar decisiones acertadas sobre la gestion financiera de la empresa y mejorar su desempeno.</li> -<li>Facilita el control financiero de la empresa y la prevencion o solucion de problemas potenciales.</li> -<li>Aporta informacion util para la negociacion con proveedores, clientes, bancos y demas agentes externos.</li> -<li>Aumenta la confianza y credibilidad de la empresa ante los inversionistas, acreedores y demas interesados.</li> -</ul> - <h2>El libro de Abraham Perdomo Moreno</h2> - <h3>Quien es Abraham Perdomo Moreno y que experiencia tiene</h3> - <p>Abraham Perdomo Moreno es un contador publico titulado por la Universidad Nacional Autonoma de Mexico (UNAM) con mas de 40 anos de experiencia profesional. Ha sido profesor titular e investigador en diversas universidades e instituciones educativas como el Instituto Tecnologico Autonomo de Mexico (ITAM), el Instituto Tecnologico y de Estudios Superiores de Monterrey (ITESM), el Instituto Mexicano de Contadores Publicos (IMCP) y el Colegio Nacional de Educación Profesional Técnica (CONALEP).</p> - <p>Tambien ha sido consultor independiente en materia contable, financiera, fiscal y administrativa para empresas publicas y privadas. Ha participado como ponente y conferencista en diversos foros nacionales e internacionales sobre temas relacionados con su especialidad. Ha publicado varios libros y articulos sobre contabilidad, finanzas, impuestos, auditoria y administración.</p> - <h3>Que contiene el libro y como esta organizado</h3> - <p>El libro "Analisis e Interpretacion de Estados Financieros" es una obra que comprende el desarrollo de las tecnicas mas usuales para el analisis de los estados financieros. 
En el se incluyen las tecnicas de analisis basados en las situaciones de la estructura financiera, los metodos comparativos basados en porcentajes y numeros indice, asi como los ratios o razones financieras para luego considerar los aspectos relacionados con las tendencias, la comparación interempresarial y los indicadores no financieros.</p> - <p>El libro esta organizado en 10 capitulos que abordan los siguientes temas:</p> - <ol> -<li>Concepto e importancia del analisis e interpretacion de estados financieros.</li> -<li>Estructura basica de los estados financieros basicos: balance general, estado de resultados, estado de cambios en el patrimonio, estado de flujos de efectivo y notas a los estados financieros.</li> -<li>Tecnicas de analisis vertical: concepto, objetivo, metodologia, ejemplos y aplicaciones.</li> -<li>Tecnicas de analisis horizontal: concepto, objetivo, metodologia, ejemplos y aplicaciones.</li> -<li>Ratios o razones financieras: concepto, clasificacion, formulas, interpretacion, ejemplos y aplicaciones.</li> -<li>Analisis por tendencias: concepto, objetivo, metodologia, ejemplos <h3>Que beneficios ofrece el libro y a quien va dirigido</h3> - <p>El libro "Analisis e Interpretacion de Estados Financieros" ofrece los siguientes beneficios:</p> - <ul> -<li>Explica de forma clara y sencilla los conceptos, tecnicas y metodos para el analisis e interpretacion de estados financieros.</li> -<li>Proporciona ejemplos practicos y casos reales de empresas mexicanas e internacionales para ilustrar la aplicacion de las tecnicas y metodos.</li> -<li>Incluye ejercicios resueltos y propuestos para reforzar el aprendizaje y la autoevaluacion.</li> -<li>Actualiza la informacion contable y financiera de acuerdo con las normas internacionales de informacion financiera (NIIF).</li> -<li>Presenta un enfoque integral que abarca tanto el analisis financiero como el analisis no financiero de la empresa.</li> -</ul> - <p>El libro va dirigido a:</p> - <ul> -<li>Estudiantes de contabilidad, finanzas, administracion y carreras afines que quieran aprender o profundizar en el analisis e interpretacion de estados financieros.</li> -<li>Profesores de contabilidad, finanzas, administracion y carreras afines que quieran utilizar el libro como material de apoyo o referencia para sus clases.</li> -<li>Profesionales de contabilidad, finanzas, administracion y carreras afines que quieran actualizar o mejorar sus conocimientos y habilidades en el analisis e interpretacion de estados financieros.</li> -<li>Empresarios, inversionistas, acreedores y demas interesados en el desempeno financiero de las empresas que quieran aplicar el analisis e interpretacion de estados financieros para tomar mejores decisiones.</li> -</ul> - <h2>Como descargar el libro en PDF gratis</h2> - <h3>Los requisitos para acceder al libro en PDF</h3> - <p>Para descargar el libro en PDF gratis, necesitas cumplir con los siguientes requisitos:</p> - <ul> -<li>Tener una conexion a internet estable y segura.</li> -<li>Tener un dispositivo electronico compatible con el formato PDF, como una computadora, una tablet o un celular.</li> -<li>Tener un programa o aplicacion que te permita abrir y leer archivos PDF, como Adobe Reader, Google Chrome o Microsoft Edge.</li> -<li>Tener espacio suficiente en tu dispositivo electronico para almacenar el archivo PDF del libro.</li> -</ul> - <h3>Los pasos para descargar el libro en PDF</h3> - <p>Para descargar el libro en PDF gratis, debes seguir los siguientes pasos:</p> - <ol> -<li>Ingresa al sitio web 
https://contabilidadparatodos.com/libro-interpretacion-estados-financieros/ desde tu navegador preferido.</li> -<li>Desplazate hasta el final de la pagina y haz clic en el boton verde que dice "Descargar Libro".</li> -<li>Espera a que se abra una nueva ventana con el archivo PDF del libro.</li> -<li>Haz clic en el icono de descarga que aparece en la esquina superior derecha de la ventana o en la barra inferior del navegador.</li> -<li>Selecciona la carpeta o ubicacion donde quieres guardar el archivo PDF del libro en tu dispositivo electronico.</li> -<li>Haz clic en "Guardar" o "Aceptar" para confirmar la descarga.</li> -<li>Ahora puedes abrir y leer el archivo PDF del libro desde tu dispositivo electronico cuando quieras.</li> -</ol> - <h3>Las precauciones al descargar el libro en PDF</h3> - <p>Al descargar el libro en PDF gratis, debes tener en cuenta las siguientes precauciones:</p> - <ul> -<li>Asegurate de que el sitio web desde donde descargas el libro sea confiable y seguro. Evita los sitios web que te pidan datos personales, contrasenas o pagos para acceder al libro.</li> -<li>Asegurate de que el archivo PDF del libro sea original y completo. Evita los archivos PDF que esten incompletos, corruptos, modificados o infectados con virus.</li> -<li>Asegurate de respetar los derechos de autor del libro. Evita copiar, distribuir o comercializar el libro sin la autorizacion del autor o del editor. Recuerda que el libro es solo para uso personal y educativo.</li> -</ul> - <h2>Conclusion</h2> - <h3>Resumen de los puntos principales del articulo</h3> - <p>En este articulo, te hemos presentado un libro que te ayudara a dominar el analisis e interpretacion de estados financieros de forma practica y sencilla. Se trata del libro "Analisis e Interpretacion de Estados Financieros" de Abraham Perdomo Moreno, un reconocido autor y experto en el tema. Te hemos contado quien es Abraham Perdomo Moreno, que contiene su libro, como puedes descargarlo en PDF gratis y que beneficios te ofrece. Esperamos que este articulo te haya sido util e interesante.</p> - <h3>Recomendacion del libro y llamado a la accion</h3> - <p>Si quieres aprender o mejorar tus conocimientos y habilidades en el analisis e interpretacion de estados financieros, te recomendamos que descargues y leas el libro "Analisis e Interpretacion de Estados Financieros" de Abraham Perdomo Moreno. Estamos seguros de que este libro te aportara valiosa informacion y orientacion para entender y aplicar las tecnicas y metodos mas usuales para el analisis de los estados financieros. No esperes mas y descarga el libro ahora mismo desde este link: https://contabilidadparatodos.com/libro-interpretacion-estados-financieros/ . 
No te arrepentiras!</p> - <h3>FAQs</h3> - <p>A continuacion, te presentamos algunas preguntas frecuentes sobre el libro "Analisis e Interpretacion de Estados Financieros" de Abraham Perdomo Moreno:</p> - <ol> -<li><b>Pregunta:</b> ¿En que ano se publico la primera edicion del libro?</li> -<li><b>Respuesta:</b> La primera edicion del libro se publico en 1978 por la editorial ECASA.</li> -<li><b>Pregunta:</b> ¿Cuantas paginas tiene el libro?</li> -<li><b>Respuesta:</b> El libro tiene 288 paginas en su ultima edicion publicada en 2000 por la editorial International Thomson Editores.</li> -<li><b>Pregunta:</b> ¿El libro esta actualizado con las normas internacionales de informacion financiera (NIIF)?</li> -<li><b>Respuesta:</b> Si, el libro esta actualizado con las NIIF vigentes hasta la fecha de su publicacion.</li> -<li><b>Pregunta:</b> ¿El libro incluye ejercicios resueltos y propuestos?</li> -<li><b>Respuesta:</b> Si, el libro incluye ejercicios resueltos y propuestos al final de cada capitulo para reforzar el aprendizaje y la autoevaluacion.</li> -<li><b>Pregunta:</b> ¿El libro se puede conseguir en formato fisico?</li> -<li><b>Respuesta:</b> Si, el libro se puede conseguir en formato fisico en algunas librerias especializadas o por internet. Sin embargo, puede ser dificil encontrarlo debido a su antiguedad o escasez. Por eso te recomendamos que lo descargues en formato PDF gratis desde este link: https://contabilidadparatodos.com/libro-interpretacion-estados-financieros/ .</li> -</ol> - </p> 0a6ba089eb<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/ramiin2/AutoGPT/tests/smoke_test.py b/spaces/ramiin2/AutoGPT/tests/smoke_test.py deleted file mode 100644 index 1b9d643fc21f3703384a2bb4f2bd1d725f4dd418..0000000000000000000000000000000000000000 --- a/spaces/ramiin2/AutoGPT/tests/smoke_test.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Smoke test for the autogpt package.""" -import os -import subprocess -import sys - -import pytest - -from autogpt.commands.file_operations import delete_file, read_file - - -@pytest.mark.integration_test -def test_write_file() -> None: - """ - Test case to check if the write_file command can successfully write 'Hello World' to a file - named 'hello_world.txt'. - - Read the current ai_settings.yaml file and store its content. - """ - env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"} - ai_settings = None - if os.path.exists("ai_settings.yaml"): - with open("ai_settings.yaml", "r") as f: - ai_settings = f.read() - os.remove("ai_settings.yaml") - - try: - if os.path.exists("hello_world.txt"): - # Clean up any existing 'hello_world.txt' file before testing. - delete_file("hello_world.txt") - # Prepare input data for the test. - input_data = """write_file-GPT -an AI designed to use the write_file command to write 'Hello World' into a file named "hello_world.txt" and then use the task_complete command to complete the task. -Use the write_file command to write 'Hello World' into a file named "hello_world.txt". -Use the task_complete command to complete the task. -Do not use any other commands. - -y -5 -EOF""" - command = f"{sys.executable} -m autogpt" - - # Execute the script with the input data. - process = subprocess.Popen( - command, - stdin=subprocess.PIPE, - shell=True, - env={**os.environ, **env_vars}, - ) - process.communicate(input_data.encode()) - - # Read the content of the 'hello_world.txt' file created during the test. 
- content = read_file("hello_world.txt") - finally: - if ai_settings: - # Restore the original ai_settings.yaml file. - with open("ai_settings.yaml", "w") as f: - f.write(ai_settings) - - # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'. - assert content == "Hello World", f"Expected 'Hello World', got {content}" diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Blur (RUSENG) RePack By R.G Mechanics NASWARI ZOHAIB Serial Key LINK Keygen.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Blur (RUSENG) RePack By R.G Mechanics NASWARI ZOHAIB Serial Key LINK Keygen.md deleted file mode 100644 index 2f1129d35bc19ee1c6e2eb12e0f22b007b232c65..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Blur (RUSENG) RePack By R.G Mechanics NASWARI ZOHAIB Serial Key LINK Keygen.md +++ /dev/null @@ -1,10 +0,0 @@ -<h2>Blur (RUS|ENG) RePack By R.G Mechanics NASWARI ZOHAIB Serial Key Keygen</h2><br /><p><b><b>DOWNLOAD</b> &#128279; <a href="https://urlgoal.com/2uCL0w">https://urlgoal.com/2uCL0w</a></b></p><br /><br /> - -. -blur-rus-eng-repack-by-rg-mechanics-naswari-zohaib-serial-key-keygen-dougenr -v1.0-activator.jpg -blur-rus-eng-repack-by-rg-mechanics-naswari -zohaib-serial-key-keygen-dougenr-v1.0-activator.jpg. -Title: Series, Locke's Keys - 12 episodes out of 12. -Keys of Locke. -The Conjuring series watch online. -Download the series The Conjuring 1, 2, 3, 4 season for free via torrent all episodes in good quality hd 1080 will be available after the release in the cinema, which is scheduled for November 13, 2013 Night falls and evil descends on the city. 8a78ff9644<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Clip Studio Paint EX 1.9.7 Crack Key 2020 Download __HOT__.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Clip Studio Paint EX 1.9.7 Crack Key 2020 Download __HOT__.md deleted file mode 100644 index d9276559745dd049cb119d9470a7e1107cdfd93e..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Clip Studio Paint EX 1.9.7 Crack Key 2020 Download __HOT__.md +++ /dev/null @@ -1,18 +0,0 @@ -<h2>Clip Studio Paint EX 1.9.7 Crack Key 2020 Download</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://urlgoal.com/2uCMbG">https://urlgoal.com/2uCMbG</a></b></p><br /><br /> -<br /> -D.T. & Studio Recorderis the best tool for recording audio. It allows you to create the professional-sounding audio file for the presentation. With this app, you can record sound, audio for the presentation, voice, and then edit the audio file. It can also save the audio file to your music library or choose to save it on your PC. The application contains all the necessary features to edit and record sound and audio. - -A small and handy app to record audio from the microphone, CD, computer, and more. Just select your sound source and click the Start button. Easy and quick! You can record all the sound that is present in your room, from your computer, from a source outside the home, or even from the microphone. Audio recorder has an impressive set of features. - -The Clip Studio Audio Recorder Crack is the best sound recorder for Windows. With this audio recorder, you can record sound. This application lets you record and play the sound. 
By using the application, you can select the source of your sound, whether it is from the computer, the microphone, a digital recorder, or the music. By recording your sound, you can use it in the future for any purpose. - -The D3D Audio Recorder is an ideal software tool for recording sound. It records sound from various sound sources, like CD, microphone, network, and more. The application comes with a tool to help you record sound easily. You can easily access the audio recorder from the interface. It is possible to configure the volume and other related parameters. - -Have you ever wished to capture audio from a digital music player, a digital camera, a mobile phone or from an external audio input? Audio Recorder takes over and does it for you. The application has a great feature: You can import media into it as an audio input. You can record any sound you like. You can record a movie or a music. - -This audio recorder has a great function to record sound from your digital music player. This application allows you to record any sound as a digital file. This audio recorder will record the sound from your digital music player in a wav file and save it to your PC. To save the wav file, choose ‘Sound Recorder’ from your system’s ‘Start’ button. - -If you have an Android device or use any Android-powered device, you must install an audio recorder. The Android Audio Recorder is an excellent application that 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Surah As Sajdah Pdf Printer.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Surah As Sajdah Pdf Printer.md deleted file mode 100644 index 19189d39fd3d2d89b33e59b5d28b4714c7817bc3..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Surah As Sajdah Pdf Printer.md +++ /dev/null @@ -1,44 +0,0 @@ -<h2>Download Surah As Sajdah Pdf Printer</h2><br /><p><b><b>Download Zip</b> &#10038;&#10038;&#10038; <a href="https://urlgoal.com/2uCMOp">https://urlgoal.com/2uCMOp</a></b></p><br /><br /> -<br /> -.Best Places to Work - -Our Best Places to Work program is based on the most recent edition of the EEOC's Workplace Fairness Handbook and a best practice manual for human resources professionals. The book contains hundreds of best practices and ideas and reflects the EEOC's vision for how every workplace should be a safe and supportive place to work. - -For managers and supervisors, the EEOC is pleased to recognize the companies that recognize and value their employees as productive workers. The EEOC awards these Best Places to Work only to companies that meet or exceed the qualifications for the program. - -To qualify for consideration, a company must have a minimum of 500 employees (the vast majority must be in the United States), offer employees access to a training program, and offer ongoing training and professional development. - -All Best Places to Work recipients are notified in early April and will be recognized at a reception in April or May, depending on their company size.Q: - -Getting "Document Has No Headers" error with mongodb - -I am trying to connect to mongodb with driver 2.0.0 (Python 2.7) and this line is giving me an error: - -from pymongo import MongoClient, Connections - -print MongoClient(host="localhost",port=27017,database="test") - -What am I doing wrong? 
- -A: - -If you are using PyMongo version = 2.10, then use this: - -print MongoClient("localhost", 27017, connect_options='user_agent': 'test') - -Use the connect_options dictionary to specify: - -"database": the name of the database you want to use - -"ssl": use SSL for the connection (only for connections to MongoDB Atlas and MongoDB Enterprise) - -"user_agent": a user agent name to specify the software that sent the request - -var searchData= - -[ - - ['_4emixmap_5fiter_5fiter_5f',['_Mixmap_iter_iter',['../structtjva__lib_1_1Mixmap__iter.html#aeeb4d7480 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Fm8 Ritma Tallava Virtuales Caligrafia Iberica Mspaint Trailers.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Fm8 Ritma Tallava Virtuales Caligrafia Iberica Mspaint Trailers.md deleted file mode 100644 index 1d8076132d127f768ba9c2220faef66fdb8c79f4..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Fm8 Ritma Tallava Virtuales Caligrafia Iberica Mspaint Trailers.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Fm8 Ritma Tallava virtuales caligrafia iberica mspaint trailers</h2><br /><p><b><b>Download</b> &#9999; <a href="https://urlgoal.com/2uCL1N">https://urlgoal.com/2uCL1N</a></b></p><br /><br /> - -34b9be2e56. Fm8 Ritma Tallava virtuales caligrafia iberica mspaint trailers · HD Online Player (patron mutlu son istiyor full hd izl) · EaseUS Partition Master 13.8 ... 4d29de3e1b<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/renatotn7/EspacoTeste/README.md b/spaces/renatotn7/EspacoTeste/README.md deleted file mode 100644 index d3394c57eb306b753e7fae7b0cff3090220dda70..0000000000000000000000000000000000000000 --- a/spaces/renatotn7/EspacoTeste/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: EspacoTeste -emoji: 📉 -colorFrom: yellow -colorTo: red -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/rishi9440/remove-photo-background/README.md b/spaces/rishi9440/remove-photo-background/README.md deleted file mode 100644 index 39c4d94a24ea8c116fe3752caccc373539ed1cb7..0000000000000000000000000000000000000000 --- a/spaces/rishi9440/remove-photo-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Remove Photo Background -emoji: 😻 -colorFrom: green -colorTo: indigo -sdk: streamlit -sdk_version: 1.2.0 -python_version: 3.9.5 -app_file: app.py -pinned: false -duplicated_from: aryadytm/remove-photo-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/riyueyiming/gpt/chatgpt - windows.bat b/spaces/riyueyiming/gpt/chatgpt - windows.bat deleted file mode 100644 index 0b78fdc3a559abd692e3a9e9af5e482124d13a99..0000000000000000000000000000000000000000 --- a/spaces/riyueyiming/gpt/chatgpt - windows.bat +++ /dev/null @@ -1,14 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" - -REM The web page can be accessed with delayed start http://127.0.0.1:7860/ -ping -n 5 127.0.0.1>nul - -REM access chargpt via your default browser -start "" "http://127.0.0.1:7860/" - - -echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). 
\ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/DP Technology Esprit 2013 Crack A Step-by-Step Tutorial for Beginners.md b/spaces/rorallitri/biomedical-language-models/logs/DP Technology Esprit 2013 Crack A Step-by-Step Tutorial for Beginners.md deleted file mode 100644 index 0acfd18ca82585cbfa9a999f378c069f8a9f4340..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/DP Technology Esprit 2013 Crack A Step-by-Step Tutorial for Beginners.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>dp technology esprit 2013 crack</h2><br /><p><b><b>DOWNLOAD</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://tinurll.com/2uzlq0">https://tinurll.com/2uzlq0</a></b></p><br /><br /> -<br /> - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/rorallitri/biomedical-language-models/logs/Gili-sms Full Version NEW.md b/spaces/rorallitri/biomedical-language-models/logs/Gili-sms Full Version NEW.md deleted file mode 100644 index bc79bc2e22a8e8f35015197de16fa141c0899eee..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Gili-sms Full Version NEW.md +++ /dev/null @@ -1,135 +0,0 @@ -<br /> -<h1>What You Need to Know About Gili-sms Full Version</h1> -<p>If you are looking for a software that can help you send and receive SMS from your computer or laptop, you might want to check out Gili-sms Full Version. Gili-sms is a software that connects your computer with a modem GSM or phone, and allows you to send and receive SMS easily from its interface. You can send up to 1.600 characters in one SMS, which is 10 times more than the standard limit. In this article, we will tell you more about the features, benefits and specifications of Gili-sms Full Version.</p> - -<h2>Features of Gili-sms Full Version</h2> -<p>Gili-sms Full Version has many features that make it a powerful and versatile software for SMS communication. 
Some of the features are:</p> -<h2>Gili-sms Full Version</h2><br /><p><b><b>Download File</b> &raquo;&raquo;&raquo; <a href="https://tinurll.com/2uznrC">https://tinurll.com/2uznrC</a></b></p><br /><br /> -<ul> -<li>You can send and receive SMS from your computer or laptop, without using your phone.</li> -<li>You can use multiple modems or phones to send and receive SMS simultaneously.</li> -<li>You can send long SMS up to 1.600 characters, which is equivalent to 10 normal SMS.</li> -<li>You can import and export contacts from Excel, CSV or TXT files.</li> -<li>You can create groups of contacts and send SMS to them easily.</li> -<li>You can schedule SMS to be sent at a specific date and time.</li> -<li>You can use templates and variables to personalize your SMS messages.</li> -<li>You can use auto-reply and auto-forward features to manage your incoming SMS.</li> -<li>You can use filters and rules to sort and organize your SMS messages.</li> -<li>You can backup and restore your SMS data securely.</li> -</ul> - -<h2>Benefits of Gili-sms Full Version</h2> -<p>Gili-sms Full Version can be used for various purposes and benefits, such as:</p> -<ul> -<li>You can use it for SMS marketing, to promote your products or services to your customers or prospects.</li> -<li>You can use it for SMS communication, to send information, reminders, notifications or alerts to your clients, partners, members or community.</li> -<li>You can use it for SMS service, to provide support, feedback, assistance or consultation to your users, subscribers or audience.</li> -<li>You can use it for SMS education, to send educational materials, quizzes, assignments or exams to your students, teachers or learners.</li> -<li>You can use it for SMS entertainment, to send jokes, quotes, greetings or trivia to your friends, family or fans.</li> -</ul> - -<h2>Specifications of Gili-sms Full Version</h2> -<p>Gili-sms Full Version is a computer application that can be installed and used on Windows 32-bit and 64-bit operating systems, from Windows 2000, XP to Windows 10. The installation includes the application and the database. Gili-sms Full Version can only be used for one computer and cannot be accessed with a LAN network. The database of Gili-sms Full Version cannot be accessed directly by the user. Gili-sms Full Version can store data up to 99 million contacts and 9 billion SMS messages. Gili-sms Full Version can connect up to 8 modems or phones on one computer.</p> - -<h2>How to Download and Install Gili-sms Full Version</h2> -<p>If you are interested in trying out Gili-sms Full Version, you can download it from the official website of the developer. You will need a key to download the file, which you can get by contacting the developer. You will also need a password to unzip or extract the file, which is Theboegis. After downloading and extracting the file, you can follow the installation and configuration guide that is included in the file. You will also need a modem GSM or phone that is compatible with Gili-sms Full Version, as well as a SIM card that has enough credit or balance for sending SMS.</p> - -<h2>Conclusion</h2> -<p>Gili-sms Full Version is a software that can help you send and receive SMS from your computer or laptop easily and efficiently. It has many features that make it a powerful and versatile software for SMS communication. It can be used for various purposes and benefits, such as marketing, communication, service, education and entertainment. 
It has some specifications that you need to consider before downloading and installing it on your computer. If you want to try out Gili-sms Full Version, you can download it from the official website of the developer with a key and a password.</p> -<h2>How to Use Gili-sms Full Version</h2> -<p>Gili-sms Full Version is easy to use and has a user-friendly interface. To use Gili-sms Full Version, you need to follow these steps:</p> -<ol> -<li>Install Gili-sms Full Version on your computer or laptop.</li> -<li>Connect your modem GSM or phone to your computer or laptop using a cable or Bluetooth.</li> -<li>Open Gili-sms Full Version and configure the settings, such as the port, the baud rate, the SMS center number and the sender name.</li> -<li>Create a new contact or import contacts from a file.</li> -<li>Type your SMS message or use a template or variable.</li> -<li>Select the recipient or group of recipients.</li> -<li>Click send or schedule your SMS to be sent later.</li> -<li>View the status and report of your SMS messages.</li> -</ol> - -<h2>Advantages of Gili-sms Full Version</h2> -<p>Gili-sms Full Version has many advantages that make it a superior software for SMS communication. Some of the advantages are:</p> -<p></p> -<ul> -<li>You can save time and money by sending and receiving SMS from your computer or laptop, without using your phone.</li> -<li>You can reach more people and increase your response rate by sending long SMS up to 1.600 characters.</li> -<li>You can improve your customer loyalty and satisfaction by sending personalized and customized SMS messages.</li> -<li>You can automate your SMS communication by using auto-reply, auto-forward, schedule, filter and rule features.</li> -<li>You can secure your SMS data by using backup and restore features.</li> -</ul> - -<h2>Testimonials of Gili-sms Full Version Users</h2> -<p>Gili-sms Full Version has been used by many users from different fields and sectors, such as business, education, religion, social and entertainment. Here are some testimonials of Gili-sms Full Version users:</p> -<blockquote>"Gili-sms Full Version is a great software for SMS marketing. I use it to send promotional SMS to my customers and prospects. It helps me increase my sales and profits." - Rizky, owner of an online shop</blockquote> -<blockquote>"Gili-sms Full Version is a useful software for SMS education. I use it to send educational materials, quizzes, assignments and exams to my students. It helps me enhance their learning outcomes." - Siti, teacher of an elementary school</blockquote> -<blockquote>"Gili-sms Full Version is a wonderful software for SMS entertainment. I use it to send jokes, quotes, greetings and trivia to my friends, family and fans. It helps me make them happy and entertained." - Dian, comedian and influencer</blockquote> -<h2>How to Get Gili-sms Full Version</h2> -<p>Gili-sms Full Version is not a free software, but you can get it at a reasonable price from the official website of the developer. You can choose from different packages and licenses, depending on your needs and budget. You can also get a free trial version for 30 days, which has some limitations and restrictions. 
To get Gili-sms Full Version, you need to follow these steps:</p> -<ol> -<li>Visit the official website of the developer and click on the download button.</li> -<li>Fill in the form with your name, email and phone number.</li> -<li>Choose the package and license that suits your needs and budget.</li> -<li>Make the payment using the available methods, such as bank transfer, credit card or PayPal.</li> -<li>Wait for the confirmation email with the download link and the activation code.</li> -<li>Download and install Gili-sms Full Version on your computer or laptop.</li> -<li>Enter the activation code to activate Gili-sms Full Version.</li> -<li>Enjoy using Gili-sms Full Version for your SMS communication.</li> -</ol> - -<h2>How to Get Support for Gili-sms Full Version</h2> -<p>If you have any questions, problems or issues with Gili-sms Full Version, you can get support from the developer or the community. You can use one of these methods to get support:</p> -<ul> -<li>You can visit the official website of the developer and check the FAQ section, which has answers to common questions and issues.</li> -<li>You can visit the official website of the developer and check the tutorial section, which has guides and videos on how to use Gili-sms Full Version.</li> -<li>You can visit the official website of the developer and check the forum section, which has discussions and tips from other users and experts.</li> -<li>You can contact the developer directly by email, phone or WhatsApp, and get a response within 24 hours.</li> -</ul> - -<h2>How to Update Gili-sms Full Version</h2> -<p>Gili-sms Full Version is constantly updated by the developer to improve its performance, features and security. You can update Gili-sms Full Version manually or automatically, depending on your preference. To update Gili-sms Full Version, you need to follow these steps:</p> -<ol> -<li>Open Gili-sms Full Version and click on the help menu.</li> -<li>Click on the check for updates option.</li> -<li>If there is a new update available, you will see a notification with the download link and the changelog.</li> -<li>Click on the download link and save the file on your computer or laptop.</li> -<li>Close Gili-sms Full Version and run the update file.</li> -<li>Follow the instructions on the screen to complete the update process.</li> -<li>Restart Gili-sms Full Version and enjoy using the latest version.</li> -</ol> -<h2>How to Uninstall Gili-sms Full Version</h2> -<p>If you want to uninstall Gili-sms Full Version from your computer or laptop, you need to follow these steps:</p> -<ol> -<li>Close Gili-sms Full Version and make sure it is not running in the background.</li> -<li>Open the control panel and click on the uninstall a program option.</li> -<li>Find Gili-sms Full Version in the list of programs and click on it.</li> -<li>Click on the uninstall button and follow the instructions on the screen to complete the uninstallation process.</li> -<li>Restart your computer or laptop to remove any leftover files or registry entries.</li> -</ol> - -<h2>How to Backup and Restore Gili-sms Full Version Data</h2> -<p>Gili-sms Full Version has a backup and restore feature that allows you to save and recover your SMS data securely. You can use this feature to prevent data loss or corruption due to system failure, virus attack, accidental deletion or other reasons. 
To backup and restore Gili-sms Full Version data, you need to follow these steps:</p> -<ol> -<li>Open Gili-sms Full Version and click on the file menu.</li> -<li>Click on the backup database option and choose a location to save your backup file.</li> -<li>Enter a name for your backup file and click on the save button.</li> -<li>Wait for the backup process to finish and close Gili-sms Full Version.</li> -<li>To restore your data, open Gili-sms Full Version and click on the file menu.</li> -<li>Click on the restore database option and choose the backup file that you want to restore.</li> -<li>Click on the open button and wait for the restore process to finish.</li> -<li>Close and reopen Gili-sms Full Version to see your restored data.</li> -</ol> - -<h2>How to Compare Gili-sms Full Version with Other Software</h2> -<p>Gili-sms Full Version is not the only software that can help you send and receive SMS from your computer or laptop. There are other software that have similar or different features, benefits and specifications. You can compare Gili-sms Full Version with other software to see which one suits your needs and preferences better. Here are some criteria that you can use to compare Gili-sms Full Version with other software:</p> -<ul> -<li>The price and license of the software.</li> -<li>The compatibility and requirements of the software.</li> -<li>The features and functions of the software.</li> -<li>The ease of use and interface of the software.</li> -<li>The support and update of the software.</li> -<li>The reviews and ratings of the software.</li> -</ul></p> 3cee63e6c2<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/saad-abdullah/knn-for-gdp-to-happiness-predictor/app.py b/spaces/saad-abdullah/knn-for-gdp-to-happiness-predictor/app.py deleted file mode 100644 index 415ff44e4de9ba3df09e676c2860578f2cd06e45..0000000000000000000000000000000000000000 --- a/spaces/saad-abdullah/knn-for-gdp-to-happiness-predictor/app.py +++ /dev/null @@ -1,108 +0,0 @@ -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import sklearn.linear_model -import os -import sklearn.neighbors -import gradio as gr - -datapath = os.path.join("datasets", "lifesat", "") -# Download the data -import urllib.request -# DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/" -# os.makedirs(datapath, exist_ok=True) -# for filename in ("oecd_bli_2015.csv", "gdp_per_capita.csv"): -# print("Downloading", filename) -# url = DOWNLOAD_ROOT + "datasets/lifesat/" + filename -# urllib.request.urlretrieve(url, datapath + filename) - -def prepare_country_stats(oecd_bli, gdp_per_capita): - oecd_bli = oecd_bli[oecd_bli["INEQUALITY"]=="TOT"] - oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator",values="Value") - gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True) - gdp_per_capita.set_index("Country", inplace=True) - full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita, - left_index=True, right_index=True) - full_country_stats.sort_values(by="GDP per capita", inplace=True) - - remove_indices = [0, 1, 6, 8, 33, 34, 35] - keep_indices = list(set(range(36)) - set(remove_indices)) - # print(full_country_stats.head()) - return full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[keep_indices] - -# oecd_bli = pd.read_csv(datapath + "oecd_bli_2015.csv", thousands=',') -# gdp_per_capita = pd.read_csv(datapath + "gdp_per_capita.csv",thousands=',',delimiter='\t', -# encoding='latin1', na_values="n/a") - -oecd_bli = 
pd.read_csv("oecd_bli_2015.csv", thousands=',') -gdp_per_capita = pd.read_csv("gdp_per_capita.csv",thousands=',',delimiter='\t', -encoding='latin1', na_values="n/a") - - -country_stats = prepare_country_stats(oecd_bli, gdp_per_capita) -X = np.c_[country_stats["GDP per capita"]] -y = np.c_[country_stats["Life satisfaction"]] - -models = [] -# Select a linear model -model1 = sklearn.neighbors.KNeighborsRegressor(n_neighbors=1) -model1.fit(X, y) -models.append(model1) - -model2 = sklearn.neighbors.KNeighborsRegressor(n_neighbors=2) -model2.fit(X, y) -models.append(model2) - -model3 = sklearn.neighbors.KNeighborsRegressor(n_neighbors=3) -model3.fit(X, y) -models.append(model3) - -model4 = sklearn.neighbors.KNeighborsRegressor(n_neighbors=4) -model4.fit(X, y) -models.append(model4) - -model5 = sklearn.neighbors.KNeighborsRegressor(n_neighbors=5) -model5.fit(X, y) -models.append(model5) - -model6 = sklearn.neighbors.KNeighborsRegressor(n_neighbors=6) -model6.fit(X, y) -models.append(model6) - -model7 = sklearn.neighbors.KNeighborsRegressor(n_neighbors=7) -model7.fit(X, y) -models.append(model7) - -model8 = sklearn.neighbors.KNeighborsRegressor(n_neighbors=8) -model8.fit(X, y) -models.append(model8) - -model9 = sklearn.neighbors.KNeighborsRegressor(n_neighbors=9) -model9.fit(X, y) -models.append(model9) - - - - - -import gradio as gr - -def sentence_builder(gdp, value_of_k): - # return f"""The Value of Happiness for {gdp} and K = {value_of_k} is {models[int(value_of_k)].predict([[gdp]])[0][0]}""" - return f"""The Value of Happiness for {gdp} and K = {value_of_k} is {models[int(value_of_k) - 1].predict([[gdp]])[0][0]}""" - - -demo = gr.Interface( - sentence_builder, - [ - gr.Slider(0, 99999, value=4), - gr.Dropdown([1, 2, 3, 4, 5, 6, 7, 8, 9]), - ], - "text", - examples=[ - [2000, 1], - [5508, 5], - ], -) - -demo.launch() diff --git a/spaces/samcaicn/bingai/src/app/page.tsx b/spaces/samcaicn/bingai/src/app/page.tsx deleted file mode 100644 index 0dff3431b098ce4fe282cc83fc87a93a28a43090..0000000000000000000000000000000000000000 --- a/spaces/samcaicn/bingai/src/app/page.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import dynamic from 'next/dynamic' - -const DynamicComponentWithNoSSR = dynamic( - () => import('../components/chat'), - { ssr: false } -) - -export default function IndexPage() { - return ( - <> - <div className="loading-spinner" /> - <DynamicComponentWithNoSSR /> - </> - ) -} diff --git a/spaces/scedlatioru/img-to-music/Microsoft-Plus-SuperPack-For-Windows-XP-VERIFIED-Full-Version.md b/spaces/scedlatioru/img-to-music/Microsoft-Plus-SuperPack-For-Windows-XP-VERIFIED-Full-Version.md deleted file mode 100644 index f0278b67b8d8cdfb50622aecdcd69a711c9f50df..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/Microsoft-Plus-SuperPack-For-Windows-XP-VERIFIED-Full-Version.md +++ /dev/null @@ -1,65 +0,0 @@ -## Microsoft Plus! SuperPack For Windows XP Full Version - - - -**Click Here ⭐ [https://www.google.com/url?q=https%3A%2F%2Ftlniurl.com%2F2twIRz&sa=D&sntz=1&usg=AOvVaw10fmEkzT9AfpXvmFf2fBvA](https://www.google.com/url?q=https%3A%2F%2Ftlniurl.com%2F2twIRz&sa=D&sntz=1&usg=AOvVaw10fmEkzT9AfpXvmFf2fBvA)** - - - -# How to Download and Install Microsoft Plus! SuperPack For Windows XP Full Version - - - -If you are looking for a way to enhance your Windows XP experience, you might want to check out Microsoft Plus! SuperPack For Windows XP Full Version. This is a bundle of two products that were released by Microsoft in 2004: Microsoft Plus! for Windows XP and Microsoft Plus! 
Digital Media Edition. These products offer a variety of features and applications that can make your Windows XP more fun, productive, and secure. - - - -In this article, we will show you how to download and install Microsoft Plus! SuperPack For Windows XP Full Version on your computer. We will also give you a brief overview of what you can expect from this package. - - - -## What is Microsoft Plus! SuperPack For Windows XP Full Version? - - - -Microsoft Plus! SuperPack For Windows XP Full Version is a collection of software that adds extra functionality and entertainment to your Windows XP operating system. It includes the following components: - - - -- **Microsoft Plus! for Windows XP**: This product was launched alongside the Windows XP operating system in 2001. It features desktop themes, screen savers, games, and utilities that can customize and optimize your PC. Some of the highlights are: - - - Plus! Themes: You can choose from four themes that change your wallpaper, icons, sounds, and cursor: Aquarium, Nature, da Vinci, and Space. - - - Plus! Screen Savers: You can enjoy eight screen savers that showcase stunning graphics and animations: Aquarium, Nature, da Vinci, Space, Robot Circus, Sand Pendulum, Mercury Pool, and Plus! My Pictures Premium. - - - Plus! Voice Command for Windows Media Player: You can control your Windows Media Player with voice commands such as "Play", "Pause", "Next", "Previous", etc. - - - Plus! Personal DJ: You can create personalized playlists based on your mood, genre, artist, or song preferences. - - - Plus! MP3 Converter: You can convert your audio files to MP3 format with high quality and speed. - - - Plus! CD Label Maker: You can design and print your own CD labels and jewel cases with templates and images. - - - Plus! Speaker Enhancement: You can improve the sound quality of your speakers with virtual surround sound and bass boost effects. - - - Plus! 3D Visualizations for Windows Media Player: You can watch three 3D visualizations that react to the music you play: Oddworld: Munch's Oddysee, Maxx's Kingdom, and Plus! Undersea Wonders. - - - Plus! Skins for Windows Media Player: You can change the appearance of your Windows Media Player with four skins that match the themes: Aquarium, Nature, da Vinci, and Space. - - - Plus! Hyperbowl: You can play a 3D bowling game that takes you through different environments such as ancient Rome, San Francisco, Tokyo, etc. - - - Plus! Russian Square: You can play a Tetris-like game with colorful blocks and Russian music. - - - Plus! Labyrinth: You can play a 3D maze game where you have to find your way out of various levels. - -- **Microsoft Plus! Digital Media Edition**: This product was released in 2003 as an update to Microsoft Plus! for Windows XP. It focuses on enhancing your digital media experience with tools for editing, organizing, sharing, and enjoying your photos, music, and videos. Some of the highlights are: - - - Plus! Photo Story 2: You can create slideshows from your digital photos with transitions, narration, music, and effects. - - - Plus! Party Mode for Windows Media Player: You can turn your PC into a jukebox that lets your guests choose songs from your music library. - - - Plus! Dancer: You can watch animated characters dance to the music you play on your PC. - - - Plus! Analog Recorder: You can digitize your old vinyl records and cassette tapes with noise reduction and automatic track splitting features. - - - Plus! 
Audio Converter: You can convert 1b8d091108 \ No newline at end of file diff --git a/spaces/sczhou/ProPainter/RAFT/extractor.py b/spaces/sczhou/ProPainter/RAFT/extractor.py deleted file mode 100644 index 9a9c759d1243d4694e8656c2f6f8a37e53edd009..0000000000000000000000000000000000000000 --- a/spaces/sczhou/ProPainter/RAFT/extractor.py +++ /dev/null @@ -1,267 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class ResidualBlock(nn.Module): - def __init__(self, in_planes, planes, norm_fn='group', stride=1): - super(ResidualBlock, self).__init__() - - self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1) - self.relu = nn.ReLU(inplace=True) - - num_groups = planes // 8 - - if norm_fn == 'group': - self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) - self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) - if not stride == 1: - self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) - - elif norm_fn == 'batch': - self.norm1 = nn.BatchNorm2d(planes) - self.norm2 = nn.BatchNorm2d(planes) - if not stride == 1: - self.norm3 = nn.BatchNorm2d(planes) - - elif norm_fn == 'instance': - self.norm1 = nn.InstanceNorm2d(planes) - self.norm2 = nn.InstanceNorm2d(planes) - if not stride == 1: - self.norm3 = nn.InstanceNorm2d(planes) - - elif norm_fn == 'none': - self.norm1 = nn.Sequential() - self.norm2 = nn.Sequential() - if not stride == 1: - self.norm3 = nn.Sequential() - - if stride == 1: - self.downsample = None - - else: - self.downsample = nn.Sequential( - nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) - - - def forward(self, x): - y = x - y = self.relu(self.norm1(self.conv1(y))) - y = self.relu(self.norm2(self.conv2(y))) - - if self.downsample is not None: - x = self.downsample(x) - - return self.relu(x+y) - - - -class BottleneckBlock(nn.Module): - def __init__(self, in_planes, planes, norm_fn='group', stride=1): - super(BottleneckBlock, self).__init__() - - self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0) - self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride) - self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0) - self.relu = nn.ReLU(inplace=True) - - num_groups = planes // 8 - - if norm_fn == 'group': - self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) - self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) - self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) - if not stride == 1: - self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) - - elif norm_fn == 'batch': - self.norm1 = nn.BatchNorm2d(planes//4) - self.norm2 = nn.BatchNorm2d(planes//4) - self.norm3 = nn.BatchNorm2d(planes) - if not stride == 1: - self.norm4 = nn.BatchNorm2d(planes) - - elif norm_fn == 'instance': - self.norm1 = nn.InstanceNorm2d(planes//4) - self.norm2 = nn.InstanceNorm2d(planes//4) - self.norm3 = nn.InstanceNorm2d(planes) - if not stride == 1: - self.norm4 = nn.InstanceNorm2d(planes) - - elif norm_fn == 'none': - self.norm1 = nn.Sequential() - self.norm2 = nn.Sequential() - self.norm3 = nn.Sequential() - if not stride == 1: - self.norm4 = nn.Sequential() - - if stride == 1: - self.downsample = None - - else: - self.downsample = nn.Sequential( - nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4) - - - def forward(self, x): - y = x - y = 
self.relu(self.norm1(self.conv1(y))) - y = self.relu(self.norm2(self.conv2(y))) - y = self.relu(self.norm3(self.conv3(y))) - - if self.downsample is not None: - x = self.downsample(x) - - return self.relu(x+y) - -class BasicEncoder(nn.Module): - def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0): - super(BasicEncoder, self).__init__() - self.norm_fn = norm_fn - - if self.norm_fn == 'group': - self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64) - - elif self.norm_fn == 'batch': - self.norm1 = nn.BatchNorm2d(64) - - elif self.norm_fn == 'instance': - self.norm1 = nn.InstanceNorm2d(64) - - elif self.norm_fn == 'none': - self.norm1 = nn.Sequential() - - self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) - self.relu1 = nn.ReLU(inplace=True) - - self.in_planes = 64 - self.layer1 = self._make_layer(64, stride=1) - self.layer2 = self._make_layer(96, stride=2) - self.layer3 = self._make_layer(128, stride=2) - - # output convolution - self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1) - - self.dropout = None - if dropout > 0: - self.dropout = nn.Dropout2d(p=dropout) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): - if m.weight is not None: - nn.init.constant_(m.weight, 1) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def _make_layer(self, dim, stride=1): - layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride) - layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1) - layers = (layer1, layer2) - - self.in_planes = dim - return nn.Sequential(*layers) - - - def forward(self, x): - - # if input is list, combine batch dimension - is_list = isinstance(x, tuple) or isinstance(x, list) - if is_list: - batch_dim = x[0].shape[0] - x = torch.cat(x, dim=0) - - x = self.conv1(x) - x = self.norm1(x) - x = self.relu1(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - - x = self.conv2(x) - - if self.training and self.dropout is not None: - x = self.dropout(x) - - if is_list: - x = torch.split(x, [batch_dim, batch_dim], dim=0) - - return x - - -class SmallEncoder(nn.Module): - def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0): - super(SmallEncoder, self).__init__() - self.norm_fn = norm_fn - - if self.norm_fn == 'group': - self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32) - - elif self.norm_fn == 'batch': - self.norm1 = nn.BatchNorm2d(32) - - elif self.norm_fn == 'instance': - self.norm1 = nn.InstanceNorm2d(32) - - elif self.norm_fn == 'none': - self.norm1 = nn.Sequential() - - self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3) - self.relu1 = nn.ReLU(inplace=True) - - self.in_planes = 32 - self.layer1 = self._make_layer(32, stride=1) - self.layer2 = self._make_layer(64, stride=2) - self.layer3 = self._make_layer(96, stride=2) - - self.dropout = None - if dropout > 0: - self.dropout = nn.Dropout2d(p=dropout) - - self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): - if m.weight is not None: - nn.init.constant_(m.weight, 1) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def _make_layer(self, dim, stride=1): - layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride) - layer2 = BottleneckBlock(dim, dim, 
self.norm_fn, stride=1) - layers = (layer1, layer2) - - self.in_planes = dim - return nn.Sequential(*layers) - - - def forward(self, x): - - # if input is list, combine batch dimension - is_list = isinstance(x, tuple) or isinstance(x, list) - if is_list: - batch_dim = x[0].shape[0] - x = torch.cat(x, dim=0) - - x = self.conv1(x) - x = self.norm1(x) - x = self.relu1(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.conv2(x) - - if self.training and self.dropout is not None: - x = self.dropout(x) - - if is_list: - x = torch.split(x, [batch_dim, batch_dim], dim=0) - - return x diff --git a/spaces/sdfhg5243/segmind-tiny-sd/README.md b/spaces/sdfhg5243/segmind-tiny-sd/README.md deleted file mode 100644 index 25b662699d81b9c8a295fd78f235dfaed371404a..0000000000000000000000000000000000000000 --- a/spaces/sdfhg5243/segmind-tiny-sd/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Segmind Tiny Sd -emoji: 🏢 -colorFrom: pink -colorTo: pink -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/segments-tobias/conex/espnet/nets/chainer_backend/rnn/__init__.py b/spaces/segments-tobias/conex/espnet/nets/chainer_backend/rnn/__init__.py deleted file mode 100644 index b7f177368e62a5578b8706300e101f831a3972ac..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/chainer_backend/rnn/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Initialize sub package.""" diff --git a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py b/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py deleted file mode 100644 index f490c4bbd598a35de43d36ceafcbd769e7ff21bf..0000000000000000000000000000000000000000 --- a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py +++ /dev/null @@ -1,43 +0,0 @@ -batch_size = 1 -modelname = "groundingdino" -backbone = "swin_B_384_22k" -position_embedding = "sine" -pe_temperatureH = 20 -pe_temperatureW = 20 -return_interm_indices = [1, 2, 3] -backbone_freeze_keywords = None -enc_layers = 6 -dec_layers = 6 -pre_norm = False -dim_feedforward = 2048 -hidden_dim = 256 -dropout = 0.0 -nheads = 8 -num_queries = 900 -query_dim = 4 -num_patterns = 0 -num_feature_levels = 4 -enc_n_points = 4 -dec_n_points = 4 -two_stage_type = "standard" -two_stage_bbox_embed_share = False -two_stage_class_embed_share = False -transformer_activation = "relu" -dec_pred_bbox_embed_share = True -dn_box_noise_scale = 1.0 -dn_label_noise_ratio = 0.5 -dn_label_coef = 1.0 -dn_bbox_coef = 1.0 -embed_init_tgt = True -dn_labelbook_size = 2000 -max_text_len = 256 -text_encoder_type = "bert-base-uncased" -use_text_enhancer = True -use_fusion_layer = True -use_checkpoint = True -use_transformer_ckpt = True -use_text_cross_attention = True -text_dropout = 0.0 -fusion_dropout = 0.0 -fusion_droppath = 0.1 -sub_sentence_present = True diff --git a/spaces/segments/panoptic-segment-anything/segment_anything/segment_anything/modeling/__init__.py b/spaces/segments/panoptic-segment-anything/segment_anything/segment_anything/modeling/__init__.py deleted file mode 100644 index 38e906243d898d7fc071c0fe218338c5cace3ea1..0000000000000000000000000000000000000000 --- a/spaces/segments/panoptic-segment-anything/segment_anything/segment_anything/modeling/__init__.py +++ /dev/null @@ -1,11 +0,0 
@@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from .sam import Sam -from .image_encoder import ImageEncoderViT -from .mask_decoder import MaskDecoder -from .prompt_encoder import PromptEncoder -from .transformer import TwoWayTransformer diff --git a/spaces/sentencebird/image-color-vectorization/cv_funcs.py b/spaces/sentencebird/image-color-vectorization/cv_funcs.py deleted file mode 100644 index 5012a4b6ba849d95cae87a570dd87c135f095907..0000000000000000000000000000000000000000 --- a/spaces/sentencebird/image-color-vectorization/cv_funcs.py +++ /dev/null @@ -1,67 +0,0 @@ -import cv2 -from PIL import Image -import numpy as np - -def get_concat_h(im1, im2): - dst = Image.new('RGB', (im1.width + im2.width, im1.height)) - dst.paste(im1, (0, 0)) - dst.paste(im2, (im1.width, 0)) - return dst - -def get_concat_v(im1, im2): - dst = Image.new('RGB', (im1.width, im1.height + im2.height)) - dst.paste(im1, (0, 0)) - dst.paste(im2, (0, im1.height)) - return dst - -def hsv_to_rgb(h, s, v): - bgr = cv2.cvtColor(np.array([[[h, s, v]]], dtype=np.uint8), cv2.COLOR_HSV2BGR)[0][0] - return [bgr[2]/255, bgr[1]/255, bgr[0]/255] - -# def remove_bg( -# path, -# BLUR = 21, -# CANNY_THRESH_1 = 10, -# CANNY_THRESH_2 = 200, -# MASK_DILATE_ITER = 10, -# MASK_ERODE_ITER = 10, -# MASK_COLOR = (0.0,0.0,1.0), -# ): -# img = cv2.imread(path) -# gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) - -# edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2) -# edges = cv2.dilate(edges, None) -# edges = cv2.erode(edges, None) - -# contour_info = [] -# contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) -# for c in contours: -# contour_info.append(( -# c, -# cv2.isContourConvex(c), -# cv2.contourArea(c), -# )) -# contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True) -# max_contour = contour_info[0] - -# mask = np.zeros(edges.shape) -# cv2.fillConvexPoly(mask, max_contour[0], (255)) - -# mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER) -# mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER) -# mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0) -# mask_stack = np.dstack([mask]*3) # Create 3-channel alpha mask - -# mask_stack = mask_stack.astype('float32') / 255.0 # Use float matrices, -# img = img.astype('float32') / 255.0 # for easy blending - -# masked = (mask_stack * img) + ((1-mask_stack) * MASK_COLOR) # Blend -# masked = (masked * 255).astype('uint8') # Convert back to 8-bit - -# c_blue, c_green, c_red = cv2.split(img) - -# img_a = cv2.merge((c_red, c_green, c_blue, mask.astype('float32') / 255.0)) -# index = np.where(img_a[:, :, 3] == 0) -# #img_a[index] = [1.0, 1.0, 1.0, 1.0] -# return img_a \ No newline at end of file diff --git a/spaces/shabnam91/Sanskrit-TTS/utils/cleaner_utils.py b/spaces/shabnam91/Sanskrit-TTS/utils/cleaner_utils.py deleted file mode 100644 index 6cf6058850f2dad34e43a7946fc513a904e9620e..0000000000000000000000000000000000000000 --- a/spaces/shabnam91/Sanskrit-TTS/utils/cleaner_utils.py +++ /dev/null @@ -1,112 +0,0 @@ -import re -def run(): - - # The path to the local git repo for Indic NLP library - INDIC_NLP_LIB_HOME=r"./indic_nlp_library" - - # The path to the local git repo for Indic NLP Resources - INDIC_NLP_RESOURCES=r"./indic_nlp_resources" - import sys - sys.path.append(r'{}'.format(INDIC_NLP_LIB_HOME)) - - from indicnlp import common - common.set_resources_path(INDIC_NLP_RESOURCES) - - from 
indicnlp import loader - loader.load() - -run() - -from indicnlp.normalize.indic_normalize import IndicNormalizerFactory -from indicnlp.tokenize import sentence_tokenize -from indicnlp.syllable import syllabifier - -lang='sa' -factory=IndicNormalizerFactory() -normalizer=factory.get_normalizer("hi") -DEPENDENT_VOWELS = ["ा", "ि", "ी", "ु", "ू", "े", "ै", "ो", "ौ", "ं", "ः", "ृ", "ॄ"] - -dict_num = {"०": "शून्य", "१": "एक", "२": "द्वि", "३": "त्रि", - "४": "चतुर्", "५": "पञ्च", "६": "षट्", "७": "सप्त", "८": "अष्ट", "९": "नव"} - -def tokenize_sentence(text): - '''Tokenize a paragraph into sentences''' - sentences = sentence_tokenize.sentence_split(text, lang='sa') - return sentences - -def clean_text(text): - processed_text = re.sub(r'\+ +', '', text) - processed_text = re.sub(': +', '\n \n', processed_text) - processed_text = re.sub(r'\+ ।', '\n \n', processed_text) - processed_text = re.sub(r'\+$', '', processed_text) - return processed_text - -def syllabify_text(text): - text_list = [] - #Syllabify text - for char in text: - if char in DEPENDENT_VOWELS: - char = "(" + char + ")" - text_list.append(char) - else: - text_list.append(char) - - full_text = " + ".join(text_list).replace("'", "") - return full_text - - -def normalize_text(text): - output_string = "" - #Map sanskrit numbers to their normalized form. - for char in text: - if char in dict_num: - output_string += dict_num[char] - else: - output_string += char - return output_string - - -def preprocess_text(text): - '''Cleans, tokenizes and normalizes text''' - #Normalize text - normalized_text = normalize_text(text) - - #Tokenize text. - tokenized_text = tokenize_sentence(normalized_text) - tokenized_text = "\n".join(tokenized_text) - - #Syllabify_text - syllabified_text = syllabify_text(tokenized_text) - - #Clean text - cleaned_text = clean_text(syllabified_text) - - #Remove unnecessary characters from a string. - text_cleaned = [] - for index, text in enumerate(cleaned_text.split('\n')): - if text.startswith('+'): - text = text[2:] - - elif text.startswith(' +'): - text = text[3:] - - elif text.endswith('+') or text.endswith(' +'): - text = text[:-2] - - text_cleaned.append(text) - - text_cleaned_str = "\n".join(text_cleaned) - - return text_cleaned_str - - -# DEFAULT_TEXT = """तो क्या विश्व कप 2019 में मैच का बॉस टॉस है? यानी मैच में हार-जीत में \ -# टॉस की भूमिका अहम है? 
आप ऐसा सोच सकते हैं। विश्वकप के अपने-अपने पहले मैच में बुरी तरह हारने वाली एशिया की दो टीमों \ -# पाकिस्तान और श्रीलंका के कप्तान ने हालांकि अपने हार के पीछे टॉस की दलील तो नहीं दी, लेकिन यह जरूर कहा था कि वह एक अहम टॉस हार गए थे।""" -# DEFAULT_TEXT='संस्कृतम् जगतः एकतमा अतिप्राचीना समृद्धा शास्त्रीया च भाषासु वर्तते । संस्कृतं भारतस्य जगत: वा भाषासु एकतमा‌ प्राचीनतमा ।' -DEFAULT_TEXT = "अयं द्वितीयशब्दः २ अस्ति। प्रथमः शब्दः १ अस्ति। अन्ये शब्दाः सर्वे द्वितीयं शब्दं प्रयोजयन्ति। इत्थं सप्ततिः शब्दाः लिखिताः सन्ति। अस्मिन लेखने सर्वे अक्षराः संस्कृते लिखिताः सन्ति। अन्ये लिखन्ति ३, ४, ५ इत्यादि। तथापि, अहं एकं अक्षरं एव उपयोगामि।" - -print(f"Default text is: {DEFAULT_TEXT}") -print('\n \n') -NORMALIZED_TEXT = preprocess_text(DEFAULT_TEXT) -print(f"Syllabified text is: {NORMALIZED_TEXT}") diff --git a/spaces/shengyi-qian/3DOI/monoarti/transformer.py b/spaces/shengyi-qian/3DOI/monoarti/transformer.py deleted file mode 100644 index 2b0e5b8e6ee06a30de4593a81835a620da446bd2..0000000000000000000000000000000000000000 --- a/spaces/shengyi-qian/3DOI/monoarti/transformer.py +++ /dev/null @@ -1,432 +0,0 @@ -from typing import List, Optional, Tuple -import torch -from torch import nn -import torch.nn.functional as F - -from . import axis_ops, ilnr_loss -from .vnl_loss import VNL_Loss -from .midas_loss import MidasLoss -from .detr.detr import MLP -from .detr.transformer import Transformer -from .detr.backbone import Backbone, Joiner -from .detr.position_encoding import PositionEmbeddingSine -from .detr.misc import nested_tensor_from_tensor_list, interpolate -from .detr import box_ops -from .detr.segmentation import ( - MHAttentionMap, MaskHeadSmallConv, dice_loss, sigmoid_focal_loss -) - - -class INTR(torch.nn.Module): - """ - Implement Interaction 3D Transformer. - """ - - def __init__( - self, - backbone_name = 'resnet50', - image_size = [192, 256], - ignore_index = -100, - num_classes = 1, - num_queries = 15, - freeze_backbone = False, - transformer_hidden_dim = 256, - transformer_dropout = 0.1, - transformer_nhead = 8, - transformer_dim_feedforward = 2048, - transformer_num_encoder_layers = 6, - transformer_num_decoder_layers = 6, - transformer_normalize_before = False, - transformer_return_intermediate_dec = True, - layers_movable = 3, - layers_rigid = 3, - layers_kinematic = 3, - layers_action = 3, - layers_axis = 2, - layers_affordance = 3, - affordance_focal_alpha = 0.95, - axis_bins = 30, - depth_on = True, - ): - """ Initializes the model. - Parameters: - backbone: torch module of the backbone to be used. See backbone.py - transformer: torch module of the transformer architecture. See transformer.py - num_classes: number of object classes - num_queries: number of object queries, ie detection slot. This is the maximal number of objects - DETR can detect in a single image. For COCO, we recommend 100 queries. - aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
- """ - super().__init__() - - self._ignore_index = ignore_index - self._image_size = image_size - self._axis_bins = axis_bins - self._affordance_focal_alpha = affordance_focal_alpha - - # backbone - backbone_base = Backbone(backbone_name, not freeze_backbone, True, False) - N_steps = transformer_hidden_dim // 2 - position_embedding = PositionEmbeddingSine(N_steps, normalize=True) - backbone = Joiner(backbone_base, position_embedding) - backbone.num_channels = backbone_base.num_channels - self.backbone = backbone - - self.transformer = Transformer( - d_model=transformer_hidden_dim, - dropout=transformer_dropout, - nhead=transformer_nhead, - dim_feedforward=transformer_dim_feedforward, - num_encoder_layers=transformer_num_encoder_layers, - num_decoder_layers=transformer_num_decoder_layers, - normalize_before=transformer_normalize_before, - return_intermediate_dec=transformer_return_intermediate_dec, - ) - hidden_dim = self.transformer.d_model - self.hidden_dim = hidden_dim - nheads = self.transformer.nhead - - self.num_queries = num_queries - - # before transformer, input_proj maps 2048 channel resnet50 output to 512-channel - # transformer input - self.input_proj = nn.Conv2d(self.backbone.num_channels, hidden_dim, kernel_size=1) - - # query mlp maps 2d keypoint coordinates to 256-dim positional encoding - self.query_mlp = MLP(2, hidden_dim, hidden_dim, 2) - - # bbox MLP - self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) - - if layers_movable > 1: - self.movable_embed = MLP(hidden_dim, hidden_dim, 3, layers_movable) - elif layers_movable == 1: - self.movable_embed = nn.Linear(hidden_dim, 3) - else: - raise ValueError("not supported") - - if layers_rigid > 1: - self.rigid_embed = MLP(hidden_dim, hidden_dim, 2, layers_rigid) - elif layers_rigid == 1: - #self.rigid_embed = nn.Linear(hidden_dim, 2) - self.rigid_embed = nn.Linear(hidden_dim, 3) - else: - raise ValueError("not supported") - - if layers_kinematic > 1: - self.kinematic_embed = MLP(hidden_dim, hidden_dim, 3, layers_kinematic) - elif layers_kinematic == 1: - self.kinematic_embed = nn.Linear(hidden_dim, 3) - else: - raise ValueError("not supported") - - if layers_action > 1: - self.action_embed = MLP(hidden_dim, hidden_dim, 3, layers_action) - elif layers_action == 1: - self.action_embed = nn.Linear(hidden_dim, 3) - else: - raise ValueError("not supported") - - if layers_axis > 1: - #self.axis_embed = MLP(hidden_dim, hidden_dim, 4, layers_axis) - self.axis_embed = MLP(hidden_dim, hidden_dim, 3, layers_axis) - - # classification - # self.axis_embed = MLP(hidden_dim, hidden_dim, self._axis_bins * 2, layers_axis) - elif layers_axis == 1: - self.axis_embed = nn.Linear(hidden_dim, 3) - else: - raise ValueError("not supported") - - # affordance - if layers_affordance > 1: - self.aff_embed = MLP(hidden_dim, hidden_dim, 2, layers_affordance) - elif layers_affordance == 1: - self.aff_embed = nn.Linear(hidden_dim, 2) - else: - raise ValueError("not supported") - - # affordance head - self.aff_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0) - self.aff_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim, nheads) - - # mask head - self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0) - self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim, nheads) - - # depth head - self._depth_on = depth_on - if self._depth_on: - self.depth_query = nn.Embedding(1, hidden_dim) - self.depth_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, 
dropout=0.0) - self.depth_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim, nheads) - self.depth_loss = ilnr_loss.MEADSTD_TANH_NORM_Loss() - fov = torch.tensor(1.0) - focal_length = (image_size[1] / 2 / torch.tan(fov / 2)).item() - self.vnl_loss = VNL_Loss(focal_length, focal_length, image_size) - self.midas_loss = MidasLoss(alpha=0.1) - - def freeze_layers(self, names): - """ - Freeze layers in 'names'. - """ - for name, param in self.named_parameters(): - for freeze_name in names: - if freeze_name in name: - #print(name + ' ' + freeze_name) - param.requires_grad = False - - def forward( - self, - image: torch.Tensor, - valid: torch.Tensor, - keypoints: torch.Tensor, - bbox: torch.Tensor, - masks: torch.Tensor, - movable: torch.Tensor, - rigid: torch.Tensor, - kinematic: torch.Tensor, - action: torch.Tensor, - affordance: torch.Tensor, - affordance_map: torch.FloatTensor, - depth: torch.Tensor, - axis: torch.Tensor, - fov: torch.Tensor, - backward: bool = True, - **kwargs, - ): - """ - Model forward. Set backward = False if the model is inference only. - """ - device = image.device - - # number of queries can be different in runtime - num_queries = keypoints.shape[1] - - # DETR forward - samples = image - if isinstance(samples, (list, torch.Tensor)): - samples = nested_tensor_from_tensor_list(samples) - features, pos = self.backbone(samples) - bs = features[-1].tensors.shape[0] - src, mask = features[-1].decompose() - assert mask is not None - - # sample keypoint queries from the positional embedding - use_sine = False - if use_sine: - anchors = keypoints.float() - anchors_float = anchors.clone() - anchors_float = anchors_float.reshape(-1, 2) - anchors_float[:, 0] = ((anchors_float[:, 0] / self._image_size[1]) - 0.5) * 2 - anchors_float[:, 1] = ((anchors_float[:, 1] / self._image_size[0]) - 0.5) * 2 - anchors_float = anchors_float.unsqueeze(1).unsqueeze(1) - # 4x256x1x1 - keypoint_queries = F.grid_sample( - #pos[0].repeat(self.num_queries, 1, 1, 1), - pos[-1].repeat(self.num_queries, 1, 1, 1), - anchors_float, - mode='nearest', - align_corners=True - ) - # 4 x 10 (number of object queires) x 256 - keypoint_queries = keypoint_queries.squeeze().reshape(-1, self.num_queries, self.hidden_dim) - else: - # use learned MLP to map postional encoding - anchors = keypoints.float() - anchors_float = anchors.clone() - anchors_float[:, :, 0] = ((anchors_float[:, :, 0] / self._image_size[1]) - 0.5) * 2 - anchors_float[:, :, 1] = ((anchors_float[:, :, 1] / self._image_size[0]) - 0.5) * 2 - keypoint_queries = self.query_mlp(anchors_float) - - # append depth_query if the model is learning depth. 
- if self._depth_on: - bs = keypoint_queries.shape[0] - depth_query = self.depth_query.weight.unsqueeze(0).repeat(bs, 1, 1) - keypoint_queries = torch.cat((keypoint_queries, depth_query), dim=1) - - # transformer forward - src_proj = self.input_proj(src) - hs, memory = self.transformer(src_proj, mask, keypoint_queries, pos[-1]) - - if self._depth_on: - depth_hs = hs[-1][:, -1:] - ord_hs = hs[-1][:, :-1] - else: - ord_hs = hs[-1] - - outputs_coord = self.bbox_embed(ord_hs).sigmoid() - outputs_movable = self.movable_embed(ord_hs) - outputs_rigid = self.rigid_embed(ord_hs) - outputs_kinematic = self.kinematic_embed(ord_hs) - outputs_action = self.action_embed(ord_hs) - - # axis forward - outputs_axis = self.axis_embed(ord_hs).sigmoid() - # sigmoid range is 0 to 1, we want it to be -1 to 1 - outputs_axis = (outputs_axis - 0.5) * 2 - - # affordance forward - bbox_aff = self.aff_attention(ord_hs, memory, mask=mask) - aff_masks = self.aff_head(src_proj, bbox_aff, [features[2].tensors, features[1].tensors, features[0].tensors]) - outputs_aff_masks = aff_masks.view(bs, num_queries, aff_masks.shape[-2], aff_masks.shape[-1]) - - # mask forward - bbox_mask = self.bbox_attention(ord_hs, memory, mask=mask) - seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors]) - outputs_seg_masks = seg_masks.view(bs, num_queries, seg_masks.shape[-2], seg_masks.shape[-1]) - - # depth forward - outputs_depth = None - if self._depth_on: - depth_att = self.depth_attention(depth_hs, memory, mask=mask) - depth_masks = self.depth_head( - src_proj, - depth_att, - [features[2].tensors, features[1].tensors, features[0].tensors] - ) - outputs_depth = depth_masks.view(bs, 1, depth_masks.shape[-2], depth_masks.shape[-1]) - - out = { - 'pred_boxes': box_ops.box_cxcywh_to_xyxy(outputs_coord), - 'pred_movable': outputs_movable, - 'pred_rigid': outputs_rigid, - 'pred_kinematic': outputs_kinematic, - 'pred_action': outputs_action, - 'pred_masks': outputs_seg_masks, - 'pred_axis': outputs_axis, - 'pred_depth': outputs_depth, - 'pred_affordance': outputs_aff_masks, - } - - if not backward: - return out - - # backward - src_boxes = outputs_coord - target_boxes = bbox - target_boxes = box_ops.box_xyxy_to_cxcywh(target_boxes) - bbox_valid = bbox[:, :, 0] > -0.5 - num_boxes = bbox_valid.sum() - if num_boxes == 0: - out['loss_bbox'] = torch.tensor(0.0, requires_grad=True).to(device) - out['loss_giou'] = torch.tensor(0.0, requires_grad=True).to(device) - else: - loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') - loss_bbox = loss_bbox * bbox_valid.unsqueeze(2) # remove invalid - out['loss_bbox'] = loss_bbox.sum() / num_boxes - - loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( - box_ops.box_cxcywh_to_xyxy(src_boxes).reshape(-1, 4), - box_ops.box_cxcywh_to_xyxy(target_boxes).reshape(-1, 4), - )).reshape(-1, self.num_queries) - loss_giou = loss_giou * bbox_valid # remove invalid - out['loss_giou'] = loss_giou.sum() / num_boxes - - # affordance - affordance_valid = affordance[:, :, 0] > -0.5 - if affordance_valid.sum() == 0: - out['loss_affordance'] = torch.tensor(0.0, requires_grad=True).to(device) - else: - src_aff_masks = outputs_aff_masks[affordance_valid] - tgt_aff_masks = affordance_map[affordance_valid] - src_aff_masks = src_aff_masks.flatten(1) - tgt_aff_masks = tgt_aff_masks.flatten(1) - loss_aff = sigmoid_focal_loss( - src_aff_masks, - tgt_aff_masks, - affordance_valid.sum(), - alpha=self._affordance_focal_alpha, - ) - out['loss_affordance'] = loss_aff - - # 
axis - axis_valid = axis[:, :, 0] > 0.0 - num_axis = axis_valid.sum() - if num_axis == 0: - out['loss_axis_angle'] = torch.tensor(0.0, requires_grad=True).to(device) - out['loss_axis_offset'] = torch.tensor(0.0, requires_grad=True).to(device) - out['loss_eascore'] = torch.tensor(0.0, requires_grad=True).to(device) - else: - # regress angle - src_axis_angle = outputs_axis[axis_valid] - src_axis_angle_norm = F.normalize(src_axis_angle[:, :2]) - src_axis_angle = torch.cat((src_axis_angle_norm, src_axis_angle[:, 2:]), dim=-1) - target_axis_xyxy = axis[axis_valid] - - axis_center = target_boxes[axis_valid].clone() - axis_center[:, 2:] = axis_center[:, :2] - target_axis_angle = axis_ops.line_xyxy_to_angle(target_axis_xyxy, center=axis_center) - - loss_axis_angle = F.l1_loss(src_axis_angle[:, :2], target_axis_angle[:, :2], reduction='sum') / num_axis - loss_axis_offset = F.l1_loss(src_axis_angle[:, 2:], target_axis_angle[:, 2:], reduction='sum') / num_axis - out['loss_axis_angle'] = loss_axis_angle - out['loss_axis_offset'] = loss_axis_offset - - src_axis_xyxy = axis_ops.line_angle_to_xyxy(src_axis_angle, center=axis_center) - target_axis_xyxy = axis_ops.line_angle_to_xyxy(target_axis_angle, center=axis_center) - - axis_eascore, _, _ = axis_ops.ea_score(src_axis_xyxy, target_axis_xyxy) - loss_eascore = 1 - axis_eascore - out['loss_eascore'] = loss_eascore.mean() - - loss_movable = F.cross_entropy(outputs_movable.permute(0, 2, 1), movable, ignore_index=self._ignore_index) - if torch.isnan(loss_movable): - loss_movable = torch.tensor(0.0, requires_grad=True).to(device) - out['loss_movable'] = loss_movable - - loss_rigid = F.cross_entropy(outputs_rigid.permute(0, 2, 1), rigid, ignore_index=self._ignore_index) - if torch.isnan(loss_rigid): - loss_rigid = torch.tensor(0.0, requires_grad=True).to(device) - out['loss_rigid'] = loss_rigid - - loss_kinematic = F.cross_entropy(outputs_kinematic.permute(0, 2, 1), kinematic, ignore_index=self._ignore_index) - if torch.isnan(loss_kinematic): - loss_kinematic = torch.tensor(0.0, requires_grad=True).to(device) - out['loss_kinematic'] = loss_kinematic - - loss_action = F.cross_entropy(outputs_action.permute(0, 2, 1), action, ignore_index=self._ignore_index) - if torch.isnan(loss_action): - loss_action = torch.tensor(0.0, requires_grad=True).to(device) - out['loss_action'] = loss_action - - # depth backward - if self._depth_on: - # (bs, 1, H, W) - src_depths = interpolate(outputs_depth, size=depth.shape[-2:], mode='bilinear', align_corners=False) - src_depths = src_depths.clamp(min=0.0, max=1.0) - tgt_depths = depth.unsqueeze(1) # (bs, H, W) - valid_depth = depth[:, 0, 0] > 0 - if valid_depth.any(): - src_depths = src_depths[valid_depth] - tgt_depths = tgt_depths[valid_depth] - depth_mask = tgt_depths > 1e-8 - midas_loss, ssi_loss, reg_loss = self.midas_loss(src_depths, tgt_depths, depth_mask) - loss_vnl = self.vnl_loss(tgt_depths, src_depths) - out['loss_depth'] = midas_loss - out['loss_vnl'] = loss_vnl - else: - out['loss_depth'] = torch.tensor(0.0, requires_grad=True).to(device) - out['loss_vnl'] = torch.tensor(0.0, requires_grad=True).to(device) - else: - out['loss_depth'] = torch.tensor(0.0, requires_grad=True).to(device) - out['loss_vnl'] = torch.tensor(0.0, requires_grad=True).to(device) - - # mask backward - tgt_masks = masks - src_masks = interpolate(outputs_seg_masks, size=tgt_masks.shape[-2:], mode='bilinear', align_corners=False) - valid_mask = tgt_masks.sum(dim=-1).sum(dim=-1) > 10 - if valid_mask.sum() == 0: - out['loss_mask'] = torch.tensor(0.0, 
requires_grad=True).to(device) - out['loss_dice'] = torch.tensor(0.0, requires_grad=True).to(device) - else: - num_masks = valid_mask.sum() - src_masks = src_masks[valid_mask] - tgt_masks = tgt_masks[valid_mask] - src_masks = src_masks.flatten(1) - tgt_masks = tgt_masks.flatten(1) - tgt_masks = tgt_masks.view(src_masks.shape) - out['loss_mask'] = sigmoid_focal_loss(src_masks, tgt_masks.float(), num_masks) - out['loss_dice'] = dice_loss(src_masks, tgt_masks, num_masks) - - return out diff --git a/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/controlnet_annotator/canny/__init__.py b/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/controlnet_annotator/canny/__init__.py deleted file mode 100644 index ace985839d3fc18dd4947f6c38e9f5d5a2625aca..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/controlnet_annotator/canny/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -import cv2 - - -def apply_canny(img, low_threshold, high_threshold): - return cv2.Canny(img, low_threshold, high_threshold) diff --git a/spaces/shikunl/prismer/prismer/experts/segmentation/mask2former/modeling/transformer_decoder/__init__.py b/spaces/shikunl/prismer/prismer/experts/segmentation/mask2former/modeling/transformer_decoder/__init__.py deleted file mode 100644 index ddcf38e78f3bbb2380b0a246000bcb5e5b385619..0000000000000000000000000000000000000000 --- a/spaces/shikunl/prismer/prismer/experts/segmentation/mask2former/modeling/transformer_decoder/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .maskformer_transformer_decoder import StandardTransformerDecoder -from .mask2former_transformer_decoder import MultiScaleMaskedTransformerDecoder diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/.NET Core 2.0 SDK A Powerful Tool for Web and Server Applications.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/.NET Core 2.0 SDK A Powerful Tool for Web and Server Applications.md deleted file mode 100644 index 2952feed981ff8be90f8e7c71db1fd62460bbb5d..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/.NET Core 2.0 SDK A Powerful Tool for Web and Server Applications.md +++ /dev/null @@ -1,156 +0,0 @@ -<br /> -<h1>What is .NET Core SDK 2.0 and why you should download it</h1> -<p>If you are a web developer, you might have heard of .NET Core, a free, cross-platform, open-source framework for building web applications. But do you know what .NET Core SDK 2.0 is and why you should download it? In this article, we will explain what .NET Core and .NET Core SDK are, what are the benefits of using .NET Core SDK 2.0, and how to download and install it on your machine.</p> -<h2>Introduction</h2> -<h3>What is .NET Core</h3> -<p>.NET Core is a subset of the .NET Framework, which is a popular platform for developing Windows applications. Unlike the .NET Framework, which only runs on Windows, .NET Core can run on Linux, macOS, and Windows. This means that you can use the same code and tools to create web applications that can run on any operating system.</p> -<h2>.net core sdk download 2.0</h2><br /><p><b><b>Download Zip</b> &#9913;&#9913;&#9913; <a href="https://ssurll.com/2uNWGY">https://ssurll.com/2uNWGY</a></b></p><br /><br /> -<h3>What is .NET Core SDK</h3> -<p>.NET Core SDK stands for Software Development Kit. 
It is a collection of tools and libraries that you need to build and run .NET Core applications. The SDK includes the following components:</p> -<ul> -<li>The .NET Core Runtime, which contains the components needed to run a console app or a web server.</li> -<li>The ASP.NET Core Runtime, which contains the components needed to run a web app or an API.</li> -<li>The dotnet command-line tool, which allows you to create, build, run, test, and publish your applications.</li> -<li>The C# and Visual Basic compilers, which translate your code into executable files.</li> -<li>The NuGet package manager, which allows you to install and manage third-party libraries.</li> -<li>The MSBuild tool, which orchestrates the build process.</li> -<li>The Roslyn analyzers, which provide code quality checks and suggestions.</li> -<li>The .NET Standard libraries, which provide common functionality across different platforms.</li> -</ul> -<h3>What are the benefits of using .NET Core SDK 2.0</h3> -<p>.NET Core SDK 2.0 was released in August 2017 as an update to the previous version, .NET Core SDK 1.x. It introduced several improvements and new features, such as:</p> -<ul> -<li>Better performance and compatibility with the .NET Framework.</li> -<li>Support for more languages and platforms, including Visual Basic and F#.</li> -<li>Support for more project types, such as web applications, console applications, class libraries, unit tests, and xUnit tests.</li> -<li>Support for more frameworks, such as ASP.NET Core 2.0, Entity Framework Core 2.0, and Xamarin.Forms.</li> -<li>Support for more tools and editors, such as Visual Studio 2017, Visual Studio Code, Visual Studio for Mac, and JetBrains Rider.</li> -<li>Support for more deployment options, such as self-contained deployments, framework-dependent deployments, Docker containers, and Azure App Service.</li> -</ul> -<p>By using .NET Core SDK 2.0, you can take advantage of these benefits and create faster, more reliable, and more portable web applications.</p> -<h2>How to download and install .NET Core SDK 2.0</h2> -<h3>Download options for different operating systems</h3> -<p>To download .NET Core SDK 2.0, you need to choose the appropriate installer or binary for your operating system. You can find the download links on the official website. 
Here are some of the options available:</p> - <h4>Linux</h4> - <table border="1"> -<tr><th>OS</th><th>Installer</th><th>Binary</th></tr> -<tr><td>Ubuntu 16.04 or later</td><td>.deb</td><td>.tar.gz</td></tr> -<tr><td>Debian 9 or later</td><td>.deb</td><td>.tar.gz</td></tr> -<tr><td>CentOS 7 or later</td><td>.rpm</td><td>.tar.gz</td></tr> -<tr><td>Fedora 27 or later</td><td>.rpm</td><td>.tar.gz</td></tr> -<tr><td>OpenSUSE 42.3 or later</td><td>.rpm</td><td>.tar.gz</td></tr> -<tr><td>SUSE Linux Enterprise Server 12 SP2 or later</td><td>.rpm</td><td>.tar.gz</td></tr> -<tr><td>Alpine Linux 3.6 or later</td><td>N/A</td><td>.tar.gz</td></tr> -<tr><td>Snap Package (any Linux distro)</td><td>snapd</td><td>N/A</td></tr> -<tr><td>Docker Image (any Linux distro)</td><td>N/A</td><td>N/A</td></tr> -</table> - <h4>macOS</h4> - <table border="1"> -<tr><th>OS Version</th><th>Installer</th></tr> -<tr><td>macOS 10.12 (Sierra) or later</td><td>.pkg or .zip (for x64)</td></tr> -<tr><td>macOS 11.0 (Big Sur) or later (for Apple Silicon)</td><td>.zip (for arm64)</td></tr> -<tr><td>Docker Image (any macOS version)</td><td>N/A</td></tr> -</table> - <h4>Windows</h4> - <table border="1"> -<tr><th>OS Version</th><th>Installer (x86)</th><th>Installer (x64)</th></tr> -<tr><td>Windows 7 SP1 or later, Windows Server 2008 R2 SP1 or later</td><td>.exe or .zip</td><td>.exe or .zip</td></tr> -<tr><td>Windows 10 Fall Creators Update (version 1709) or later, Windows Server 2016 or later</td><td>N/A</td><td>.exe, .zip, or .msi</td></tr> -<tr><td>Docker Image (any Windows version)</td><td>N/A</td><td>N/A</td></tr> -</table> - <h3>Installation steps for different operating systems</h3> - <p>The installation steps vary depending on the operating system and the download option you choose. Here are some general guidelines:</p> - <h4>Linux</h4> - <p>If you choose the installer option, you need to use the appropriate package manager for your Linux distribution to install the .NET Core SDK 2.0. For example, on Ubuntu, you can use the following commands:</p> - <code> -sudo apt-get update sudo apt-get install dotnet-sdk-2.0.0 <code> - <p>If you choose the binary option, you need to extract the .tar.gz file to a folder of your choice and add it to your PATH environment variable. For example, on Ubuntu, you can use the following commands:</p> - <code> -mkdir -p $HOME/dotnet && tar zxf dotnet-sdk-2.0.0-linux-x64.tar.gz -C $HOME/dotnet export PATH=$PATH:$HOME/dotnet <code> - <p>If you choose the snap package option, you need to install snapd on your Linux distribution and then install the dotnet-sdk snap. 
For example, on Ubuntu, you can use the following commands:</p> - <code> -sudo apt-get update sudo apt-get install snapd sudo snap install dotnet-sdk --classic <code> - <p>If you choose the docker image option, you need to have Docker installed on your Linux distribution and then pull the microsoft/dotnet image from Docker Hub. 
For example, on Ubuntu, you can use the following commands:</p> - <code> -sudo apt-get update sudo apt-get install docker.io sudo docker pull microsoft/dotnet:2.0-sdk <code> - <h4>macOS</h4> - <p>If you choose the installer option, you need to run the .pkg file and follow the instructions on the screen to install the .NET Core SDK 2.0. If you choose the .zip option, you need to extract the .zip file to a folder of your choice and add it to your PATH environment variable. For example, you can use the following commands:</p> - <code> -unzip dotnet-sdk-2.0.0-osx-x64.zip - unzip dotnet-sdk-2.0.0-osx-x64.zip -d ~/dotnet export PATH=$PATH:~/dotnet <code> - <p>If you choose the docker image option, you need to have Docker installed on your macOS and then pull the microsoft/dotnet image from Docker Hub. For example, you can use the following commands:</p> - <code> -brew install docker docker pull microsoft/dotnet:2.0-sdk <code> - <h4>Windows</h4> - <p>If you choose the installer option, you need to run the .exe or .msi file and follow the instructions on the screen to install the .NET Core SDK 2.0. If you choose the .zip option, you need to extract the .zip file to a folder of your choice and add it to your PATH environment variable. For example, you can use the following commands:</p> - <code> -Expand-Archive dotnet-sdk-2.0.0-win-x64.zip -DestinationPath C:\dotnet $env:PATH += ";C:\dotnet" <code> - <p>If you choose the docker image option, you need to have Docker installed on your Windows and then pull the microsoft/dotnet image from Docker Hub. For example, you can use the following commands:</p> - <code> -Install-Module -Name DockerMsftProvider -Repository PSGallery -Force Install-Package -Name docker -ProviderName DockerMsftProvider Restart-Computer -Force docker pull microsoft/dotnet:2.0-sdk <code> - <h2>Conclusion</h2> - <p>In this article, we have learned what .NET Core SDK 2.0 is and why you should download it. We have also seen how to download and install it on different operating systems. By using .NET Core SDK 2.0, you can create web applications that are fast, reliable, and portable across different platforms.</p> - <p>If you want to learn more about .NET Core SDK 2.0, you can visit the official documentation or check out some of the tutorials available online. You can also join the .NET community and get help from other developers.</p> - <p>Are you ready to start building your web applications with .NET Core SDK 2.0? Download it today and see what you can create!</p> - <h3>FAQs</h3> - <ul> -<li><b>What is the difference between .NET Core and .NET Framework?</b> -.NET Core is a subset of the .NET Framework that is cross-platform, open-source, and modular. .NET Framework is a platform for developing Windows applications that is proprietary, Windows-only, and monolithic.</li> -<li><b>What is the difference between .NET Core SDK and .NET Core Runtime?</b> -.NET Core SDK is a collection of tools and libraries that you need to build and run .NET Core applications. 
.NET Core Runtime is a component that contains the components needed to run a console app or a web server.</li> -<li><b>What are some of the features of .NET Core SDK 2.0?</b> -Some of the features of .NET Core SDK 2.0 are better performance and compatibility with the .NET Framework, support for more languages and platforms, support for more project types and frameworks, support for more tools and editors, and support for more deployment options.</li> -<li><b>How do I check if I have .NET Core SDK 2.0 installed on my machine?</b> -You can use the dotnet --version command to check the version of the .NET Core SDK installed on your machine.</li> -<li><b>How do I update my existing .NET Core SDK to version 2.0?</b> -You can download and install the latest version of the .NET Core SDK from the official website. The new version will overwrite the existing version.</li> -</ul></p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Create Amazing Art with Adobe Firefly the AI Art Generator.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Create Amazing Art with Adobe Firefly the AI Art Generator.md deleted file mode 100644 index 56a2e5768f56bf0f25b7a00131629d77388a5a8a..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Create Amazing Art with Adobe Firefly the AI Art Generator.md +++ /dev/null @@ -1,74 +0,0 @@ -<br /> -<br> - Step 1: Visit the Adobe Illustrator free trial page and sign in or create an Adobe ID <br> - Step 2: Download and install the Illustrator app on your device <br> - Step 3: Launch Illustrator and start creating beautiful vector art and illustrations | | H2: What Can You Do with Illustrator? | - Create logos, icons, sketches, typography, and complex illustrations for any project <br> - Draw anything with ease using tools that help you create what you envision <br> - Recolor your images with Generative Recolor (beta), a new feature powered by Adobe Firefly generative AI <br> - Explore vector art and line drawing styles to make your designs more unique and expressive | | H2: Frequently Asked Questions about Your Free Trial | - How long does the Illustrator free trial last? <br> - Can I download Illustrator for free? <br> - Will this free Illustrator trial work on macOS and Windows? <br> - Can I download a trial version of Illustrator CS6? <br> - Do students get a discount if they decide to purchase after the free trial? | | H2: Conclusion | - Summarize the main points of the article and encourage the reader to try Illustrator for free | Table 2: Article with HTML formatting <h1>How to Download Adobe Illustrator for Free</h1> -<p>If you are looking for a powerful and versatile tool to create vector art and illustrations, you might want to try Adobe Illustrator. Illustrator is one of the most popular and widely used applications in the Adobe Creative Cloud suite. It allows you to create logos, icons, sketches, typography, and complex illustrations for any project, whether it's online graphics, billboards, product packaging, or anything else.</p> -<h2>adobe download ai</h2><br /><p><b><b>Download File</b> &hArr; <a href="https://ssurll.com/2uNYYR">https://ssurll.com/2uNYYR</a></b></p><br /><br /> -<p>The good news is that you can download and use Illustrator for free for 7 days with a free trial. 
This way, you can test out all the features and capabilities of Illustrator and see if it suits your needs and preferences. In this article, we will show you how to download Adobe Illustrator for free in three easy steps.</p> -<h2>Step 1: Visit the Adobe Illustrator free trial page and sign in or create an Adobe ID</h2> -<p>The first step is to visit the <a href="(^1^)">Adobe Illustrator free trial page</a>. There, you will see a button that says "Start free trial". Click on it and you will be redirected to a page where you need to sign in or create an Adobe ID. If you already have an Adobe ID, you can sign in with your email address and password. If you don't have an Adobe ID, you can create one by filling out a simple form with your name, email address, password, country, and date of birth.</p> -<h2>Step 2: Download and install the Illustrator app on your device</h2> -<p>After signing in or creating an Adobe ID, you will be taken to a page where you can download the Illustrator app on your device. You can choose between Windows or Mac versions depending on your operating system. The download size is about 2 GB, so make sure you have enough space and a stable internet connection. Once the download is complete, open the file and follow the instructions to install the app on your device.</p> -<h2>Step 3: Launch Illustrator and start creating beautiful vector art and illustrations</h2> -<p>Congratulations! You have successfully downloaded Adobe Illustrator for free. Now you can launch the app and start creating beautiful vector art and illustrations. You will have access to all the features and tools of Illustrator for 7 days. You can also access hundreds of video tutorials for every skill level on the <a href="">Adobe website</a>. You can also get 100 GB of cloud storage, free mobile apps, and file sharing features with your Creative Cloud membership.</p> -<h1>What Can You Do with Illustrator?</h1> -<p>Illustrator is a powerful and versatile tool that can help you create anything you can imagine, from simple to complex. Here are some of the things you can do with Illustrator:</p> -<ul> -<li><b>Create logos, icons, sketches, typography, and complex illustrations for any project.</b> With Illustrator, you can make versatile vector art that you can use at any size and in any design project. You can also use tools like gradients, patterns, brushes, effects, masks, shapes, text, symbols, and more to add style and detail to your designs.</li <li><b>Draw anything with ease using tools that help you create what you envision.</b> With Illustrator, you can draw anything from simple shapes to complex curves with precision and control. You can use tools like the Pen tool, the Curvature tool, the Pencil tool, the Blob Brush tool, and more to create paths and shapes that you can edit and manipulate. You can also use the Image Trace feature to convert raster images into editable vector graphics.</li> -<li><b>Recolor your images with Generative Recolor (beta), a new feature powered by Adobe Firefly generative AI.</b> With Illustrator, you can experiment with different color schemes and harmonies for your images with just a few clicks. You can use the Generative Recolor feature to apply color variations to your images based on different themes, moods, or styles. You can also adjust the brightness, contrast, saturation, and temperature of your colors with sliders. 
This feature is currently in beta and available only for Creative Cloud members.</li> -<li><b>Explore vector art and line drawing styles to make your designs more unique and expressive.</b> With Illustrator, you can add personality and flair to your designs with different vector art and line drawing styles. You can use tools like the Width tool, the Variable Width Profile tool, the Art Brush tool, the Pattern Brush tool, and more to create custom strokes and brushes that you can apply to your paths and shapes. You can also use tools like the Blend tool, the Shape Builder tool, the Pathfinder panel, and more to combine and transform your shapes in various ways.</li> -</ul> -<h1>Frequently Asked Questions about Your Free Trial</h1> -<p>If you have any questions or doubts about your free trial of Illustrator, you might find the answers below. Here are some of the most frequently asked questions about your free trial:</p> -<h3>How long does the Illustrator free trial last?</h3> -<p>The Illustrator free trial lasts for 7 days from the day you start it. You can cancel it anytime before the 7 days are over and you won't be charged. If you want to continue using Illustrator after the trial period, you will need to purchase a subscription plan.</p> -<h3>Can I download Illustrator for free?</h3> -<p>No, Illustrator is not free. You can download it for free only for a 7-day trial period. After that, you will need to pay a monthly or annual fee to use it. However, there are some free alternatives to Illustrator that you can try, such as Inkscape, Gravit Designer, Vectr, or BoxySVG.</p> -<h3>Will this free Illustrator trial work on macOS and Windows?</h3> -<p>Yes, this free Illustrator trial will work on both macOS and Windows devices. You can download the version that matches your operating system from the Adobe website. 
You will need a 64-bit processor and at least 8 GB of RAM to run Illustrator smoothly.</p> -<h3>Can I download a trial version of Illustrator CS6?</h3> -<p>No, you cannot download a trial version of Illustrator CS6 anymore. Adobe has discontinued the support for older versions of Illustrator and other Creative Cloud apps. The latest version of Illustrator is Illustrator 2022 (version 26), which is the only one available for download as a free trial.</p> -<h3>Do students get a discount if they decide to purchase after the free trial?</h3> -<p>Yes, students get a discount if they decide to purchase after the free trial. Adobe offers a special plan for students and teachers that gives them access to all the Creative Cloud apps, including Illustrator, for only $19.99 per month for the first year and $29.99 per month after that. To qualify for this plan, you need to provide proof of eligibility such as a school email address or an ID card.</p> -<h1>Conclusion</h1> -<p>In conclusion, Adobe Illustrator is a powerful and versatile tool that can help you create stunning vector art and illustrations for any project. You can download it for free for 7 days with a free trial and test out all its features and capabilities. If you like it, you can purchase a subscription plan that suits your needs and budget. Alternatively, you can try some of the free alternatives to Illustrator that we mentioned above. 
Whatever you choose, we hope you enjoy creating beautiful designs with vector graphics.</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Dominoes Online - Free and Fun Domino Game APK.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Dominoes Online - Free and Fun Domino Game APK.md deleted file mode 100644 index b20269e9a82acd402bd295bc52c182792cf1b26a..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Dominoes Online - Free and Fun Domino Game APK.md +++ /dev/null @@ -1,128 +0,0 @@ - -<h1>Domino - Dominoes Online APK: How to Play the Classic Game on Your Android Device</h1> -<p>Do you love playing dominoes, but don't have a physical set or someone to play with? Do you want to enjoy the fun and challenge of different domino games on your smartphone or tablet? If you answered yes to any of these questions, then you should try <strong>Domino - Dominoes Online APK</strong>, a free app that lets you play six classic domino games online and offline. In this article, we will show you what this app is, why you should download it, how to install it, and how to play it.</p> -<h2>domino - dominoes online apk</h2><br /><p><b><b>Download File</b> <a href="https://ssurll.com/2uNS3H">https://ssurll.com/2uNS3H</a></b></p><br /><br /> -<h2>Introduction</h2> -<p>Dominoes is one of the oldest and most popular board games in the world. It originated in China more than a thousand years ago, and spread to Europe and other continents over time. The game involves matching tiles with the same number of dots on one end, and creating chains or patterns with them. The game can be played by two to four players, and has many variations and rules depending on the region and preference.</p> -<h3>What is Domino - Dominoes Online APK?</h3> -<p><strong>Domino - Dominoes Online APK</strong> is an app that allows you to play six different domino games on your Android device. You can choose from All Fives, Draw Game, Block Game, All Threes, Cross and Kozel. Each game has its own rules and objectives, but they all share the same basic principle of matching tiles and scoring points. You can play online with friends or strangers, or offline with computer or local players. You can also customize your game settings, such as the number of tiles, the difficulty level, the sound effects, and more.</p> -<h3>Why should you download Domino - Dominoes Online APK?</h3> -<p>There are many reasons why you should download <strong>Domino - Dominoes Online APK</strong>. Here are some of them:</p> -<ul> -<li>It is free and easy to use. You don't need to pay anything or register an account to play. You just need to download and install the app, and you are ready to go.</li> -<li>It is fun and challenging. You can enjoy the classic game of dominoes in different modes and levels. You can test your skills and strategy against other players or the computer.</li> -<li>It is social and interactive. You can chat with other players online, send them emojis, invite them to your friends list, or challenge them to a rematch. You can also join our Dominos community and meet new friends.</li> -<li>It is offline-friendly. You don't need an internet connection or wifi to play. 
You can play offline with computer or local players anytime, anywhere.</li> -</ul> -<h2>How to download and install Domino - Dominoes Online APK</h2> -<p>If you are interested in downloading and installing <strong>Domino - Dominoes Online APK</strong>, here are the steps you need to follow:</p> -<h3>Step 1: Find a reliable source for the APK file</h3> -<p>An APK file is an Android package file that contains all the necessary files for an app to run on your device. You can find many sources for APK files online, but not all of them are safe or trustworthy. Some of them may contain viruses or malware that can harm your device or steal your data. Therefore, you should always download APK files from reputable and verified sources, such as the official website of the app developer, or trusted third-party platforms, such as APKPure, APKMirror, or Uptodown. For example, you can download <strong>Domino - Dominoes Online APK</strong> from this link: [Domino - Dominoes Online APK Download].</p> -<p>domino - dominoes online apk download free<br /> -domino - dominoes online apk mod unlimited money<br /> -domino - dominoes online apk latest version<br /> -domino - dominoes online apk for pc windows 10<br /> -domino - dominoes online apk offline mode<br /> -domino - dominoes online apk hack cheats<br /> -domino - dominoes online apk play with friends<br /> -domino - dominoes online apk no ads<br /> -domino - dominoes online apk skillcap games<br /> -domino - dominoes online apk update 2023<br /> -domino - dominoes online apk review ratings<br /> -domino - dominoes online apk how to play<br /> -domino - dominoes online apk tips tricks<br /> -domino - dominoes online apk best strategy<br /> -domino - dominoes online apk rules variations<br /> -domino - dominoes online apk all fives draw block<br /> -domino - dominoes online apk multiplayer chat<br /> -domino - dominoes online apk leaderboard achievements<br /> -domino - dominoes online apk custom tiles themes<br /> -domino - dominoes online apk sound effects music<br /> -domino - dominoes online apk bug fixes improvements<br /> -domino - dominoes online apk install guide<br /> -domino - dominoes online apk compatible devices<br /> -domino - dominoes online apk file size requirements<br /> -domino - dominoes online apk safe secure<br /> -domino - dominos online apkpure com download link[^1^]<br /> -dominos pizza delivery app vs dominos game app comparison<br /> -best dominos game apps for android ios 2023<br /> -how to win at dominos game app strategies tips tricks<br /> -dominos game app history origin facts trivia<br /> -dominos game app benefits for brain health fun entertainment<br /> -dominos game app challenges tournaments prizes rewards<br /> -dominos game app features options settings preferences<br /> -dominos game app feedback reviews testimonials ratings comments<br /> -dominos game app support contact help faq troubleshooting</p> -<h3>Step 2: Enable unknown sources on your device</h3> -<p>By default, your Android device will not allow you to install apps from sources other than the Google Play Store. This is a security measure to prevent unauthorized or harmful apps from accessing your device. However, if you want to install <strong>Domino - Dominoes Online APK</strong>, you need to enable unknown sources on your device. This will allow you to install apps from APK files that you downloaded from other sources. 
To do this, follow these steps:</p> -<ol> -<li>Go to your device's settings and tap on security or privacy.</li> -<li>Find the option that says unknown sources or install unknown apps and toggle it on.</li> -<li>A warning message will appear, telling you the risks of installing apps from unknown sources. Tap on OK or allow to confirm.</li> -</ol> -<p>Note: The exact steps may vary depending on your device model and Android version. You can also disable unknown sources after installing the app if you want to.</p> -<h3>Step 3: Download and install the APK file</h3> -<p>Once you have enabled unknown sources on your device, you can proceed to download and install <strong>Domino - Dominoes Online APK</strong>. To do this, follow these steps:</p> -<ol> -<li>Open the browser on your device and go to the link where you downloaded the APK file.</li> -<li>Tap on the download button and wait for the file to be downloaded.</li> -<li>Once the download is complete, tap on the file to open it.</li> -<li>A prompt will appear, asking you if you want to install the app. Tap on install and wait for the installation process to finish.</li> -<li>Once the installation is done, tap on open to launch the app.</li> -</ol> -<p>Congratulations! You have successfully downloaded and installed <strong>Domino - Dominoes Online APK</strong> on your Android device. You can now enjoy playing six classic domino games online and offline.</p> -<h2>How to play Domino - Dominoes Online APK</h2> -<p>Now that you have installed <strong>Domino - Dominoes Online APK</strong>, you might be wondering how to play it. Don't worry, we will guide you through the basics of playing this app. Here are the steps you need to follow:</p> -<h3>Choose a game mode</h3> -<p>The first thing you need to do is choose a game mode. <strong>Domino - Dominoes Online APK</strong> offers six different game modes, each with its own rules and objectives. You can choose from All Fives, Draw Game, Block Game, All Threes, Cross and Kozel. Here is a brief overview of each game mode:</p> - <h4>All Fives</h4> -<p>This is the most popular domino game in the world. The goal is to score points by making the ends of the domino chain add up to a multiple of five. For example, if the ends are 2 and 3, you score 5 points; if they are 4 and 6, you score 10 points; and so on. The first player to reach a certain number of points (usually 100 or 150) wins the game.</p> - <h4>Draw Game</h4> -<p>This is the simplest domino game. The goal is to get rid of all your tiles by matching them with the ends of the domino chain. If you cannot make a move, you have to draw a tile from the boneyard (the pile of unused tiles). The first player who runs out of tiles or has the lowest total value of tiles in their hand when the boneyard is empty wins the game.</p> - <h4>Block Game</h4> -<p>This is similar to Draw Game, except that there is no boneyard. The goal is still to get rid of all your tiles by matching them with the ends of the domino chain. However, if you cannot make a move, you have to pass your turn. The game ends when one player runs out of tiles or when both players cannot make a move. The player who has the lowest total value of tiles in their hand wins the game.</p> - <h4>All Threes</h4> -<p>This is another variation of All Fives, but with a different scoring system. The goal is to score points by making the ends of the domino chain add up to a multiple of three. For example, if the ends are 1 and 2, you score 3 points; if they are 3 and 6, you score 9 points; and so on.
The first player to reach a certain number of points (usually 100 or 150) wins the game.</p> - <h4>Cross and Kozel</h4> -<p>This is a Russian domino game that is played with a double-six set. The goal is to score points by making crosses (tiles with the same number on both ends) or kozels (tiles with a blank end). For example, if you play a cross with 5 on both ends, you score 10 points; if you play a kozel with a blank and a 4, you score 4 points; and so on. The first player to reach a certain number of points (usually 100 or 150) wins the game.</p> - <h3>Choose an opponent</h3> -<p>After choosing a game mode, you need to choose an opponent. <strong>Domino - Dominoes Online APK</strong> gives you two options: online or offline. Here is what they mean:</p> - <h4>Online with friends or strangers</h4> -<p>If you choose online, you can play with other players from around the world. You can either join a random game or create your own game and invite your friends. You can also chat with other players, send them emojis, add them to your friends list, or challenge them to a rematch. You can also join our Dominos community and meet new friends.</p> - <h4>Offline with computer or local players</h4> -<p>If you choose offline, you can play with computer or local players. You can either play solo against the computer, or play with up to three other players on the same device. You can also adjust the difficulty level of the computer, from easy to hard. You can also play without an internet connection or wifi.</p> - <h3>Follow the rules and enjoy the game</h3> -<p>The last step is to follow the rules and enjoy the game. Depending on the game mode and the opponent you chose, the rules may vary slightly. However, here are some general rules that apply to all games:</p> -<ul> -<li>The game starts with each player drawing seven tiles from the boneyard (except in Block Game, where there is no boneyard).</li> -<li>The player who has the highest double (a tile with the same number on both ends) goes first and places it on the table.</li> -<li>The next player must place a tile that matches one end of the domino chain. If they cannot, they have to draw a tile from the boneyard (if available) or pass their turn.</li> -<li>The game continues until one player runs out of tiles, or when both players cannot make a move.</li> -<li>The player who runs out of tiles first wins the round and scores points based on the tiles left in their opponent's hand.</li> -<li>The game ends when one player reaches the target score (usually 100 or 150), or when a certain number of rounds are played.</li> -</ul> -<p>That's it! You are now ready to play <strong>Domino - Dominoes Online APK</strong> and have fun with six classic domino games online and offline.</p> -<h2>Conclusion</h2> -<p>In this article, we have shown you what <strong>Domino - Dominoes Online APK</strong> is, why you should download it, how to install it, and how to play it. We hope that you have found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy playing!</p> -<h2>FAQs</h2> -<p>Here are some frequently asked questions about <strong>Domino - Dominoes Online APK</strong>:</p> - <ol> -<li><strong>Is Domino - Dominoes Online APK safe?</strong></li> -<p>Yes, <strong>Domino - Dominoes Online APK</strong> is safe and secure. It does not contain any viruses or malware that can harm your device or steal your data. 
However, you should always download it from a reliable source, such as the official website of the app developer, or trusted third-party platforms, such as APKPure, APKMirror, or Uptodown.</p> - <li><strong>Is Domino - Dominoes Online APK free?</strong></li> -<p>Yes, <strong>Domino - Dominoes Online APK</strong> is free and easy to use. You don't need to pay anything or register an account to play. You just need to download and install the app, and you are ready to go.</ p> - <li><strong>How can I update Domino - Dominoes Online APK?</strong></li> -<p>You can update <strong>Domino - Dominoes Online APK</strong> by downloading and installing the latest version of the app from the same source where you downloaded it before. Alternatively, you can enable the auto-update feature on your device, which will automatically download and install the updates for you when they are available.</p> - <li><strong>How can I contact the developer of Domino - Dominoes Online APK?</strong></li> -<p>If you have any questions, suggestions, or feedback about <strong>Domino - Dominoes Online APK</strong>, you can contact the developer of the app by sending an email to <a href="mailto:domino@skillcap.com">domino@skillcap.com</a>. You can also visit their website at <a href="https://skillcap.com/domino">https://skillcap.com/domino</a> or follow them on Facebook at <a href="https://www.facebook.com/DominoesOnline">https://www.facebook.com/DominoesOnline</a>.</p> - <li><strong>What are some tips and tricks for playing Domino - Dominoes Online APK?</strong></li> -<p>Here are some tips and tricks for playing <strong>Domino - Dominoes Online APK</strong> that can help you improve your game and win more matches:</p> -<ul> -<li>Pay attention to the tiles that have been played and the ones that are left in the boneyard. This will help you deduce what tiles your opponent has and what moves they can make.</li> -<li>Try to play tiles that have a high value or a high frequency on both ends. This will give you more options to match and score points.</li> -<li>Try to block your opponent from making moves or scoring points. You can do this by playing tiles that have a low value or a low frequency on both ends, or by creating gaps in the domino chain.</li> -<li>Try to keep a balanced hand of tiles. This will prevent you from getting stuck with tiles that you cannot play or score with.</li> -<li>Have fun and enjoy the game. Don't get frustrated or angry if you lose. 
Learn from your mistakes and try again.</li> -</ul></p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/skf15963/summary/fengshen/examples/disco_project/st_disco.py b/spaces/skf15963/summary/fengshen/examples/disco_project/st_disco.py deleted file mode 100644 index 8676ac2763aab65300bdcb588ac74c4e672745d5..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/disco_project/st_disco.py +++ /dev/null @@ -1,56 +0,0 @@ -# from disco_huge import Diffuser -# from utils import * -from disco import Diffuser -import streamlit as st -from io import BytesIO -from PIL import Image -from disco import steps - - -@st.cache(show_spinner=False, allow_output_mutation=True)  # cache decorator: load the model only once -class ST_Diffuser(Diffuser): -    def __init__(self, custom_path): -        super().__init__(custom_path) - - -if __name__ == '__main__': -    dd = ST_Diffuser(custom_path="IDEA-CCNL/Taiyi-Diffusion-532M-Nature")  # initialization -    form = st.form("参数设置") -    input_text = form.text_input('输入文本生成图像:', value='', placeholder='你想象的一个画面') -    form.form_submit_button("提交") -    uploaded_file = st.file_uploader("上传初始化图片(可选)", type=["jpg", "png", "jpeg"]) - -    text_scale_norm = st.sidebar.slider('文本强度', 0.1, 1.0, 0.5, step=0.1) -    text_scale = int(text_scale_norm * 10000) -    res_skip_steps = st.sidebar.slider('加噪强度', 0.1, 1.0, 0.9, step=0.1) -    skip_steps = int(steps - round(res_skip_steps * steps)) -    width = st.sidebar.slider('宽度', 384, 1024, 512, step=64) -    heigth = st.sidebar.slider('高度', 384, 1024, 512, step=64) - -    with st.spinner('正在生成中...'): -        capture_img = None -        if uploaded_file is not None: -            # To read file as bytes: -            bytes_data = uploaded_file.getvalue() -            # convert the raw bytes into a byte stream -            bytes_data = BytesIO(bytes_data) -            # Image.open() can read a byte stream -            capture_img = Image.open(bytes_data).convert('RGB').resize((width, heigth)) - -            image_status = st.empty() -            image_status.image(capture_img, use_column_width=True) -        else: -            image_status = st.empty() - -        if input_text: -            # global text_prompts -            input_text_prompts = [input_text] -            image = dd.generate(input_text_prompts, -                                capture_img, -                                clip_guidance_scale=text_scale, -                                skip_steps=skip_steps, -                                st_dynamic_image=image_status, -                                init_scale=None, -                                side_x=width, -                                side_y=heigth)  # final result; for live display, adjust the code inside generate() -            image_status.image(image, use_column_width=True) diff --git a/spaces/skytnt/lyric-generator-ja/frontend/dist/js/chunk-vendors.f5ab6a81.js b/spaces/skytnt/lyric-generator-ja/frontend/dist/js/chunk-vendors.f5ab6a81.js deleted file mode 100644 index 1ab9acbc324e063c4bbdae7c7c8553563bee9f6b..0000000000000000000000000000000000000000 --- a/spaces/skytnt/lyric-generator-ja/frontend/dist/js/chunk-vendors.f5ab6a81.js +++ /dev/null @@ -1,19 +0,0 @@ -(window["webpackJsonp"]=window["webpackJsonp"]||[]).push([["chunk-vendors"],{"00b4":function(t,e,n){"use strict";n("ac1f");var i=n("23e7"),r=n("da84"),o=n("c65b"),a=n("e330"),s=n("1626"),c=n("861d"),u=function(){var t=!1,e=/[ac]/;return e.exec=function(){return t=!0,/./.exec.apply(this,arguments)},!0===e.test("abc")&&t}(),l=r.Error,f=a(/./.test);i({target:"RegExp",proto:!0,forced:!u},{test:function(t){var e=this.exec;if(!s(e))return f(this,t);var n=o(e,this,t);if(null!==n&&!c(n))throw new l("RegExp exec method returned something other than an Object or null");return!!n}})},"00ee":function(t,e,n){var i=n("b622"),r=i("toStringTag"),o={};o[r]="z",t.exports="[object z]"===String(o)},"01b4":function(t,e){var n=function(){this.head=null,this.tail=null};n.prototype={add:function(t){var
e={item:t,next:null};this.head?this.tail.next=e:this.head=e,this.tail=e},get:function(){var t=this.head;if(t)return this.head=t.next,this.tail===t&&(this.tail=null),t.item}},t.exports=n},"0366":function(t,e,n){var i=n("e330"),r=n("59ed"),o=n("40d5"),a=i(i.bind);t.exports=function(t,e){return r(t),void 0===e?t:o?a(t,e):function(){return t.apply(e,arguments)}}},"0481":function(t,e,n){"use strict";var i=n("23e7"),r=n("a2bf"),o=n("7b0b"),a=n("07fa"),s=n("5926"),c=n("65f0");i({target:"Array",proto:!0},{flat:function(){var t=arguments.length?arguments[0]:void 0,e=o(this),n=a(e),i=c(e,0);return i.length=r(i,e,e,n,0,void 0===t?1:s(t)),i}})},"04d1":function(t,e,n){var i=n("342f"),r=i.match(/firefox\/(\d+)/i);t.exports=!!r&&+r[1]},"0538":function(t,e,n){"use strict";var i=n("da84"),r=n("e330"),o=n("59ed"),a=n("861d"),s=n("1a2d"),c=n("f36a"),u=n("40d5"),l=i.Function,f=r([].concat),h=r([].join),d={},p=function(t,e,n){if(!s(d,e)){for(var i=[],r=0;r<e;r++)i[r]="a["+r+"]";d[e]=l("C,a","return new C("+h(i,",")+")")}return d[e](t,n)};t.exports=u?l.bind:function(t){var e=o(this),n=e.prototype,i=c(arguments,1),r=function(){var n=f(i,c(arguments));return this instanceof r?p(e,n.length,n):e.apply(t,n)};return a(n)&&(r.prototype=n),r}},"057f":function(t,e,n){var i=n("c6b6"),r=n("fc6a"),o=n("241c").f,a=n("4dae"),s="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[],c=function(t){try{return o(t)}catch(e){return a(s)}};t.exports.f=function(t){return s&&"Window"==i(t)?c(t):o(r(t))}},"06c5":function(t,e,n){"use strict";n.d(e,"a",(function(){return r}));n("fb6a"),n("d3b7"),n("b0c0"),n("a630"),n("3ca3"),n("ac1f"),n("00b4");var i=n("6b75");function r(t,e){if(t){if("string"===typeof t)return Object(i["a"])(t,e);var n=Object.prototype.toString.call(t).slice(8,-1);return"Object"===n&&t.constructor&&(n=t.constructor.name),"Map"===n||"Set"===n?Array.from(t):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?Object(i["a"])(t,e):void 0}}},"06cf":function(t,e,n){var i=n("83ab"),r=n("c65b"),o=n("d1e7"),a=n("5c6c"),s=n("fc6a"),c=n("a04b"),u=n("1a2d"),l=n("0cfb"),f=Object.getOwnPropertyDescriptor;e.f=i?f:function(t,e){if(t=s(t),e=c(e),l)try{return f(t,e)}catch(n){}if(u(t,e))return a(!r(o.f,t,e),t[e])}},"0789":function(t,e,n){"use strict";n.d(e,"c",(function(){return l})),n.d(e,"d",(function(){return f})),n.d(e,"e",(function(){return h})),n.d(e,"a",(function(){return d})),n.d(e,"b",(function(){return p}));n("99af");var i=n("d9f7");function r(){for(var t,e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],n=arguments.length,i=new Array(n>1?n-1:0),r=1;r<n;r++)i[r-1]=arguments[r];return(t=Array()).concat.apply(t,[e].concat(i))}function o(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"top center 0",n=arguments.length>2?arguments[2]:void 0;return{name:t,functional:!0,props:{group:{type:Boolean,default:!1},hideOnLeave:{type:Boolean,default:!1},leaveAbsolute:{type:Boolean,default:!1},mode:{type:String,default:n},origin:{type:String,default:e}},render:function(e,n){var o="transition".concat(n.props.group?"-group":""),a={props:{name:t,mode:n.props.mode},on:{beforeEnter:function(t){t.style.transformOrigin=n.props.origin,t.style.webkitTransformOrigin=n.props.origin}}};return n.props.leaveAbsolute&&(a.on.leave=r(a.on.leave,(function(t){var 
e=t.offsetTop,n=t.offsetLeft,i=t.offsetWidth,r=t.offsetHeight;t._transitionInitialStyles={position:t.style.position,top:t.style.top,left:t.style.left,width:t.style.width,height:t.style.height},t.style.position="absolute",t.style.top=e+"px",t.style.left=n+"px",t.style.width=i+"px",t.style.height=r+"px"})),a.on.afterLeave=r(a.on.afterLeave,(function(t){if(t&&t._transitionInitialStyles){var e=t._transitionInitialStyles,n=e.position,i=e.top,r=e.left,o=e.width,a=e.height;delete t._transitionInitialStyles,t.style.position=n||"",t.style.top=i||"",t.style.left=r||"",t.style.width=o||"",t.style.height=a||""}}))),n.props.hideOnLeave&&(a.on.leave=r(a.on.leave,(function(t){t.style.setProperty("display","none","important")}))),e(o,Object(i["a"])(n.data,a),n.children)}}}function a(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"in-out";return{name:t,functional:!0,props:{mode:{type:String,default:n}},render:function(n,r){return n("transition",Object(i["a"])(r.data,{props:{name:t},on:e}),r.children)}}}var s=n("ade3"),c=n("80d2"),u=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"",e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=e?"width":"height",i="offset".concat(Object(c["u"])(n));return{beforeEnter:function(t){t._parent=t.parentNode,t._initialStyle=Object(s["a"])({transition:t.style.transition,overflow:t.style.overflow},n,t.style[n])},enter:function(e){var r=e._initialStyle;e.style.setProperty("transition","none","important"),e.style.overflow="hidden";var o="".concat(e[i],"px");e.style[n]="0",e.offsetHeight,e.style.transition=r.transition,t&&e._parent&&e._parent.classList.add(t),requestAnimationFrame((function(){e.style[n]=o}))},afterEnter:o,enterCancelled:o,leave:function(t){t._initialStyle=Object(s["a"])({transition:"",overflow:t.style.overflow},n,t.style[n]),t.style.overflow="hidden",t.style[n]="".concat(t[i],"px"),t.offsetHeight,requestAnimationFrame((function(){return t.style[n]="0"}))},afterLeave:r,leaveCancelled:r};function r(e){t&&e._parent&&e._parent.classList.remove(t),o(e)}function o(t){var e=t._initialStyle[n];t.style.overflow=t._initialStyle.overflow,null!=e&&(t.style[n]=e),delete t._initialStyle}},l=(o("carousel-transition"),o("carousel-reverse-transition"),o("tab-transition"),o("tab-reverse-transition"),o("menu-transition"),o("fab-transition","center center","out-in"),o("dialog-transition"),o("dialog-bottom-transition"),o("dialog-top-transition"),o("fade-transition")),f=(o("scale-transition"),o("scroll-x-transition")),h=(o("scroll-x-reverse-transition"),o("scroll-y-transition"),o("scroll-y-reverse-transition"),o("slide-x-transition")),d=(o("slide-x-reverse-transition"),o("slide-y-transition"),o("slide-y-reverse-transition"),a("expand-transition",u())),p=a("expand-x-transition",u("",!0))},"07ac":function(t,e,n){var i=n("23e7"),r=n("6f53").values;i({target:"Object",stat:!0},{values:function(t){return r(t)}})},"07fa":function(t,e,n){var i=n("50c4");t.exports=function(t){return i(t.length)}},"0a06":function(t,e,n){"use strict";var i=n("2444"),r=n("c532"),o=n("f6b4"),a=n("5270");function s(t){this.defaults=t,this.interceptors={request:new o,response:new o}}s.prototype.request=function(t){"string"===typeof t&&(t=r.merge({url:arguments[0]},arguments[1])),t=r.merge(i,{method:"get"},this.defaults,t),t.method=t.method.toLowerCase();var e=[a,void 
0],n=Promise.resolve(t);this.interceptors.request.forEach((function(t){e.unshift(t.fulfilled,t.rejected)})),this.interceptors.response.forEach((function(t){e.push(t.fulfilled,t.rejected)}));while(e.length)n=n.then(e.shift(),e.shift());return n},r.forEach(["delete","get","head","options"],(function(t){s.prototype[t]=function(e,n){return this.request(r.merge(n||{},{method:t,url:e}))}})),r.forEach(["post","put","patch"],(function(t){s.prototype[t]=function(e,n,i){return this.request(r.merge(i||{},{method:t,url:e,data:n}))}})),t.exports=s},"0b42":function(t,e,n){var i=n("da84"),r=n("e8b5"),o=n("68ee"),a=n("861d"),s=n("b622"),c=s("species"),u=i.Array;t.exports=function(t){var e;return r(t)&&(e=t.constructor,o(e)&&(e===u||r(e.prototype))?e=void 0:a(e)&&(e=e[c],null===e&&(e=void 0))),void 0===e?u:e}},"0bc6":function(t,e,n){},"0cb2":function(t,e,n){var i=n("e330"),r=n("7b0b"),o=Math.floor,a=i("".charAt),s=i("".replace),c=i("".slice),u=/\$([$&'`]|\d{1,2}|<[^>]*>)/g,l=/\$([$&'`]|\d{1,2})/g;t.exports=function(t,e,n,i,f,h){var d=n+t.length,p=i.length,v=l;return void 0!==f&&(f=r(f),v=u),s(h,v,(function(r,s){var u;switch(a(s,0)){case"$":return"$";case"&":return t;case"`":return c(e,0,n);case"'":return c(e,d);case"<":u=f[c(s,1,-1)];break;default:var l=+s;if(0===l)return r;if(l>p){var h=o(l/10);return 0===h?r:h<=p?void 0===i[h-1]?a(s,1):i[h-1]+a(s,1):r}u=i[l-1]}return void 0===u?"":u}))}},"0cfb":function(t,e,n){var i=n("83ab"),r=n("d039"),o=n("cc12");t.exports=!i&&!r((function(){return 7!=Object.defineProperty(o("div"),"a",{get:function(){return 7}}).a}))},"0d51":function(t,e,n){var i=n("da84"),r=i.String;t.exports=function(t){try{return r(t)}catch(e){return"Object"}}},"0df6":function(t,e,n){"use strict";t.exports=function(t){return function(e){return t.apply(null,e)}}},"0fd9":function(t,e,n){"use strict";var i=n("ade3"),r=n("5530"),o=(n("d3b7"),n("caad"),n("2532"),n("99af"),n("b64b"),n("ac1f"),n("5319"),n("4ec9"),n("3ca3"),n("ddb0"),n("159b"),n("4b85"),n("2b0e")),a=n("d9f7"),s=n("80d2"),c=["sm","md","lg","xl"],u=["start","end","center"];function l(t,e){return c.reduce((function(n,i){return n[t+Object(s["u"])(i)]=e(),n}),{})}var f=function(t){return[].concat(u,["baseline","stretch"]).includes(t)},h=l("align",(function(){return{type:String,default:null,validator:f}})),d=function(t){return[].concat(u,["space-between","space-around"]).includes(t)},p=l("justify",(function(){return{type:String,default:null,validator:d}})),v=function(t){return[].concat(u,["space-between","space-around","stretch"]).includes(t)},m=l("alignContent",(function(){return{type:String,default:null,validator:v}})),g={align:Object.keys(h),justify:Object.keys(p),alignContent:Object.keys(m)},b={align:"align",justify:"justify",alignContent:"align-content"};function y(t,e,n){var i=b[t];if(null!=n){if(e){var r=e.replace(t,"");i+="-".concat(r)}return i+="-".concat(n),i.toLowerCase()}}var x=new Map;e["a"]=o["a"].extend({name:"v-row",functional:!0,props:Object(r["a"])(Object(r["a"])(Object(r["a"])({tag:{type:String,default:"div"},dense:Boolean,noGutters:Boolean,align:{type:String,default:null,validator:f}},h),{},{justify:{type:String,default:null,validator:d}},p),{},{alignContent:{type:String,default:null,validator:v}},m),render:function(t,e){var n=e.props,r=e.data,o=e.children,s="";for(var c in n)s+=String(n[c]);var u=x.get(s);return u||function(){var t,e;for(e in u=[],g)g[e].forEach((function(t){var 
i=n[t],r=y(e,t,i);r&&u.push(r)}));u.push((t={"no-gutters":n.noGutters,"row--dense":n.dense},Object(i["a"])(t,"align-".concat(n.align),n.align),Object(i["a"])(t,"justify-".concat(n.justify),n.justify),Object(i["a"])(t,"align-content-".concat(n.alignContent),n.alignContent),t)),x.set(s,u)}(),t(n.tag,Object(a["a"])(r,{staticClass:"row",class:u}),o)}})},"107c":function(t,e,n){var i=n("d039"),r=n("da84"),o=r.RegExp;t.exports=i((function(){var t=o("(?<a>b)","g");return"b"!==t.exec("b").groups.a||"bc"!=="b".replace(t,"$<a>c")}))},1148:function(t,e,n){"use strict";var i=n("da84"),r=n("5926"),o=n("577e"),a=n("1d80"),s=i.RangeError;t.exports=function(t){var e=o(a(this)),n="",i=r(t);if(i<0||i==1/0)throw s("Wrong number of repetitions");for(;i>0;(i>>>=1)&&(e+=e))1&i&&(n+=e);return n}},1276:function(t,e,n){"use strict";var i=n("2ba4"),r=n("c65b"),o=n("e330"),a=n("d784"),s=n("44e7"),c=n("825a"),u=n("1d80"),l=n("4840"),f=n("8aa5"),h=n("50c4"),d=n("577e"),p=n("dc4a"),v=n("4dae"),m=n("14c3"),g=n("9263"),b=n("9f7f"),y=n("d039"),x=b.UNSUPPORTED_Y,w=4294967295,O=Math.min,_=[].push,S=o(/./.exec),C=o(_),k=o("".slice),j=!y((function(){var t=/(?:)/,e=t.exec;t.exec=function(){return e.apply(this,arguments)};var n="ab".split(t);return 2!==n.length||"a"!==n[0]||"b"!==n[1]}));a("split",(function(t,e,n){var o;return o="c"=="abbc".split(/(b)*/)[1]||4!="test".split(/(?:)/,-1).length||2!="ab".split(/(?:ab)*/).length||4!=".".split(/(.?)(.?)/).length||".".split(/()()/).length>1||"".split(/.?/).length?function(t,n){var o=d(u(this)),a=void 0===n?w:n>>>0;if(0===a)return[];if(void 0===t)return[o];if(!s(t))return r(e,o,t,a);var c,l,f,h=[],p=(t.ignoreCase?"i":"")+(t.multiline?"m":"")+(t.unicode?"u":"")+(t.sticky?"y":""),m=0,b=new RegExp(t.source,p+"g");while(c=r(g,b,o)){if(l=b.lastIndex,l>m&&(C(h,k(o,m,c.index)),c.length>1&&c.index<o.length&&i(_,h,v(c,1)),f=c[0].length,m=l,h.length>=a))break;b.lastIndex===c.index&&b.lastIndex++}return m===o.length?!f&&S(b,"")||C(h,""):C(h,k(o,m)),h.length>a?v(h,0,a):h}:"0".split(void 0,0).length?function(t,n){return void 0===t&&0===n?[]:r(e,this,t,n)}:e,[function(e,n){var i=u(this),a=void 0==e?void 0:p(e,t);return a?r(a,e,i,n):r(o,d(i),e,n)},function(t,i){var r=c(this),a=d(t),s=n(o,r,a,i,o!==e);if(s.done)return s.value;var u=l(r,RegExp),p=r.unicode,v=(r.ignoreCase?"i":"")+(r.multiline?"m":"")+(r.unicode?"u":"")+(x?"g":"y"),g=new u(x?"^(?:"+r.source+")":r,v),b=void 0===i?w:i>>>0;if(0===b)return[];if(0===a.length)return null===m(g,a)?[a]:[];var y=0,_=0,S=[];while(_<a.length){g.lastIndex=x?0:_;var j,$=m(g,x?k(a,_):a);if(null===$||(j=O(h(g.lastIndex+(x?_:0)),a.length))===y)_=f(a,_,p);else{if(C(S,k(a,y,_)),S.length===b)return S;for(var A=1;A<=$.length-1;A++)if(C(S,$[A]),S.length===b)return S;_=y=j}}return C(S,k(a,y)),S}]}),!j,x)},"129f":function(t,e){t.exports=Object.is||function(t,e){return t===e?0!==t||1/t===1/e:t!=t&&e!=e}},"132d":function(t,e,n){"use strict";var i,r=n("5530"),o=(n("c96a"),n("d3b7"),n("caad"),n("2532"),n("ac1f"),n("00b4"),n("a9e3"),n("498a"),n("7db0"),n("fb6a"),n("4804"),n("7e2b")),a=n("a9ad"),s=n("af2b"),c=n("7560"),u=n("80d2"),l=n("2b0e"),f=n("58df");function h(t){return["fas","far","fal","fab","fad","fak"].some((function(e){return t.includes(e)}))}function d(t){return/^[mzlhvcsqta]\s*[-+.0-9][^mlhvzcsqta]+/i.test(t)&&/[\dz]$/i.test(t)&&t.length>4}(function(t){t["xSmall"]="12px",t["small"]="16px",t["default"]="24px",t["medium"]="28px",t["large"]="36px",t["xLarge"]="40px"})(i||(i={}));var 
p=Object(f["a"])(o["a"],a["a"],s["a"],c["a"]).extend({name:"v-icon",props:{dense:Boolean,disabled:Boolean,left:Boolean,right:Boolean,size:[Number,String],tag:{type:String,required:!1,default:"i"}},computed:{medium:function(){return!1},hasClickListener:function(){return Boolean(this.listeners$.click||this.listeners$["!click"])}},methods:{getIcon:function(){var t="";return this.$slots.default&&(t=this.$slots.default[0].text.trim()),Object(u["t"])(this,t)},getSize:function(){var t={xSmall:this.xSmall,small:this.small,medium:this.medium,large:this.large,xLarge:this.xLarge},e=Object(u["q"])(t).find((function(e){return t[e]}));return e&&i[e]||Object(u["d"])(this.size)},getDefaultData:function(){return{staticClass:"v-icon notranslate",class:{"v-icon--disabled":this.disabled,"v-icon--left":this.left,"v-icon--link":this.hasClickListener,"v-icon--right":this.right,"v-icon--dense":this.dense},attrs:Object(r["a"])({"aria-hidden":!this.hasClickListener,disabled:this.hasClickListener&&this.disabled,type:this.hasClickListener?"button":void 0},this.attrs$),on:this.listeners$}},getSvgWrapperData:function(){var t=this.getSize(),e=Object(r["a"])(Object(r["a"])({},this.getDefaultData()),{},{style:t?{fontSize:t,height:t,width:t}:void 0});return this.applyColors(e),e},applyColors:function(t){t.class=Object(r["a"])(Object(r["a"])({},t.class),this.themeClasses),this.setTextColor(this.color,t)},renderFontIcon:function(t,e){var n=[],i=this.getDefaultData(),r="material-icons",o=t.indexOf("-"),a=o<=-1;a?n.push(t):(r=t.slice(0,o),h(r)&&(r="")),i.class[r]=!0,i.class[t]=!a;var s=this.getSize();return s&&(i.style={fontSize:s}),this.applyColors(i),e(this.hasClickListener?"button":this.tag,i,n)},renderSvgIcon:function(t,e){var n={class:"v-icon__svg",attrs:{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",role:"img","aria-hidden":!0}},i=this.getSize();return i&&(n.style={fontSize:i,height:i,width:i}),e(this.hasClickListener?"button":"span",this.getSvgWrapperData(),[e("svg",n,[e("path",{attrs:{d:t}})])])},renderSvgIconComponent:function(t,e){var n={class:{"v-icon__component":!0}},i=this.getSize();i&&(n.style={fontSize:i,height:i,width:i}),this.applyColors(n);var r=t.component;return n.props=t.props,n.nativeOn=n.on,e(this.hasClickListener?"button":"span",this.getSvgWrapperData(),[e(r,n)])}},render:function(t){var e=this.getIcon();return"string"===typeof e?d(e)?this.renderSvgIcon(e,t):this.renderFontIcon(e,t):this.renderSvgIconComponent(e,t)}});e["a"]=l["a"].extend({name:"v-icon",$_wrapperFor:p,functional:!0,render:function(t,e){var n=e.data,i=e.children,r="";return n.domProps&&(r=n.domProps.textContent||n.domProps.innerHTML||r,delete n.domProps.textContent,delete n.domProps.innerHTML),t(p,n,r?[r]:i)}})},"13d2":function(t,e,n){var i=n("d039"),r=n("1626"),o=n("1a2d"),a=n("83ab"),s=n("5e77").CONFIGURABLE,c=n("8925"),u=n("69f3"),l=u.enforce,f=u.get,h=Object.defineProperty,d=a&&!i((function(){return 8!==h((function(){}),"length",{value:8}).length})),p=String(String).split("String"),v=t.exports=function(t,e,n){if("Symbol("===String(e).slice(0,7)&&(e="["+String(e).replace(/^Symbol\(([^)]*)\)/,"$1")+"]"),n&&n.getter&&(e="get "+e),n&&n.setter&&(e="set "+e),(!o(t,"name")||s&&t.name!==e)&&h(t,"name",{value:e,configurable:!0}),d&&n&&o(n,"arity")&&t.length!==n.arity&&h(t,"length",{value:n.arity}),n&&o(n,"constructor")&&n.constructor){if(a)try{h(t,"prototype",{writable:!1})}catch(r){}}else t.prototype=void 0;var i=l(t);return o(i,"source")||(i.source=p.join("string"==typeof e?e:"")),t};Function.prototype.toString=v((function(){return 
r(this)&&f(this).source||c(this)}),"toString")},"14c3":function(t,e,n){var i=n("da84"),r=n("c65b"),o=n("825a"),a=n("1626"),s=n("c6b6"),c=n("9263"),u=i.TypeError;t.exports=function(t,e){var n=t.exec;if(a(n)){var i=r(n,t,e);return null!==i&&o(i),i}if("RegExp"===s(t))return r(c,t,e);throw u("RegExp#exec called on incompatible receiver")}},"14e5":function(t,e,n){"use strict";var i=n("23e7"),r=n("c65b"),o=n("59ed"),a=n("f069"),s=n("e667"),c=n("2266"),u=n("5eed");i({target:"Promise",stat:!0,forced:u},{all:function(t){var e=this,n=a.f(e),i=n.resolve,u=n.reject,l=s((function(){var n=o(e.resolve),a=[],s=0,l=1;c(t,(function(t){var o=s++,c=!1;l++,r(n,e,t).then((function(t){c||(c=!0,a[o]=t,--l||i(a))}),u)})),--l||i(a)}));return l.error&&u(l.value),n.promise}})},"159b":function(t,e,n){var i=n("da84"),r=n("fdbc"),o=n("785a"),a=n("17c2"),s=n("9112"),c=function(t){if(t&&t.forEach!==a)try{s(t,"forEach",a)}catch(e){t.forEach=a}};for(var u in r)r[u]&&c(i[u]&&i[u].prototype);c(o)},"15fd":function(t,e,n){"use strict";n.d(e,"a",(function(){return r}));n("a4d3"),n("b64b");function i(t,e){if(null==t)return{};var n,i,r={},o=Object.keys(t);for(i=0;i<o.length;i++)n=o[i],e.indexOf(n)>=0||(r[n]=t[n]);return r}function r(t,e){if(null==t)return{};var n,r,o=i(t,e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(t);for(r=0;r<a.length;r++)n=a[r],e.indexOf(n)>=0||Object.prototype.propertyIsEnumerable.call(t,n)&&(o[n]=t[n])}return o}},1626:function(t,e){t.exports=function(t){return"function"==typeof t}},"166a":function(t,e,n){},1681:function(t,e,n){},"17c2":function(t,e,n){"use strict";var i=n("b727").forEach,r=n("a640"),o=r("forEach");t.exports=o?[].forEach:function(t){return i(this,t,arguments.length>1?arguments[1]:void 0)}},"18a5":function(t,e,n){"use strict";var i=n("23e7"),r=n("857a"),o=n("af03");i({target:"String",proto:!0,forced:o("anchor")},{anchor:function(t){return r(this,"a","name",t)}})},"19aa":function(t,e,n){var i=n("da84"),r=n("3a9b"),o=i.TypeError;t.exports=function(t,e){if(r(e,t))return t;throw o("Incorrect invocation")}},"1a2d":function(t,e,n){var i=n("e330"),r=n("7b0b"),o=i({}.hasOwnProperty);t.exports=Object.hasOwn||function(t,e){return o(r(t),e)}},"1b2c":function(t,e,n){},"1be4":function(t,e,n){var i=n("d066");t.exports=i("document","documentElement")},"1c7e":function(t,e,n){var i=n("b622"),r=i("iterator"),o=!1;try{var a=0,s={next:function(){return{done:!!a++}},return:function(){o=!0}};s[r]=function(){return this},Array.from(s,(function(){throw 2}))}catch(c){}t.exports=function(t,e){if(!e&&!o)return!1;var n=!1;try{var i={};i[r]=function(){return{next:function(){return{done:n=!0}}}},t(i)}catch(c){}return n}},"1c87":function(t,e,n){"use strict";var i=n("ade3"),r=n("5530"),o=(n("9911"),n("498a"),n("99af"),n("ac1f"),n("5319"),n("2b0e")),a=n("5607"),s=n("80d2");e["a"]=o["a"].extend({name:"routable",directives:{Ripple:a["a"]},props:{activeClass:String,append:Boolean,disabled:Boolean,exact:{type:Boolean,default:void 0},exactPath:Boolean,exactActiveClass:String,link:Boolean,href:[String,Object],to:[String,Object],nuxt:Boolean,replace:Boolean,ripple:{type:[Boolean,Object],default:null},tag:String,target:String},data:function(){return{isActive:!1,proxyClass:""}},computed:{classes:function(){var t={};return this.to||(this.activeClass&&(t[this.activeClass]=this.isActive),this.proxyClass&&(t[this.proxyClass]=this.isActive)),t},computedRipple:function(){var t;return 
null!=(t=this.ripple)?t:!this.disabled&&this.isClickable},isClickable:function(){return!this.disabled&&Boolean(this.isLink||this.$listeners.click||this.$listeners["!click"]||this.$attrs.tabindex)},isLink:function(){return this.to||this.href||this.link},styles:function(){return{}}},watch:{$route:"onRouteChange"},mounted:function(){this.onRouteChange()},methods:{generateRouteLink:function(){var t,e,n=this.exact,o=(t={attrs:{tabindex:"tabindex"in this.$attrs?this.$attrs.tabindex:void 0},class:this.classes,style:this.styles,props:{},directives:[{name:"ripple",value:this.computedRipple}]},Object(i["a"])(t,this.to?"nativeOn":"on",Object(r["a"])(Object(r["a"])({},this.$listeners),"click"in this?{click:this.click}:void 0)),Object(i["a"])(t,"ref","link"),t);if("undefined"===typeof this.exact&&(n="/"===this.to||this.to===Object(this.to)&&"/"===this.to.path),this.to){var a=this.activeClass,s=this.exactActiveClass||a;this.proxyClass&&(a="".concat(a," ").concat(this.proxyClass).trim(),s="".concat(s," ").concat(this.proxyClass).trim()),e=this.nuxt?"nuxt-link":"router-link",Object.assign(o.props,{to:this.to,exact:n,exactPath:this.exactPath,activeClass:a,exactActiveClass:s,append:this.append,replace:this.replace})}else e=(this.href?"a":this.tag)||"div","a"===e&&this.href&&(o.attrs.href=this.href);return this.target&&(o.attrs.target=this.target),{tag:e,data:o}},onRouteChange:function(){var t=this;if(this.to&&this.$refs.link&&this.$route){var e="".concat(this.activeClass||""," ").concat(this.proxyClass||"").trim(),n="".concat(this.exactActiveClass||""," ").concat(this.proxyClass||"").trim()||e,i="_vnode.data.class."+(this.exact?n:e);this.$nextTick((function(){!Object(s["j"])(t.$refs.link,i)===t.isActive&&t.toggle()}))}},toggle:function(){this.isActive=!this.isActive}}})},"1cdc":function(t,e,n){var i=n("342f");t.exports=/(?:ipad|iphone|ipod).*applewebkit/i.test(i)},"1d2b":function(t,e,n){"use strict";t.exports=function(t,e){return function(){for(var n=new Array(arguments.length),i=0;i<n.length;i++)n[i]=arguments[i];return t.apply(e,n)}}},"1d80":function(t,e,n){var i=n("da84"),r=i.TypeError;t.exports=function(t){if(void 0==t)throw r("Can't call method on "+t);return t}},"1da1":function(t,e,n){"use strict";n.d(e,"a",(function(){return r}));n("d3b7");function i(t,e,n,i,r,o,a){try{var s=t[o](a),c=s.value}catch(u){return void n(u)}s.done?e(c):Promise.resolve(c).then(i,r)}function r(t){return function(){var e=this,n=arguments;return new Promise((function(r,o){var a=t.apply(e,n);function s(t){i(a,r,o,s,c,"next",t)}function c(t){i(a,r,o,s,c,"throw",t)}s(void 0)}))}}},"1dde":function(t,e,n){var i=n("d039"),r=n("b622"),o=n("2d00"),a=r("species");t.exports=function(t){return o>=51||!i((function(){var e=[],n=e.constructor={};return n[a]=function(){return{foo:1}},1!==e[t](Boolean).foo}))}},"1f4f":function(t,e,n){"use strict";var i=n("5530"),r=(n("a9e3"),n("8b37"),n("80d2")),o=n("7560"),a=n("58df");e["a"]=Object(a["a"])(o["a"]).extend({name:"v-simple-table",props:{dense:Boolean,fixedHeader:Boolean,height:[Number,String]},computed:{classes:function(){return Object(i["a"])({"v-data-table--dense":this.dense,"v-data-table--fixed-height":!!this.height&&!this.fixedHeader,"v-data-table--fixed-header":this.fixedHeader,"v-data-table--has-top":!!this.$slots.top,"v-data-table--has-bottom":!!this.$slots.bottom},this.themeClasses)}},methods:{genWrapper:function(){return 
this.$slots.wrapper||this.$createElement("div",{staticClass:"v-data-table__wrapper",style:{height:Object(r["d"])(this.height)}},[this.$createElement("table",this.$slots.default)])}},render:function(t){return t("div",{staticClass:"v-data-table",class:this.classes},[this.$slots.top,this.genWrapper(),this.$slots.bottom])}})},"20f6":function(t,e,n){},2266:function(t,e,n){var i=n("da84"),r=n("0366"),o=n("c65b"),a=n("825a"),s=n("0d51"),c=n("e95a"),u=n("07fa"),l=n("3a9b"),f=n("9a1f"),h=n("35a1"),d=n("2a62"),p=i.TypeError,v=function(t,e){this.stopped=t,this.result=e},m=v.prototype;t.exports=function(t,e,n){var i,g,b,y,x,w,O,_=n&&n.that,S=!(!n||!n.AS_ENTRIES),C=!(!n||!n.IS_ITERATOR),k=!(!n||!n.INTERRUPTED),j=r(e,_),$=function(t){return i&&d(i,"normal",t),new v(!0,t)},A=function(t){return S?(a(t),k?j(t[0],t[1],$):j(t[0],t[1])):k?j(t,$):j(t)};if(C)i=t;else{if(g=h(t),!g)throw p(s(t)+" is not iterable");if(c(g)){for(b=0,y=u(t);y>b;b++)if(x=A(t[b]),x&&l(m,x))return x;return new v(!1)}i=f(t,g)}w=i.next;while(!(O=o(w,i)).done){try{x=A(O.value)}catch(E){d(i,"throw",E)}if("object"==typeof x&&x&&l(m,x))return x}return new v(!1)}},"22da":function(t,e,n){"use strict";var i=n("490a");e["a"]=i["a"]},"23cb":function(t,e,n){var i=n("5926"),r=Math.max,o=Math.min;t.exports=function(t,e){var n=i(t);return n<0?r(n+e,0):o(n,e)}},"23e7":function(t,e,n){var i=n("da84"),r=n("06cf").f,o=n("9112"),a=n("cb2d"),s=n("ce4e"),c=n("e893"),u=n("94ca");t.exports=function(t,e){var n,l,f,h,d,p,v=t.target,m=t.global,g=t.stat;if(l=m?i:g?i[v]||s(v,{}):(i[v]||{}).prototype,l)for(f in e){if(d=e[f],t.noTargetGet?(p=r(l,f),h=p&&p.value):h=l[f],n=u(m?f:v+(g?".":"#")+f,t.forced),!n&&void 0!==h){if(typeof d==typeof h)continue;c(d,h)}(t.sham||h&&h.sham)&&o(d,"sham",!0),a(l,f,d,t)}}},"241c":function(t,e,n){var i=n("ca84"),r=n("7839"),o=r.concat("length","prototype");e.f=Object.getOwnPropertyNames||function(t){return i(t,o)}},2444:function(t,e,n){"use strict";(function(e){var i=n("c532"),r=n("c8af"),o={"Content-Type":"application/x-www-form-urlencoded"};function a(t,e){!i.isUndefined(t)&&i.isUndefined(t["Content-Type"])&&(t["Content-Type"]=e)}function s(){var t;return("undefined"!==typeof XMLHttpRequest||"undefined"!==typeof e)&&(t=n("b50d")),t}var c={adapter:s(),transformRequest:[function(t,e){return r(e,"Content-Type"),i.isFormData(t)||i.isArrayBuffer(t)||i.isBuffer(t)||i.isStream(t)||i.isFile(t)||i.isBlob(t)?t:i.isArrayBufferView(t)?t.buffer:i.isURLSearchParams(t)?(a(e,"application/x-www-form-urlencoded;charset=utf-8"),t.toString()):i.isObject(t)?(a(e,"application/json;charset=utf-8"),JSON.stringify(t)):t}],transformResponse:[function(t){if("string"===typeof t)try{t=JSON.parse(t)}catch(e){}return t}],timeout:0,xsrfCookieName:"XSRF-TOKEN",xsrfHeaderName:"X-XSRF-TOKEN",maxContentLength:-1,validateStatus:function(t){return t>=200&&t<300},headers:{common:{Accept:"application/json, text/plain, */*"}}};i.forEach(["delete","get","head"],(function(t){c.headers[t]={}})),i.forEach(["post","put","patch"],(function(t){c.headers[t]=i.merge(o)})),t.exports=c}).call(this,n("4362"))},"24b2":function(t,e,n){"use strict";n("a9e3");var i=n("80d2"),r=n("2b0e");e["a"]=r["a"].extend({name:"measurable",props:{height:[Number,String],maxHeight:[Number,String],maxWidth:[Number,String],minHeight:[Number,String],minWidth:[Number,String],width:[Number,String]},computed:{measurableStyles:function(){var 
t={},e=Object(i["d"])(this.height),n=Object(i["d"])(this.minHeight),r=Object(i["d"])(this.minWidth),o=Object(i["d"])(this.maxHeight),a=Object(i["d"])(this.maxWidth),s=Object(i["d"])(this.width);return e&&(t.height=e),n&&(t.minHeight=n),r&&(t.minWidth=r),o&&(t.maxHeight=o),a&&(t.maxWidth=a),s&&(t.width=s),t}}})},2532:function(t,e,n){"use strict";var i=n("23e7"),r=n("e330"),o=n("5a34"),a=n("1d80"),s=n("577e"),c=n("ab13"),u=r("".indexOf);i({target:"String",proto:!0,forced:!c("includes")},{includes:function(t){return!!~u(s(a(this)),s(o(t)),arguments.length>1?arguments[1]:void 0)}})},"25a8":function(t,e,n){},"25f0":function(t,e,n){"use strict";var i=n("5e77").PROPER,r=n("cb2d"),o=n("825a"),a=n("577e"),s=n("d039"),c=n("90d8"),u="toString",l=RegExp.prototype,f=l[u],h=s((function(){return"/a/b"!=f.call({source:"a",flags:"b"})})),d=i&&f.name!=u;(h||d)&&r(RegExp.prototype,u,(function(){var t=o(this),e=a(t.source),n=a(c(t));return"/"+e+"/"+n}),{unsafe:!0})},2626:function(t,e,n){"use strict";var i=n("d066"),r=n("9bf2"),o=n("b622"),a=n("83ab"),s=o("species");t.exports=function(t){var e=i(t),n=r.f;a&&e&&!e[s]&&n(e,s,{configurable:!0,get:function(){return this}})}},"262e":function(t,e,n){"use strict";n.d(e,"a",(function(){return r}));n("d9e2");function i(t,e){return i=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t},i(t,e)}function r(t,e){if("function"!==typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),Object.defineProperty(t,"prototype",{writable:!1}),e&&i(t,e)}},2877:function(t,e,n){"use strict";function i(t,e,n,i,r,o,a,s){var c,u="function"===typeof t?t.options:t;if(e&&(u.render=e,u.staticRenderFns=n,u._compiled=!0),i&&(u.functional=!0),o&&(u._scopeId="data-v-"+o),a?(c=function(t){t=t||this.$vnode&&this.$vnode.ssrContext||this.parent&&this.parent.$vnode&&this.parent.$vnode.ssrContext,t||"undefined"===typeof __VUE_SSR_CONTEXT__||(t=__VUE_SSR_CONTEXT__),r&&r.call(this,t),t&&t._registeredComponents&&t._registeredComponents.add(a)},u._ssrRegister=c):r&&(c=s?function(){r.call(this,(u.functional?this.parent:this).$root.$options.shadowRoot)}:r),c)if(u.functional){u._injectStyles=c;var l=u.render;u.render=function(t,e){return c.call(e),l(t,e)}}else{var f=u.beforeCreate;u.beforeCreate=f?[].concat(f,c):[c]}return{exports:t,options:u}}n.d(e,"a",(function(){return i}))},2909:function(t,e,n){"use strict";n.d(e,"a",(function(){return c}));var i=n("6b75");function r(t){if(Array.isArray(t))return Object(i["a"])(t)}n("a4d3"),n("e01a"),n("d3b7"),n("d28b"),n("3ca3"),n("ddb0"),n("a630");function o(t){if("undefined"!==typeof Symbol&&null!=t[Symbol.iterator]||null!=t["@@iterator"])return Array.from(t)}var a=n("06c5");n("d9e2");function s(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}function c(t){return r(t)||o(t)||Object(a["a"])(t)||s()}},"2a62":function(t,e,n){var i=n("c65b"),r=n("825a"),o=n("dc4a");t.exports=function(t,e,n){var a,s;r(t);try{if(a=o(t,"return"),!a){if("throw"===e)throw n;return n}a=i(a,t)}catch(c){s=!0,a=c}if("throw"===e)throw n;if(s)throw a;return r(a),n}},"2a7f":function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var i=n("71d9"),r=n("80d2"),o=Object(r["e"])("v-toolbar__title"),a=Object(r["e"])("v-toolbar__items");i["a"]},"2b0e":function(t,e,n){"use strict";(function(t){ -/*! 
- * Vue.js v2.6.14 - * (c) 2014-2021 Evan You - * Released under the MIT License. - */ -var n=Object.freeze({});function i(t){return void 0===t||null===t}function r(t){return void 0!==t&&null!==t}function o(t){return!0===t}function a(t){return!1===t}function s(t){return"string"===typeof t||"number"===typeof t||"symbol"===typeof t||"boolean"===typeof t}function c(t){return null!==t&&"object"===typeof t}var u=Object.prototype.toString;function l(t){return"[object Object]"===u.call(t)}function f(t){return"[object RegExp]"===u.call(t)}function h(t){var e=parseFloat(String(t));return e>=0&&Math.floor(e)===e&&isFinite(t)}function d(t){return r(t)&&"function"===typeof t.then&&"function"===typeof t.catch}function p(t){return null==t?"":Array.isArray(t)||l(t)&&t.toString===u?JSON.stringify(t,null,2):String(t)}function v(t){var e=parseFloat(t);return isNaN(e)?t:e}function m(t,e){for(var n=Object.create(null),i=t.split(","),r=0;r<i.length;r++)n[i[r]]=!0;return e?function(t){return n[t.toLowerCase()]}:function(t){return n[t]}}m("slot,component",!0);var g=m("key,ref,slot,slot-scope,is");function b(t,e){if(t.length){var n=t.indexOf(e);if(n>-1)return t.splice(n,1)}}var y=Object.prototype.hasOwnProperty;function x(t,e){return y.call(t,e)}function w(t){var e=Object.create(null);return function(n){var i=e[n];return i||(e[n]=t(n))}}var O=/-(\w)/g,_=w((function(t){return t.replace(O,(function(t,e){return e?e.toUpperCase():""}))})),S=w((function(t){return t.charAt(0).toUpperCase()+t.slice(1)})),C=/\B([A-Z])/g,k=w((function(t){return t.replace(C,"-$1").toLowerCase()}));function j(t,e){function n(n){var i=arguments.length;return i?i>1?t.apply(e,arguments):t.call(e,n):t.call(e)}return n._length=t.length,n}function $(t,e){return t.bind(e)}var A=Function.prototype.bind?$:j;function E(t,e){e=e||0;var n=t.length-e,i=new Array(n);while(n--)i[n]=t[n+e];return i}function T(t,e){for(var n in e)t[n]=e[n];return t}function L(t){for(var e={},n=0;n<t.length;n++)t[n]&&T(e,t[n]);return e}function I(t,e,n){}var B=function(t,e,n){return!1},M=function(t){return t};function P(t,e){if(t===e)return!0;var n=c(t),i=c(e);if(!n||!i)return!n&&!i&&String(t)===String(e);try{var r=Array.isArray(t),o=Array.isArray(e);if(r&&o)return t.length===e.length&&t.every((function(t,n){return P(t,e[n])}));if(t instanceof Date&&e instanceof Date)return t.getTime()===e.getTime();if(r||o)return!1;var a=Object.keys(t),s=Object.keys(e);return a.length===s.length&&a.every((function(n){return P(t[n],e[n])}))}catch(u){return!1}}function D(t,e){for(var n=0;n<t.length;n++)if(P(t[n],e))return n;return-1}function R(t){var e=!1;return function(){e||(e=!0,t.apply(this,arguments))}}var N="data-server-rendered",V=["component","directive","filter"],F=["beforeCreate","created","beforeMount","mounted","beforeUpdate","updated","beforeDestroy","destroyed","activated","deactivated","errorCaptured","serverPrefetch"],z={optionMergeStrategies:Object.create(null),silent:!1,productionTip:!1,devtools:!1,performance:!1,errorHandler:null,warnHandler:null,ignoredElements:[],keyCodes:Object.create(null),isReservedTag:B,isReservedAttr:B,isUnknownElement:B,getTagNamespace:I,parsePlatformTagName:M,mustUseProp:B,async:!0,_lifecycleHooks:F},H=/a-zA-Z\u00B7\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u037D\u037F-\u1FFF\u200C-\u200D\u203F-\u2040\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD/;function W(t){var e=(t+"").charCodeAt(0);return 36===e||95===e}function U(t,e,n,i){Object.defineProperty(t,e,{value:n,enumerable:!!i,writable:!0,configurable:!0})}var q=new 
RegExp("[^"+H.source+".$_\\d]");function G(t){if(!q.test(t)){var e=t.split(".");return function(t){for(var n=0;n<e.length;n++){if(!t)return;t=t[e[n]]}return t}}}var Y,Z="__proto__"in{},K="undefined"!==typeof window,X="undefined"!==typeof WXEnvironment&&!!WXEnvironment.platform,J=X&&WXEnvironment.platform.toLowerCase(),Q=K&&window.navigator.userAgent.toLowerCase(),tt=Q&&/msie|trident/.test(Q),et=Q&&Q.indexOf("msie 9.0")>0,nt=Q&&Q.indexOf("edge/")>0,it=(Q&&Q.indexOf("android"),Q&&/iphone|ipad|ipod|ios/.test(Q)||"ios"===J),rt=(Q&&/chrome\/\d+/.test(Q),Q&&/phantomjs/.test(Q),Q&&Q.match(/firefox\/(\d+)/)),ot={}.watch,at=!1;if(K)try{var st={};Object.defineProperty(st,"passive",{get:function(){at=!0}}),window.addEventListener("test-passive",null,st)}catch(Sa){}var ct=function(){return void 0===Y&&(Y=!K&&!X&&"undefined"!==typeof t&&(t["process"]&&"server"===t["process"].env.VUE_ENV)),Y},ut=K&&window.__VUE_DEVTOOLS_GLOBAL_HOOK__;function lt(t){return"function"===typeof t&&/native code/.test(t.toString())}var ft,ht="undefined"!==typeof Symbol&&lt(Symbol)&&"undefined"!==typeof Reflect&&lt(Reflect.ownKeys);ft="undefined"!==typeof Set&&lt(Set)?Set:function(){function t(){this.set=Object.create(null)}return t.prototype.has=function(t){return!0===this.set[t]},t.prototype.add=function(t){this.set[t]=!0},t.prototype.clear=function(){this.set=Object.create(null)},t}();var dt=I,pt=0,vt=function(){this.id=pt++,this.subs=[]};vt.prototype.addSub=function(t){this.subs.push(t)},vt.prototype.removeSub=function(t){b(this.subs,t)},vt.prototype.depend=function(){vt.target&&vt.target.addDep(this)},vt.prototype.notify=function(){var t=this.subs.slice();for(var e=0,n=t.length;e<n;e++)t[e].update()},vt.target=null;var mt=[];function gt(t){mt.push(t),vt.target=t}function bt(){mt.pop(),vt.target=mt[mt.length-1]}var yt=function(t,e,n,i,r,o,a,s){this.tag=t,this.data=e,this.children=n,this.text=i,this.elm=r,this.ns=void 0,this.context=o,this.fnContext=void 0,this.fnOptions=void 0,this.fnScopeId=void 0,this.key=e&&e.key,this.componentOptions=a,this.componentInstance=void 0,this.parent=void 0,this.raw=!1,this.isStatic=!1,this.isRootInsert=!0,this.isComment=!1,this.isCloned=!1,this.isOnce=!1,this.asyncFactory=s,this.asyncMeta=void 0,this.isAsyncPlaceholder=!1},xt={child:{configurable:!0}};xt.child.get=function(){return this.componentInstance},Object.defineProperties(yt.prototype,xt);var wt=function(t){void 0===t&&(t="");var e=new yt;return e.text=t,e.isComment=!0,e};function Ot(t){return new yt(void 0,void 0,void 0,String(t))}function _t(t){var e=new yt(t.tag,t.data,t.children&&t.children.slice(),t.text,t.elm,t.context,t.componentOptions,t.asyncFactory);return e.ns=t.ns,e.isStatic=t.isStatic,e.key=t.key,e.isComment=t.isComment,e.fnContext=t.fnContext,e.fnOptions=t.fnOptions,e.fnScopeId=t.fnScopeId,e.asyncMeta=t.asyncMeta,e.isCloned=!0,e}var St=Array.prototype,Ct=Object.create(St),kt=["push","pop","shift","unshift","splice","sort","reverse"];kt.forEach((function(t){var e=St[t];U(Ct,t,(function(){var n=[],i=arguments.length;while(i--)n[i]=arguments[i];var r,o=e.apply(this,n),a=this.__ob__;switch(t){case"push":case"unshift":r=n;break;case"splice":r=n.slice(2);break}return r&&a.observeArray(r),a.dep.notify(),o}))}));var jt=Object.getOwnPropertyNames(Ct),$t=!0;function At(t){$t=t}var Et=function(t){this.value=t,this.dep=new vt,this.vmCount=0,U(t,"__ob__",this),Array.isArray(t)?(Z?Tt(t,Ct):Lt(t,Ct,jt),this.observeArray(t)):this.walk(t)};function Tt(t,e){t.__proto__=e}function Lt(t,e,n){for(var i=0,r=n.length;i<r;i++){var 
o=n[i];U(t,o,e[o])}}function It(t,e){var n;if(c(t)&&!(t instanceof yt))return x(t,"__ob__")&&t.__ob__ instanceof Et?n=t.__ob__:$t&&!ct()&&(Array.isArray(t)||l(t))&&Object.isExtensible(t)&&!t._isVue&&(n=new Et(t)),e&&n&&n.vmCount++,n}function Bt(t,e,n,i,r){var o=new vt,a=Object.getOwnPropertyDescriptor(t,e);if(!a||!1!==a.configurable){var s=a&&a.get,c=a&&a.set;s&&!c||2!==arguments.length||(n=t[e]);var u=!r&&It(n);Object.defineProperty(t,e,{enumerable:!0,configurable:!0,get:function(){var e=s?s.call(t):n;return vt.target&&(o.depend(),u&&(u.dep.depend(),Array.isArray(e)&&Dt(e))),e},set:function(e){var i=s?s.call(t):n;e===i||e!==e&&i!==i||s&&!c||(c?c.call(t,e):n=e,u=!r&&It(e),o.notify())}})}}function Mt(t,e,n){if(Array.isArray(t)&&h(e))return t.length=Math.max(t.length,e),t.splice(e,1,n),n;if(e in t&&!(e in Object.prototype))return t[e]=n,n;var i=t.__ob__;return t._isVue||i&&i.vmCount?n:i?(Bt(i.value,e,n),i.dep.notify(),n):(t[e]=n,n)}function Pt(t,e){if(Array.isArray(t)&&h(e))t.splice(e,1);else{var n=t.__ob__;t._isVue||n&&n.vmCount||x(t,e)&&(delete t[e],n&&n.dep.notify())}}function Dt(t){for(var e=void 0,n=0,i=t.length;n<i;n++)e=t[n],e&&e.__ob__&&e.__ob__.dep.depend(),Array.isArray(e)&&Dt(e)}Et.prototype.walk=function(t){for(var e=Object.keys(t),n=0;n<e.length;n++)Bt(t,e[n])},Et.prototype.observeArray=function(t){for(var e=0,n=t.length;e<n;e++)It(t[e])};var Rt=z.optionMergeStrategies;function Nt(t,e){if(!e)return t;for(var n,i,r,o=ht?Reflect.ownKeys(e):Object.keys(e),a=0;a<o.length;a++)n=o[a],"__ob__"!==n&&(i=t[n],r=e[n],x(t,n)?i!==r&&l(i)&&l(r)&&Nt(i,r):Mt(t,n,r));return t}function Vt(t,e,n){return n?function(){var i="function"===typeof e?e.call(n,n):e,r="function"===typeof t?t.call(n,n):t;return i?Nt(i,r):r}:e?t?function(){return Nt("function"===typeof e?e.call(this,this):e,"function"===typeof t?t.call(this,this):t)}:e:t}function Ft(t,e){var n=e?t?t.concat(e):Array.isArray(e)?e:[e]:t;return n?zt(n):n}function zt(t){for(var e=[],n=0;n<t.length;n++)-1===e.indexOf(t[n])&&e.push(t[n]);return e}function Ht(t,e,n,i){var r=Object.create(t||null);return e?T(r,e):r}Rt.data=function(t,e,n){return n?Vt(t,e,n):e&&"function"!==typeof e?t:Vt(t,e)},F.forEach((function(t){Rt[t]=Ft})),V.forEach((function(t){Rt[t+"s"]=Ht})),Rt.watch=function(t,e,n,i){if(t===ot&&(t=void 0),e===ot&&(e=void 0),!e)return Object.create(t||null);if(!t)return e;var r={};for(var o in T(r,t),e){var a=r[o],s=e[o];a&&!Array.isArray(a)&&(a=[a]),r[o]=a?a.concat(s):Array.isArray(s)?s:[s]}return r},Rt.props=Rt.methods=Rt.inject=Rt.computed=function(t,e,n,i){if(!t)return e;var r=Object.create(null);return T(r,t),e&&T(r,e),r},Rt.provide=Vt;var Wt=function(t,e){return void 0===e?t:e};function Ut(t,e){var n=t.props;if(n){var i,r,o,a={};if(Array.isArray(n)){i=n.length;while(i--)r=n[i],"string"===typeof r&&(o=_(r),a[o]={type:null})}else if(l(n))for(var s in n)r=n[s],o=_(s),a[o]=l(r)?r:{type:r};else 0;t.props=a}}function qt(t,e){var n=t.inject;if(n){var i=t.inject={};if(Array.isArray(n))for(var r=0;r<n.length;r++)i[n[r]]={from:n[r]};else if(l(n))for(var o in n){var a=n[o];i[o]=l(a)?T({from:o},a):{from:a}}else 0}}function Gt(t){var e=t.directives;if(e)for(var n in e){var i=e[n];"function"===typeof i&&(e[n]={bind:i,update:i})}}function Yt(t,e,n){if("function"===typeof e&&(e=e.options),Ut(e,n),qt(e,n),Gt(e),!e._base&&(e.extends&&(t=Yt(t,e.extends,n)),e.mixins))for(var i=0,r=e.mixins.length;i<r;i++)t=Yt(t,e.mixins[i],n);var o,a={};for(o in t)s(o);for(o in e)x(t,o)||s(o);function s(i){var r=Rt[i]||Wt;a[i]=r(t[i],e[i],n,i)}return a}function 
Zt(t,e,n,i){if("string"===typeof n){var r=t[e];if(x(r,n))return r[n];var o=_(n);if(x(r,o))return r[o];var a=S(o);if(x(r,a))return r[a];var s=r[n]||r[o]||r[a];return s}}function Kt(t,e,n,i){var r=e[t],o=!x(n,t),a=n[t],s=ee(Boolean,r.type);if(s>-1)if(o&&!x(r,"default"))a=!1;else if(""===a||a===k(t)){var c=ee(String,r.type);(c<0||s<c)&&(a=!0)}if(void 0===a){a=Xt(i,r,t);var u=$t;At(!0),It(a),At(u)}return a}function Xt(t,e,n){if(x(e,"default")){var i=e.default;return t&&t.$options.propsData&&void 0===t.$options.propsData[n]&&void 0!==t._props[n]?t._props[n]:"function"===typeof i&&"Function"!==Qt(e.type)?i.call(t):i}}var Jt=/^\s*function (\w+)/;function Qt(t){var e=t&&t.toString().match(Jt);return e?e[1]:""}function te(t,e){return Qt(t)===Qt(e)}function ee(t,e){if(!Array.isArray(e))return te(e,t)?0:-1;for(var n=0,i=e.length;n<i;n++)if(te(e[n],t))return n;return-1}function ne(t,e,n){gt();try{if(e){var i=e;while(i=i.$parent){var r=i.$options.errorCaptured;if(r)for(var o=0;o<r.length;o++)try{var a=!1===r[o].call(i,t,e,n);if(a)return}catch(Sa){re(Sa,i,"errorCaptured hook")}}}re(t,e,n)}finally{bt()}}function ie(t,e,n,i,r){var o;try{o=n?t.apply(e,n):t.call(e),o&&!o._isVue&&d(o)&&!o._handled&&(o.catch((function(t){return ne(t,i,r+" (Promise/async)")})),o._handled=!0)}catch(Sa){ne(Sa,i,r)}return o}function re(t,e,n){if(z.errorHandler)try{return z.errorHandler.call(null,t,e,n)}catch(Sa){Sa!==t&&oe(Sa,null,"config.errorHandler")}oe(t,e,n)}function oe(t,e,n){if(!K&&!X||"undefined"===typeof console)throw t;console.error(t)}var ae,se=!1,ce=[],ue=!1;function le(){ue=!1;var t=ce.slice(0);ce.length=0;for(var e=0;e<t.length;e++)t[e]()}if("undefined"!==typeof Promise&&lt(Promise)){var fe=Promise.resolve();ae=function(){fe.then(le),it&&setTimeout(I)},se=!0}else if(tt||"undefined"===typeof MutationObserver||!lt(MutationObserver)&&"[object MutationObserverConstructor]"!==MutationObserver.toString())ae="undefined"!==typeof setImmediate&&lt(setImmediate)?function(){setImmediate(le)}:function(){setTimeout(le,0)};else{var he=1,de=new MutationObserver(le),pe=document.createTextNode(String(he));de.observe(pe,{characterData:!0}),ae=function(){he=(he+1)%2,pe.data=String(he)},se=!0}function ve(t,e){var n;if(ce.push((function(){if(t)try{t.call(e)}catch(Sa){ne(Sa,e,"nextTick")}else n&&n(e)})),ue||(ue=!0,ae()),!t&&"undefined"!==typeof Promise)return new Promise((function(t){n=t}))}var me=new ft;function ge(t){be(t,me),me.clear()}function be(t,e){var n,i,r=Array.isArray(t);if(!(!r&&!c(t)||Object.isFrozen(t)||t instanceof yt)){if(t.__ob__){var o=t.__ob__.dep.id;if(e.has(o))return;e.add(o)}if(r){n=t.length;while(n--)be(t[n],e)}else{i=Object.keys(t),n=i.length;while(n--)be(t[i[n]],e)}}}var ye=w((function(t){var e="&"===t.charAt(0);t=e?t.slice(1):t;var n="~"===t.charAt(0);t=n?t.slice(1):t;var i="!"===t.charAt(0);return t=i?t.slice(1):t,{name:t,once:n,capture:i,passive:e}}));function xe(t,e){function n(){var t=arguments,i=n.fns;if(!Array.isArray(i))return ie(i,null,arguments,e,"v-on handler");for(var r=i.slice(),o=0;o<r.length;o++)ie(r[o],null,t,e,"v-on handler")}return n.fns=t,n}function we(t,e,n,r,a,s){var c,u,l,f;for(c in t)u=t[c],l=e[c],f=ye(c),i(u)||(i(l)?(i(u.fns)&&(u=t[c]=xe(u,s)),o(f.once)&&(u=t[c]=a(f.name,u,f.capture)),n(f.name,u,f.capture,f.passive,f.params)):u!==l&&(l.fns=u,t[c]=l));for(c in e)i(t[c])&&(f=ye(c),r(f.name,e[c],f.capture))}function Oe(t,e,n){var a;t instanceof yt&&(t=t.data.hook||(t.data.hook={}));var s=t[e];function 
c(){n.apply(this,arguments),b(a.fns,c)}i(s)?a=xe([c]):r(s.fns)&&o(s.merged)?(a=s,a.fns.push(c)):a=xe([s,c]),a.merged=!0,t[e]=a}function _e(t,e,n){var o=e.options.props;if(!i(o)){var a={},s=t.attrs,c=t.props;if(r(s)||r(c))for(var u in o){var l=k(u);Se(a,c,u,l,!0)||Se(a,s,u,l,!1)}return a}}function Se(t,e,n,i,o){if(r(e)){if(x(e,n))return t[n]=e[n],o||delete e[n],!0;if(x(e,i))return t[n]=e[i],o||delete e[i],!0}return!1}function Ce(t){for(var e=0;e<t.length;e++)if(Array.isArray(t[e]))return Array.prototype.concat.apply([],t);return t}function ke(t){return s(t)?[Ot(t)]:Array.isArray(t)?$e(t):void 0}function je(t){return r(t)&&r(t.text)&&a(t.isComment)}function $e(t,e){var n,a,c,u,l=[];for(n=0;n<t.length;n++)a=t[n],i(a)||"boolean"===typeof a||(c=l.length-1,u=l[c],Array.isArray(a)?a.length>0&&(a=$e(a,(e||"")+"_"+n),je(a[0])&&je(u)&&(l[c]=Ot(u.text+a[0].text),a.shift()),l.push.apply(l,a)):s(a)?je(u)?l[c]=Ot(u.text+a):""!==a&&l.push(Ot(a)):je(a)&&je(u)?l[c]=Ot(u.text+a.text):(o(t._isVList)&&r(a.tag)&&i(a.key)&&r(e)&&(a.key="__vlist"+e+"_"+n+"__"),l.push(a)));return l}function Ae(t){var e=t.$options.provide;e&&(t._provided="function"===typeof e?e.call(t):e)}function Ee(t){var e=Te(t.$options.inject,t);e&&(At(!1),Object.keys(e).forEach((function(n){Bt(t,n,e[n])})),At(!0))}function Te(t,e){if(t){for(var n=Object.create(null),i=ht?Reflect.ownKeys(t):Object.keys(t),r=0;r<i.length;r++){var o=i[r];if("__ob__"!==o){var a=t[o].from,s=e;while(s){if(s._provided&&x(s._provided,a)){n[o]=s._provided[a];break}s=s.$parent}if(!s)if("default"in t[o]){var c=t[o].default;n[o]="function"===typeof c?c.call(e):c}else 0}}return n}}function Le(t,e){if(!t||!t.length)return{};for(var n={},i=0,r=t.length;i<r;i++){var o=t[i],a=o.data;if(a&&a.attrs&&a.attrs.slot&&delete a.attrs.slot,o.context!==e&&o.fnContext!==e||!a||null==a.slot)(n.default||(n.default=[])).push(o);else{var s=a.slot,c=n[s]||(n[s]=[]);"template"===o.tag?c.push.apply(c,o.children||[]):c.push(o)}}for(var u in n)n[u].every(Ie)&&delete n[u];return n}function Ie(t){return t.isComment&&!t.asyncFactory||" "===t.text}function Be(t){return t.isComment&&t.asyncFactory}function Me(t,e,i){var r,o=Object.keys(e).length>0,a=t?!!t.$stable:!o,s=t&&t.$key;if(t){if(t._normalized)return t._normalized;if(a&&i&&i!==n&&s===i.$key&&!o&&!i.$hasNormal)return i;for(var c in r={},t)t[c]&&"$"!==c[0]&&(r[c]=Pe(e,c,t[c]))}else r={};for(var u in e)u in r||(r[u]=De(e,u));return t&&Object.isExtensible(t)&&(t._normalized=r),U(r,"$stable",a),U(r,"$key",s),U(r,"$hasNormal",o),r}function Pe(t,e,n){var i=function(){var t=arguments.length?n.apply(null,arguments):n({});t=t&&"object"===typeof t&&!Array.isArray(t)?[t]:ke(t);var e=t&&t[0];return t&&(!e||1===t.length&&e.isComment&&!Be(e))?void 0:t};return n.proxy&&Object.defineProperty(t,e,{get:i,enumerable:!0,configurable:!0}),i}function De(t,e){return function(){return t[e]}}function Re(t,e){var n,i,o,a,s;if(Array.isArray(t)||"string"===typeof t)for(n=new Array(t.length),i=0,o=t.length;i<o;i++)n[i]=e(t[i],i);else if("number"===typeof t)for(n=new Array(t),i=0;i<t;i++)n[i]=e(i+1,i);else if(c(t))if(ht&&t[Symbol.iterator]){n=[];var u=t[Symbol.iterator](),l=u.next();while(!l.done)n.push(e(l.value,n.length)),l=u.next()}else for(a=Object.keys(t),n=new Array(a.length),i=0,o=a.length;i<o;i++)s=a[i],n[i]=e(t[s],s,i);return r(n)||(n=[]),n._isVList=!0,n}function Ne(t,e,n,i){var r,o=this.$scopedSlots[t];o?(n=n||{},i&&(n=T(T({},i),n)),r=o(n)||("function"===typeof e?e():e)):r=this.$slots[t]||("function"===typeof e?e():e);var a=n&&n.slot;return 
a?this.$createElement("template",{slot:a},r):r}function Ve(t){return Zt(this.$options,"filters",t,!0)||M}function Fe(t,e){return Array.isArray(t)?-1===t.indexOf(e):t!==e}function ze(t,e,n,i,r){var o=z.keyCodes[e]||n;return r&&i&&!z.keyCodes[e]?Fe(r,i):o?Fe(o,t):i?k(i)!==e:void 0===t}function He(t,e,n,i,r){if(n)if(c(n)){var o;Array.isArray(n)&&(n=L(n));var a=function(a){if("class"===a||"style"===a||g(a))o=t;else{var s=t.attrs&&t.attrs.type;o=i||z.mustUseProp(e,s,a)?t.domProps||(t.domProps={}):t.attrs||(t.attrs={})}var c=_(a),u=k(a);if(!(c in o)&&!(u in o)&&(o[a]=n[a],r)){var l=t.on||(t.on={});l["update:"+a]=function(t){n[a]=t}}};for(var s in n)a(s)}else;return t}function We(t,e){var n=this._staticTrees||(this._staticTrees=[]),i=n[t];return i&&!e||(i=n[t]=this.$options.staticRenderFns[t].call(this._renderProxy,null,this),qe(i,"__static__"+t,!1)),i}function Ue(t,e,n){return qe(t,"__once__"+e+(n?"_"+n:""),!0),t}function qe(t,e,n){if(Array.isArray(t))for(var i=0;i<t.length;i++)t[i]&&"string"!==typeof t[i]&&Ge(t[i],e+"_"+i,n);else Ge(t,e,n)}function Ge(t,e,n){t.isStatic=!0,t.key=e,t.isOnce=n}function Ye(t,e){if(e)if(l(e)){var n=t.on=t.on?T({},t.on):{};for(var i in e){var r=n[i],o=e[i];n[i]=r?[].concat(r,o):o}}else;return t}function Ze(t,e,n,i){e=e||{$stable:!n};for(var r=0;r<t.length;r++){var o=t[r];Array.isArray(o)?Ze(o,e,n):o&&(o.proxy&&(o.fn.proxy=!0),e[o.key]=o.fn)}return i&&(e.$key=i),e}function Ke(t,e){for(var n=0;n<e.length;n+=2){var i=e[n];"string"===typeof i&&i&&(t[e[n]]=e[n+1])}return t}function Xe(t,e){return"string"===typeof t?e+t:t}function Je(t){t._o=Ue,t._n=v,t._s=p,t._l=Re,t._t=Ne,t._q=P,t._i=D,t._m=We,t._f=Ve,t._k=ze,t._b=He,t._v=Ot,t._e=wt,t._u=Ze,t._g=Ye,t._d=Ke,t._p=Xe}function Qe(t,e,i,r,a){var s,c=this,u=a.options;x(r,"_uid")?(s=Object.create(r),s._original=r):(s=r,r=r._original);var l=o(u._compiled),f=!l;this.data=t,this.props=e,this.children=i,this.parent=r,this.listeners=t.on||n,this.injections=Te(u.inject,r),this.slots=function(){return c.$slots||Me(t.scopedSlots,c.$slots=Le(i,r)),c.$slots},Object.defineProperty(this,"scopedSlots",{enumerable:!0,get:function(){return Me(t.scopedSlots,this.slots())}}),l&&(this.$options=u,this.$slots=this.slots(),this.$scopedSlots=Me(t.scopedSlots,this.$slots)),u._scopeId?this._c=function(t,e,n,i){var o=dn(s,t,e,n,i,f);return o&&!Array.isArray(o)&&(o.fnScopeId=u._scopeId,o.fnContext=r),o}:this._c=function(t,e,n,i){return dn(s,t,e,n,i,f)}}function tn(t,e,i,o,a){var s=t.options,c={},u=s.props;if(r(u))for(var l in u)c[l]=Kt(l,u,e||n);else r(i.attrs)&&nn(c,i.attrs),r(i.props)&&nn(c,i.props);var f=new Qe(i,c,a,o,t),h=s.render.call(null,f._c,f);if(h instanceof yt)return en(h,i,f.parent,s,f);if(Array.isArray(h)){for(var d=ke(h)||[],p=new Array(d.length),v=0;v<d.length;v++)p[v]=en(d[v],i,f.parent,s,f);return p}}function en(t,e,n,i,r){var o=_t(t);return o.fnContext=n,o.fnOptions=i,e.slot&&((o.data||(o.data={})).slot=e.slot),o}function nn(t,e){for(var n in e)t[_(n)]=e[n]}Je(Qe.prototype);var rn={init:function(t,e){if(t.componentInstance&&!t.componentInstance._isDestroyed&&t.data.keepAlive){var n=t;rn.prepatch(n,n)}else{var i=t.componentInstance=sn(t,Tn);i.$mount(e?t.elm:void 0,e)}},prepatch:function(t,e){var n=e.componentOptions,i=e.componentInstance=t.componentInstance;Pn(i,n.propsData,n.listeners,e,n.children)},insert:function(t){var e=t.context,n=t.componentInstance;n._isMounted||(n._isMounted=!0,Vn(n,"mounted")),t.data.keepAlive&&(e._isMounted?Qn(n):Rn(n,!0))},destroy:function(t){var 
e=t.componentInstance;e._isDestroyed||(t.data.keepAlive?Nn(e,!0):e.$destroy())}},on=Object.keys(rn);function an(t,e,n,a,s){if(!i(t)){var u=n.$options._base;if(c(t)&&(t=u.extend(t)),"function"===typeof t){var l;if(i(t.cid)&&(l=t,t=_n(l,u),void 0===t))return On(l,e,n,a,s);e=e||{},Oi(t),r(e.model)&&ln(t.options,e);var f=_e(e,t,s);if(o(t.options.functional))return tn(t,f,e,n,a);var h=e.on;if(e.on=e.nativeOn,o(t.options.abstract)){var d=e.slot;e={},d&&(e.slot=d)}cn(e);var p=t.options.name||s,v=new yt("vue-component-"+t.cid+(p?"-"+p:""),e,void 0,void 0,void 0,n,{Ctor:t,propsData:f,listeners:h,tag:s,children:a},l);return v}}}function sn(t,e){var n={_isComponent:!0,_parentVnode:t,parent:e},i=t.data.inlineTemplate;return r(i)&&(n.render=i.render,n.staticRenderFns=i.staticRenderFns),new t.componentOptions.Ctor(n)}function cn(t){for(var e=t.hook||(t.hook={}),n=0;n<on.length;n++){var i=on[n],r=e[i],o=rn[i];r===o||r&&r._merged||(e[i]=r?un(o,r):o)}}function un(t,e){var n=function(n,i){t(n,i),e(n,i)};return n._merged=!0,n}function ln(t,e){var n=t.model&&t.model.prop||"value",i=t.model&&t.model.event||"input";(e.attrs||(e.attrs={}))[n]=e.model.value;var o=e.on||(e.on={}),a=o[i],s=e.model.callback;r(a)?(Array.isArray(a)?-1===a.indexOf(s):a!==s)&&(o[i]=[s].concat(a)):o[i]=s}var fn=1,hn=2;function dn(t,e,n,i,r,a){return(Array.isArray(n)||s(n))&&(r=i,i=n,n=void 0),o(a)&&(r=hn),pn(t,e,n,i,r)}function pn(t,e,n,i,o){if(r(n)&&r(n.__ob__))return wt();if(r(n)&&r(n.is)&&(e=n.is),!e)return wt();var a,s,c;(Array.isArray(i)&&"function"===typeof i[0]&&(n=n||{},n.scopedSlots={default:i[0]},i.length=0),o===hn?i=ke(i):o===fn&&(i=Ce(i)),"string"===typeof e)?(s=t.$vnode&&t.$vnode.ns||z.getTagNamespace(e),a=z.isReservedTag(e)?new yt(z.parsePlatformTagName(e),n,i,void 0,void 0,t):n&&n.pre||!r(c=Zt(t.$options,"components",e))?new yt(e,n,i,void 0,void 0,t):an(c,n,t,i,e)):a=an(e,n,t,i);return Array.isArray(a)?a:r(a)?(r(s)&&vn(a,s),r(n)&&mn(n),a):wt()}function vn(t,e,n){if(t.ns=e,"foreignObject"===t.tag&&(e=void 0,n=!0),r(t.children))for(var a=0,s=t.children.length;a<s;a++){var c=t.children[a];r(c.tag)&&(i(c.ns)||o(n)&&"svg"!==c.tag)&&vn(c,e,n)}}function mn(t){c(t.style)&&ge(t.style),c(t.class)&&ge(t.class)}function gn(t){t._vnode=null,t._staticTrees=null;var e=t.$options,i=t.$vnode=e._parentVnode,r=i&&i.context;t.$slots=Le(e._renderChildren,r),t.$scopedSlots=n,t._c=function(e,n,i,r){return dn(t,e,n,i,r,!1)},t.$createElement=function(e,n,i,r){return dn(t,e,n,i,r,!0)};var o=i&&i.data;Bt(t,"$attrs",o&&o.attrs||n,null,!0),Bt(t,"$listeners",e._parentListeners||n,null,!0)}var bn,yn=null;function xn(t){Je(t.prototype),t.prototype.$nextTick=function(t){return ve(t,this)},t.prototype._render=function(){var t,e=this,n=e.$options,i=n.render,r=n._parentVnode;r&&(e.$scopedSlots=Me(r.data.scopedSlots,e.$slots,e.$scopedSlots)),e.$vnode=r;try{yn=e,t=i.call(e._renderProxy,e.$createElement)}catch(Sa){ne(Sa,e,"render"),t=e._vnode}finally{yn=null}return Array.isArray(t)&&1===t.length&&(t=t[0]),t instanceof yt||(t=wt()),t.parent=r,t}}function wn(t,e){return(t.__esModule||ht&&"Module"===t[Symbol.toStringTag])&&(t=t.default),c(t)?e.extend(t):t}function On(t,e,n,i,r){var o=wt();return o.asyncFactory=t,o.asyncMeta={data:e,context:n,children:i,tag:r},o}function _n(t,e){if(o(t.error)&&r(t.errorComp))return t.errorComp;if(r(t.resolved))return t.resolved;var n=yn;if(n&&r(t.owners)&&-1===t.owners.indexOf(n)&&t.owners.push(n),o(t.loading)&&r(t.loadingComp))return t.loadingComp;if(n&&!r(t.owners)){var 
a=t.owners=[n],s=!0,u=null,l=null;n.$on("hook:destroyed",(function(){return b(a,n)}));var f=function(t){for(var e=0,n=a.length;e<n;e++)a[e].$forceUpdate();t&&(a.length=0,null!==u&&(clearTimeout(u),u=null),null!==l&&(clearTimeout(l),l=null))},h=R((function(n){t.resolved=wn(n,e),s?a.length=0:f(!0)})),p=R((function(e){r(t.errorComp)&&(t.error=!0,f(!0))})),v=t(h,p);return c(v)&&(d(v)?i(t.resolved)&&v.then(h,p):d(v.component)&&(v.component.then(h,p),r(v.error)&&(t.errorComp=wn(v.error,e)),r(v.loading)&&(t.loadingComp=wn(v.loading,e),0===v.delay?t.loading=!0:u=setTimeout((function(){u=null,i(t.resolved)&&i(t.error)&&(t.loading=!0,f(!1))}),v.delay||200)),r(v.timeout)&&(l=setTimeout((function(){l=null,i(t.resolved)&&p(null)}),v.timeout)))),s=!1,t.loading?t.loadingComp:t.resolved}}function Sn(t){if(Array.isArray(t))for(var e=0;e<t.length;e++){var n=t[e];if(r(n)&&(r(n.componentOptions)||Be(n)))return n}}function Cn(t){t._events=Object.create(null),t._hasHookEvent=!1;var e=t.$options._parentListeners;e&&An(t,e)}function kn(t,e){bn.$on(t,e)}function jn(t,e){bn.$off(t,e)}function $n(t,e){var n=bn;return function i(){var r=e.apply(null,arguments);null!==r&&n.$off(t,i)}}function An(t,e,n){bn=t,we(e,n||{},kn,jn,$n,t),bn=void 0}function En(t){var e=/^hook:/;t.prototype.$on=function(t,n){var i=this;if(Array.isArray(t))for(var r=0,o=t.length;r<o;r++)i.$on(t[r],n);else(i._events[t]||(i._events[t]=[])).push(n),e.test(t)&&(i._hasHookEvent=!0);return i},t.prototype.$once=function(t,e){var n=this;function i(){n.$off(t,i),e.apply(n,arguments)}return i.fn=e,n.$on(t,i),n},t.prototype.$off=function(t,e){var n=this;if(!arguments.length)return n._events=Object.create(null),n;if(Array.isArray(t)){for(var i=0,r=t.length;i<r;i++)n.$off(t[i],e);return n}var o,a=n._events[t];if(!a)return n;if(!e)return n._events[t]=null,n;var s=a.length;while(s--)if(o=a[s],o===e||o.fn===e){a.splice(s,1);break}return n},t.prototype.$emit=function(t){var e=this,n=e._events[t];if(n){n=n.length>1?E(n):n;for(var i=E(arguments,1),r='event handler for "'+t+'"',o=0,a=n.length;o<a;o++)ie(n[o],e,i,e,r)}return e}}var Tn=null;function Ln(t){var e=Tn;return Tn=t,function(){Tn=e}}function In(t){var e=t.$options,n=e.parent;if(n&&!e.abstract){while(n.$options.abstract&&n.$parent)n=n.$parent;n.$children.push(t)}t.$parent=n,t.$root=n?n.$root:t,t.$children=[],t.$refs={},t._watcher=null,t._inactive=null,t._directInactive=!1,t._isMounted=!1,t._isDestroyed=!1,t._isBeingDestroyed=!1}function Bn(t){t.prototype._update=function(t,e){var n=this,i=n.$el,r=n._vnode,o=Ln(n);n._vnode=t,n.$el=r?n.__patch__(r,t):n.__patch__(n.$el,t,e,!1),o(),i&&(i.__vue__=null),n.$el&&(n.$el.__vue__=n),n.$vnode&&n.$parent&&n.$vnode===n.$parent._vnode&&(n.$parent.$el=n.$el)},t.prototype.$forceUpdate=function(){var t=this;t._watcher&&t._watcher.update()},t.prototype.$destroy=function(){var t=this;if(!t._isBeingDestroyed){Vn(t,"beforeDestroy"),t._isBeingDestroyed=!0;var e=t.$parent;!e||e._isBeingDestroyed||t.$options.abstract||b(e.$children,t),t._watcher&&t._watcher.teardown();var n=t._watchers.length;while(n--)t._watchers[n].teardown();t._data.__ob__&&t._data.__ob__.vmCount--,t._isDestroyed=!0,t.__patch__(t._vnode,null),Vn(t,"destroyed"),t.$off(),t.$el&&(t.$el.__vue__=null),t.$vnode&&(t.$vnode.parent=null)}}}function Mn(t,e,n){var i;return t.$el=e,t.$options.render||(t.$options.render=wt),Vn(t,"beforeMount"),i=function(){t._update(t._render(),n)},new 
ii(t,i,I,{before:function(){t._isMounted&&!t._isDestroyed&&Vn(t,"beforeUpdate")}},!0),n=!1,null==t.$vnode&&(t._isMounted=!0,Vn(t,"mounted")),t}function Pn(t,e,i,r,o){var a=r.data.scopedSlots,s=t.$scopedSlots,c=!!(a&&!a.$stable||s!==n&&!s.$stable||a&&t.$scopedSlots.$key!==a.$key||!a&&t.$scopedSlots.$key),u=!!(o||t.$options._renderChildren||c);if(t.$options._parentVnode=r,t.$vnode=r,t._vnode&&(t._vnode.parent=r),t.$options._renderChildren=o,t.$attrs=r.data.attrs||n,t.$listeners=i||n,e&&t.$options.props){At(!1);for(var l=t._props,f=t.$options._propKeys||[],h=0;h<f.length;h++){var d=f[h],p=t.$options.props;l[d]=Kt(d,p,e,t)}At(!0),t.$options.propsData=e}i=i||n;var v=t.$options._parentListeners;t.$options._parentListeners=i,An(t,i,v),u&&(t.$slots=Le(o,r.context),t.$forceUpdate())}function Dn(t){while(t&&(t=t.$parent))if(t._inactive)return!0;return!1}function Rn(t,e){if(e){if(t._directInactive=!1,Dn(t))return}else if(t._directInactive)return;if(t._inactive||null===t._inactive){t._inactive=!1;for(var n=0;n<t.$children.length;n++)Rn(t.$children[n]);Vn(t,"activated")}}function Nn(t,e){if((!e||(t._directInactive=!0,!Dn(t)))&&!t._inactive){t._inactive=!0;for(var n=0;n<t.$children.length;n++)Nn(t.$children[n]);Vn(t,"deactivated")}}function Vn(t,e){gt();var n=t.$options[e],i=e+" hook";if(n)for(var r=0,o=n.length;r<o;r++)ie(n[r],t,null,t,i);t._hasHookEvent&&t.$emit("hook:"+e),bt()}var Fn=[],zn=[],Hn={},Wn=!1,Un=!1,qn=0;function Gn(){qn=Fn.length=zn.length=0,Hn={},Wn=Un=!1}var Yn=0,Zn=Date.now;if(K&&!tt){var Kn=window.performance;Kn&&"function"===typeof Kn.now&&Zn()>document.createEvent("Event").timeStamp&&(Zn=function(){return Kn.now()})}function Xn(){var t,e;for(Yn=Zn(),Un=!0,Fn.sort((function(t,e){return t.id-e.id})),qn=0;qn<Fn.length;qn++)t=Fn[qn],t.before&&t.before(),e=t.id,Hn[e]=null,t.run();var n=zn.slice(),i=Fn.slice();Gn(),ti(n),Jn(i),ut&&z.devtools&&ut.emit("flush")}function Jn(t){var e=t.length;while(e--){var n=t[e],i=n.vm;i._watcher===n&&i._isMounted&&!i._isDestroyed&&Vn(i,"updated")}}function Qn(t){t._inactive=!1,zn.push(t)}function ti(t){for(var e=0;e<t.length;e++)t[e]._inactive=!0,Rn(t[e],!0)}function ei(t){var e=t.id;if(null==Hn[e]){if(Hn[e]=!0,Un){var n=Fn.length-1;while(n>qn&&Fn[n].id>t.id)n--;Fn.splice(n+1,0,t)}else Fn.push(t);Wn||(Wn=!0,ve(Xn))}}var ni=0,ii=function(t,e,n,i,r){this.vm=t,r&&(t._watcher=this),t._watchers.push(this),i?(this.deep=!!i.deep,this.user=!!i.user,this.lazy=!!i.lazy,this.sync=!!i.sync,this.before=i.before):this.deep=this.user=this.lazy=this.sync=!1,this.cb=n,this.id=++ni,this.active=!0,this.dirty=this.lazy,this.deps=[],this.newDeps=[],this.depIds=new ft,this.newDepIds=new ft,this.expression="","function"===typeof e?this.getter=e:(this.getter=G(e),this.getter||(this.getter=I)),this.value=this.lazy?void 0:this.get()};ii.prototype.get=function(){var t;gt(this);var e=this.vm;try{t=this.getter.call(e,e)}catch(Sa){if(!this.user)throw Sa;ne(Sa,e,'getter for watcher "'+this.expression+'"')}finally{this.deep&&ge(t),bt(),this.cleanupDeps()}return t},ii.prototype.addDep=function(t){var e=t.id;this.newDepIds.has(e)||(this.newDepIds.add(e),this.newDeps.push(t),this.depIds.has(e)||t.addSub(this))},ii.prototype.cleanupDeps=function(){var t=this.deps.length;while(t--){var e=this.deps[t];this.newDepIds.has(e.id)||e.removeSub(this)}var 
n=this.depIds;this.depIds=this.newDepIds,this.newDepIds=n,this.newDepIds.clear(),n=this.deps,this.deps=this.newDeps,this.newDeps=n,this.newDeps.length=0},ii.prototype.update=function(){this.lazy?this.dirty=!0:this.sync?this.run():ei(this)},ii.prototype.run=function(){if(this.active){var t=this.get();if(t!==this.value||c(t)||this.deep){var e=this.value;if(this.value=t,this.user){var n='callback for watcher "'+this.expression+'"';ie(this.cb,this.vm,[t,e],this.vm,n)}else this.cb.call(this.vm,t,e)}}},ii.prototype.evaluate=function(){this.value=this.get(),this.dirty=!1},ii.prototype.depend=function(){var t=this.deps.length;while(t--)this.deps[t].depend()},ii.prototype.teardown=function(){if(this.active){this.vm._isBeingDestroyed||b(this.vm._watchers,this);var t=this.deps.length;while(t--)this.deps[t].removeSub(this);this.active=!1}};var ri={enumerable:!0,configurable:!0,get:I,set:I};function oi(t,e,n){ri.get=function(){return this[e][n]},ri.set=function(t){this[e][n]=t},Object.defineProperty(t,n,ri)}function ai(t){t._watchers=[];var e=t.$options;e.props&&si(t,e.props),e.methods&&vi(t,e.methods),e.data?ci(t):It(t._data={},!0),e.computed&&fi(t,e.computed),e.watch&&e.watch!==ot&&mi(t,e.watch)}function si(t,e){var n=t.$options.propsData||{},i=t._props={},r=t.$options._propKeys=[],o=!t.$parent;o||At(!1);var a=function(o){r.push(o);var a=Kt(o,e,n,t);Bt(i,o,a),o in t||oi(t,"_props",o)};for(var s in e)a(s);At(!0)}function ci(t){var e=t.$options.data;e=t._data="function"===typeof e?ui(e,t):e||{},l(e)||(e={});var n=Object.keys(e),i=t.$options.props,r=(t.$options.methods,n.length);while(r--){var o=n[r];0,i&&x(i,o)||W(o)||oi(t,"_data",o)}It(e,!0)}function ui(t,e){gt();try{return t.call(e,e)}catch(Sa){return ne(Sa,e,"data()"),{}}finally{bt()}}var li={lazy:!0};function fi(t,e){var n=t._computedWatchers=Object.create(null),i=ct();for(var r in e){var o=e[r],a="function"===typeof o?o:o.get;0,i||(n[r]=new ii(t,a||I,I,li)),r in t||hi(t,r,o)}}function hi(t,e,n){var i=!ct();"function"===typeof n?(ri.get=i?di(e):pi(n),ri.set=I):(ri.get=n.get?i&&!1!==n.cache?di(e):pi(n.get):I,ri.set=n.set||I),Object.defineProperty(t,e,ri)}function di(t){return function(){var e=this._computedWatchers&&this._computedWatchers[t];if(e)return e.dirty&&e.evaluate(),vt.target&&e.depend(),e.value}}function pi(t){return function(){return t.call(this,this)}}function vi(t,e){t.$options.props;for(var n in e)t[n]="function"!==typeof e[n]?I:A(e[n],t)}function mi(t,e){for(var n in e){var i=e[n];if(Array.isArray(i))for(var r=0;r<i.length;r++)gi(t,n,i[r]);else gi(t,n,i)}}function gi(t,e,n,i){return l(n)&&(i=n,n=n.handler),"string"===typeof n&&(n=t[n]),t.$watch(e,n,i)}function bi(t){var e={get:function(){return this._data}},n={get:function(){return this._props}};Object.defineProperty(t.prototype,"$data",e),Object.defineProperty(t.prototype,"$props",n),t.prototype.$set=Mt,t.prototype.$delete=Pt,t.prototype.$watch=function(t,e,n){var i=this;if(l(e))return gi(i,t,e,n);n=n||{},n.user=!0;var r=new ii(i,t,e,n);if(n.immediate){var o='callback for immediate watcher "'+r.expression+'"';gt(),ie(e,i,[r.value],i,o),bt()}return function(){r.teardown()}}}var yi=0;function xi(t){t.prototype._init=function(t){var e=this;e._uid=yi++,e._isVue=!0,t&&t._isComponent?wi(e,t):e.$options=Yt(Oi(e.constructor),t||{},e),e._renderProxy=e,e._self=e,In(e),Cn(e),gn(e),Vn(e,"beforeCreate"),Ee(e),ai(e),Ae(e),Vn(e,"created"),e.$options.el&&e.$mount(e.$options.el)}}function wi(t,e){var 
n=t.$options=Object.create(t.constructor.options),i=e._parentVnode;n.parent=e.parent,n._parentVnode=i;var r=i.componentOptions;n.propsData=r.propsData,n._parentListeners=r.listeners,n._renderChildren=r.children,n._componentTag=r.tag,e.render&&(n.render=e.render,n.staticRenderFns=e.staticRenderFns)}function Oi(t){var e=t.options;if(t.super){var n=Oi(t.super),i=t.superOptions;if(n!==i){t.superOptions=n;var r=_i(t);r&&T(t.extendOptions,r),e=t.options=Yt(n,t.extendOptions),e.name&&(e.components[e.name]=t)}}return e}function _i(t){var e,n=t.options,i=t.sealedOptions;for(var r in n)n[r]!==i[r]&&(e||(e={}),e[r]=n[r]);return e}function Si(t){this._init(t)}function Ci(t){t.use=function(t){var e=this._installedPlugins||(this._installedPlugins=[]);if(e.indexOf(t)>-1)return this;var n=E(arguments,1);return n.unshift(this),"function"===typeof t.install?t.install.apply(t,n):"function"===typeof t&&t.apply(null,n),e.push(t),this}}function ki(t){t.mixin=function(t){return this.options=Yt(this.options,t),this}}function ji(t){t.cid=0;var e=1;t.extend=function(t){t=t||{};var n=this,i=n.cid,r=t._Ctor||(t._Ctor={});if(r[i])return r[i];var o=t.name||n.options.name;var a=function(t){this._init(t)};return a.prototype=Object.create(n.prototype),a.prototype.constructor=a,a.cid=e++,a.options=Yt(n.options,t),a["super"]=n,a.options.props&&$i(a),a.options.computed&&Ai(a),a.extend=n.extend,a.mixin=n.mixin,a.use=n.use,V.forEach((function(t){a[t]=n[t]})),o&&(a.options.components[o]=a),a.superOptions=n.options,a.extendOptions=t,a.sealedOptions=T({},a.options),r[i]=a,a}}function $i(t){var e=t.options.props;for(var n in e)oi(t.prototype,"_props",n)}function Ai(t){var e=t.options.computed;for(var n in e)hi(t.prototype,n,e[n])}function Ei(t){V.forEach((function(e){t[e]=function(t,n){return n?("component"===e&&l(n)&&(n.name=n.name||t,n=this.options._base.extend(n)),"directive"===e&&"function"===typeof n&&(n={bind:n,update:n}),this.options[e+"s"][t]=n,n):this.options[e+"s"][t]}}))}function Ti(t){return t&&(t.Ctor.options.name||t.tag)}function Li(t,e){return Array.isArray(t)?t.indexOf(e)>-1:"string"===typeof t?t.split(",").indexOf(e)>-1:!!f(t)&&t.test(e)}function Ii(t,e){var n=t.cache,i=t.keys,r=t._vnode;for(var o in n){var a=n[o];if(a){var s=a.name;s&&!e(s)&&Bi(n,o,i,r)}}}function Bi(t,e,n,i){var r=t[e];!r||i&&r.tag===i.tag||r.componentInstance.$destroy(),t[e]=null,b(n,e)}xi(Si),bi(Si),En(Si),Bn(Si),xn(Si);var Mi=[String,RegExp,Array],Pi={name:"keep-alive",abstract:!0,props:{include:Mi,exclude:Mi,max:[String,Number]},methods:{cacheVNode:function(){var t=this,e=t.cache,n=t.keys,i=t.vnodeToCache,r=t.keyToCache;if(i){var o=i.tag,a=i.componentInstance,s=i.componentOptions;e[r]={name:Ti(s),tag:o,componentInstance:a},n.push(r),this.max&&n.length>parseInt(this.max)&&Bi(e,n[0],n,this._vnode),this.vnodeToCache=null}}},created:function(){this.cache=Object.create(null),this.keys=[]},destroyed:function(){for(var t in this.cache)Bi(this.cache,t,this.keys)},mounted:function(){var t=this;this.cacheVNode(),this.$watch("include",(function(e){Ii(t,(function(t){return Li(e,t)}))})),this.$watch("exclude",(function(e){Ii(t,(function(t){return!Li(e,t)}))}))},updated:function(){this.cacheVNode()},render:function(){var t=this.$slots.default,e=Sn(t),n=e&&e.componentOptions;if(n){var i=Ti(n),r=this,o=r.include,a=r.exclude;if(o&&(!i||!Li(o,i))||a&&i&&Li(a,i))return e;var 
s=this,c=s.cache,u=s.keys,l=null==e.key?n.Ctor.cid+(n.tag?"::"+n.tag:""):e.key;c[l]?(e.componentInstance=c[l].componentInstance,b(u,l),u.push(l)):(this.vnodeToCache=e,this.keyToCache=l),e.data.keepAlive=!0}return e||t&&t[0]}},Di={KeepAlive:Pi};function Ri(t){var e={get:function(){return z}};Object.defineProperty(t,"config",e),t.util={warn:dt,extend:T,mergeOptions:Yt,defineReactive:Bt},t.set=Mt,t.delete=Pt,t.nextTick=ve,t.observable=function(t){return It(t),t},t.options=Object.create(null),V.forEach((function(e){t.options[e+"s"]=Object.create(null)})),t.options._base=t,T(t.options.components,Di),Ci(t),ki(t),ji(t),Ei(t)}Ri(Si),Object.defineProperty(Si.prototype,"$isServer",{get:ct}),Object.defineProperty(Si.prototype,"$ssrContext",{get:function(){return this.$vnode&&this.$vnode.ssrContext}}),Object.defineProperty(Si,"FunctionalRenderContext",{value:Qe}),Si.version="2.6.14";var Ni=m("style,class"),Vi=m("input,textarea,option,select,progress"),Fi=function(t,e,n){return"value"===n&&Vi(t)&&"button"!==e||"selected"===n&&"option"===t||"checked"===n&&"input"===t||"muted"===n&&"video"===t},zi=m("contenteditable,draggable,spellcheck"),Hi=m("events,caret,typing,plaintext-only"),Wi=function(t,e){return Zi(e)||"false"===e?"false":"contenteditable"===t&&Hi(e)?e:"true"},Ui=m("allowfullscreen,async,autofocus,autoplay,checked,compact,controls,declare,default,defaultchecked,defaultmuted,defaultselected,defer,disabled,enabled,formnovalidate,hidden,indeterminate,inert,ismap,itemscope,loop,multiple,muted,nohref,noresize,noshade,novalidate,nowrap,open,pauseonexit,readonly,required,reversed,scoped,seamless,selected,sortable,truespeed,typemustmatch,visible"),qi="http://www.w3.org/1999/xlink",Gi=function(t){return":"===t.charAt(5)&&"xlink"===t.slice(0,5)},Yi=function(t){return Gi(t)?t.slice(6,t.length):""},Zi=function(t){return null==t||!1===t};function Ki(t){var e=t.data,n=t,i=t;while(r(i.componentInstance))i=i.componentInstance._vnode,i&&i.data&&(e=Xi(i.data,e));while(r(n=n.parent))n&&n.data&&(e=Xi(e,n.data));return Ji(e.staticClass,e.class)}function Xi(t,e){return{staticClass:Qi(t.staticClass,e.staticClass),class:r(t.class)?[t.class,e.class]:e.class}}function Ji(t,e){return r(t)||r(e)?Qi(t,tr(e)):""}function Qi(t,e){return t?e?t+" "+e:t:e||""}function tr(t){return Array.isArray(t)?er(t):c(t)?nr(t):"string"===typeof t?t:""}function er(t){for(var e,n="",i=0,o=t.length;i<o;i++)r(e=tr(t[i]))&&""!==e&&(n&&(n+=" "),n+=e);return n}function nr(t){var e="";for(var n in t)t[n]&&(e&&(e+=" "),e+=n);return e}var ir={svg:"http://www.w3.org/2000/svg",math:"http://www.w3.org/1998/Math/MathML"},rr=m("html,body,base,head,link,meta,style,title,address,article,aside,footer,header,h1,h2,h3,h4,h5,h6,hgroup,nav,section,div,dd,dl,dt,figcaption,figure,picture,hr,img,li,main,ol,p,pre,ul,a,b,abbr,bdi,bdo,br,cite,code,data,dfn,em,i,kbd,mark,q,rp,rt,rtc,ruby,s,samp,small,span,strong,sub,sup,time,u,var,wbr,area,audio,map,track,video,embed,object,param,source,canvas,script,noscript,del,ins,caption,col,colgroup,table,thead,tbody,td,th,tr,button,datalist,fieldset,form,input,label,legend,meter,optgroup,option,output,progress,select,textarea,details,dialog,menu,menuitem,summary,content,element,shadow,template,blockquote,iframe,tfoot"),or=m("svg,animate,circle,clippath,cursor,defs,desc,ellipse,filter,font-face,foreignobject,g,glyph,image,line,marker,mask,missing-glyph,path,pattern,polygon,polyline,rect,switch,symbol,text,textpath,tspan,use,view",!0),ar=function(t){return rr(t)||or(t)};function sr(t){return or(t)?"svg":"math"===t?"math":void 0}var 
cr=Object.create(null);function ur(t){if(!K)return!0;if(ar(t))return!1;if(t=t.toLowerCase(),null!=cr[t])return cr[t];var e=document.createElement(t);return t.indexOf("-")>-1?cr[t]=e.constructor===window.HTMLUnknownElement||e.constructor===window.HTMLElement:cr[t]=/HTMLUnknownElement/.test(e.toString())}var lr=m("text,number,password,search,email,tel,url");function fr(t){if("string"===typeof t){var e=document.querySelector(t);return e||document.createElement("div")}return t}function hr(t,e){var n=document.createElement(t);return"select"!==t||e.data&&e.data.attrs&&void 0!==e.data.attrs.multiple&&n.setAttribute("multiple","multiple"),n}function dr(t,e){return document.createElementNS(ir[t],e)}function pr(t){return document.createTextNode(t)}function vr(t){return document.createComment(t)}function mr(t,e,n){t.insertBefore(e,n)}function gr(t,e){t.removeChild(e)}function br(t,e){t.appendChild(e)}function yr(t){return t.parentNode}function xr(t){return t.nextSibling}function wr(t){return t.tagName}function Or(t,e){t.textContent=e}function _r(t,e){t.setAttribute(e,"")}var Sr=Object.freeze({createElement:hr,createElementNS:dr,createTextNode:pr,createComment:vr,insertBefore:mr,removeChild:gr,appendChild:br,parentNode:yr,nextSibling:xr,tagName:wr,setTextContent:Or,setStyleScope:_r}),Cr={create:function(t,e){kr(e)},update:function(t,e){t.data.ref!==e.data.ref&&(kr(t,!0),kr(e))},destroy:function(t){kr(t,!0)}};function kr(t,e){var n=t.data.ref;if(r(n)){var i=t.context,o=t.componentInstance||t.elm,a=i.$refs;e?Array.isArray(a[n])?b(a[n],o):a[n]===o&&(a[n]=void 0):t.data.refInFor?Array.isArray(a[n])?a[n].indexOf(o)<0&&a[n].push(o):a[n]=[o]:a[n]=o}}var jr=new yt("",{},[]),$r=["create","activate","update","remove","destroy"];function Ar(t,e){return t.key===e.key&&t.asyncFactory===e.asyncFactory&&(t.tag===e.tag&&t.isComment===e.isComment&&r(t.data)===r(e.data)&&Er(t,e)||o(t.isAsyncPlaceholder)&&i(e.asyncFactory.error))}function Er(t,e){if("input"!==t.tag)return!0;var n,i=r(n=t.data)&&r(n=n.attrs)&&n.type,o=r(n=e.data)&&r(n=n.attrs)&&n.type;return i===o||lr(i)&&lr(o)}function Tr(t,e,n){var i,o,a={};for(i=e;i<=n;++i)o=t[i].key,r(o)&&(a[o]=i);return a}function Lr(t){var e,n,a={},c=t.modules,u=t.nodeOps;for(e=0;e<$r.length;++e)for(a[$r[e]]=[],n=0;n<c.length;++n)r(c[n][$r[e]])&&a[$r[e]].push(c[n][$r[e]]);function l(t){return new yt(u.tagName(t).toLowerCase(),{},[],void 0,t)}function f(t,e){function n(){0===--n.listeners&&h(t)}return n.listeners=e,n}function h(t){var e=u.parentNode(t);r(e)&&u.removeChild(e,t)}function d(t,e,n,i,a,s,c){if(r(t.elm)&&r(s)&&(t=s[c]=_t(t)),t.isRootInsert=!a,!p(t,e,n,i)){var l=t.data,f=t.children,h=t.tag;r(h)?(t.elm=t.ns?u.createElementNS(t.ns,h):u.createElement(h,t),O(t),y(t,f,e),r(l)&&w(t,e),b(n,t.elm,i)):o(t.isComment)?(t.elm=u.createComment(t.text),b(n,t.elm,i)):(t.elm=u.createTextNode(t.text),b(n,t.elm,i))}}function p(t,e,n,i){var a=t.data;if(r(a)){var s=r(t.componentInstance)&&a.keepAlive;if(r(a=a.hook)&&r(a=a.init)&&a(t,!1),r(t.componentInstance))return v(t,e),b(n,t.elm,i),o(s)&&g(t,e,n,i),!0}}function v(t,e){r(t.data.pendingInsert)&&(e.push.apply(e,t.data.pendingInsert),t.data.pendingInsert=null),t.elm=t.componentInstance.$el,x(t)?(w(t,e),O(t)):(kr(t),e.push(t))}function g(t,e,n,i){var o,s=t;while(s.componentInstance)if(s=s.componentInstance._vnode,r(o=s.data)&&r(o=o.transition)){for(o=0;o<a.activate.length;++o)a.activate[o](jr,s);e.push(s);break}b(n,t.elm,i)}function b(t,e,n){r(t)&&(r(n)?u.parentNode(n)===t&&u.insertBefore(t,e,n):u.appendChild(t,e))}function 
y(t,e,n){if(Array.isArray(e)){0;for(var i=0;i<e.length;++i)d(e[i],n,t.elm,null,!0,e,i)}else s(t.text)&&u.appendChild(t.elm,u.createTextNode(String(t.text)))}function x(t){while(t.componentInstance)t=t.componentInstance._vnode;return r(t.tag)}function w(t,n){for(var i=0;i<a.create.length;++i)a.create[i](jr,t);e=t.data.hook,r(e)&&(r(e.create)&&e.create(jr,t),r(e.insert)&&n.push(t))}function O(t){var e;if(r(e=t.fnScopeId))u.setStyleScope(t.elm,e);else{var n=t;while(n)r(e=n.context)&&r(e=e.$options._scopeId)&&u.setStyleScope(t.elm,e),n=n.parent}r(e=Tn)&&e!==t.context&&e!==t.fnContext&&r(e=e.$options._scopeId)&&u.setStyleScope(t.elm,e)}function _(t,e,n,i,r,o){for(;i<=r;++i)d(n[i],o,t,e,!1,n,i)}function S(t){var e,n,i=t.data;if(r(i))for(r(e=i.hook)&&r(e=e.destroy)&&e(t),e=0;e<a.destroy.length;++e)a.destroy[e](t);if(r(e=t.children))for(n=0;n<t.children.length;++n)S(t.children[n])}function C(t,e,n){for(;e<=n;++e){var i=t[e];r(i)&&(r(i.tag)?(k(i),S(i)):h(i.elm))}}function k(t,e){if(r(e)||r(t.data)){var n,i=a.remove.length+1;for(r(e)?e.listeners+=i:e=f(t.elm,i),r(n=t.componentInstance)&&r(n=n._vnode)&&r(n.data)&&k(n,e),n=0;n<a.remove.length;++n)a.remove[n](t,e);r(n=t.data.hook)&&r(n=n.remove)?n(t,e):e()}else h(t.elm)}function j(t,e,n,o,a){var s,c,l,f,h=0,p=0,v=e.length-1,m=e[0],g=e[v],b=n.length-1,y=n[0],x=n[b],w=!a;while(h<=v&&p<=b)i(m)?m=e[++h]:i(g)?g=e[--v]:Ar(m,y)?(A(m,y,o,n,p),m=e[++h],y=n[++p]):Ar(g,x)?(A(g,x,o,n,b),g=e[--v],x=n[--b]):Ar(m,x)?(A(m,x,o,n,b),w&&u.insertBefore(t,m.elm,u.nextSibling(g.elm)),m=e[++h],x=n[--b]):Ar(g,y)?(A(g,y,o,n,p),w&&u.insertBefore(t,g.elm,m.elm),g=e[--v],y=n[++p]):(i(s)&&(s=Tr(e,h,v)),c=r(y.key)?s[y.key]:$(y,e,h,v),i(c)?d(y,o,t,m.elm,!1,n,p):(l=e[c],Ar(l,y)?(A(l,y,o,n,p),e[c]=void 0,w&&u.insertBefore(t,l.elm,m.elm)):d(y,o,t,m.elm,!1,n,p)),y=n[++p]);h>v?(f=i(n[b+1])?null:n[b+1].elm,_(t,f,n,p,b,o)):p>b&&C(e,h,v)}function $(t,e,n,i){for(var o=n;o<i;o++){var a=e[o];if(r(a)&&Ar(t,a))return o}}function A(t,e,n,s,c,l){if(t!==e){r(e.elm)&&r(s)&&(e=s[c]=_t(e));var f=e.elm=t.elm;if(o(t.isAsyncPlaceholder))r(e.asyncFactory.resolved)?L(t.elm,e,n):e.isAsyncPlaceholder=!0;else if(o(e.isStatic)&&o(t.isStatic)&&e.key===t.key&&(o(e.isCloned)||o(e.isOnce)))e.componentInstance=t.componentInstance;else{var h,d=e.data;r(d)&&r(h=d.hook)&&r(h=h.prepatch)&&h(t,e);var p=t.children,v=e.children;if(r(d)&&x(e)){for(h=0;h<a.update.length;++h)a.update[h](t,e);r(h=d.hook)&&r(h=h.update)&&h(t,e)}i(e.text)?r(p)&&r(v)?p!==v&&j(f,p,v,n,l):r(v)?(r(t.text)&&u.setTextContent(f,""),_(f,null,v,0,v.length-1,n)):r(p)?C(p,0,p.length-1):r(t.text)&&u.setTextContent(f,""):t.text!==e.text&&u.setTextContent(f,e.text),r(d)&&r(h=d.hook)&&r(h=h.postpatch)&&h(t,e)}}}function E(t,e,n){if(o(n)&&r(t.parent))t.parent.data.pendingInsert=e;else for(var i=0;i<e.length;++i)e[i].data.hook.insert(e[i])}var T=m("attrs,class,staticClass,staticStyle,key");function L(t,e,n,i){var a,s=e.tag,c=e.data,u=e.children;if(i=i||c&&c.pre,e.elm=t,o(e.isComment)&&r(e.asyncFactory))return e.isAsyncPlaceholder=!0,!0;if(r(c)&&(r(a=c.hook)&&r(a=a.init)&&a(e,!0),r(a=e.componentInstance)))return v(e,n),!0;if(r(s)){if(r(u))if(t.hasChildNodes())if(r(a=c)&&r(a=a.domProps)&&r(a=a.innerHTML)){if(a!==t.innerHTML)return!1}else{for(var l=!0,f=t.firstChild,h=0;h<u.length;h++){if(!f||!L(f,u[h],n,i)){l=!1;break}f=f.nextSibling}if(!l||f)return!1}else y(e,u,n);if(r(c)){var d=!1;for(var p in c)if(!T(p)){d=!0,w(e,n);break}!d&&c["class"]&&ge(c["class"])}}else t.data!==e.text&&(t.data=e.text);return!0}return function(t,e,n,s){if(!i(e)){var 
c=!1,f=[];if(i(t))c=!0,d(e,f);else{var h=r(t.nodeType);if(!h&&Ar(t,e))A(t,e,f,null,null,s);else{if(h){if(1===t.nodeType&&t.hasAttribute(N)&&(t.removeAttribute(N),n=!0),o(n)&&L(t,e,f))return E(e,f,!0),t;t=l(t)}var p=t.elm,v=u.parentNode(p);if(d(e,f,p._leaveCb?null:v,u.nextSibling(p)),r(e.parent)){var m=e.parent,g=x(e);while(m){for(var b=0;b<a.destroy.length;++b)a.destroy[b](m);if(m.elm=e.elm,g){for(var y=0;y<a.create.length;++y)a.create[y](jr,m);var w=m.data.hook.insert;if(w.merged)for(var O=1;O<w.fns.length;O++)w.fns[O]()}else kr(m);m=m.parent}}r(v)?C([t],0,0):r(t.tag)&&S(t)}}return E(e,f,c),e.elm}r(t)&&S(t)}}var Ir={create:Br,update:Br,destroy:function(t){Br(t,jr)}};function Br(t,e){(t.data.directives||e.data.directives)&&Mr(t,e)}function Mr(t,e){var n,i,r,o=t===jr,a=e===jr,s=Dr(t.data.directives,t.context),c=Dr(e.data.directives,e.context),u=[],l=[];for(n in c)i=s[n],r=c[n],i?(r.oldValue=i.value,r.oldArg=i.arg,Nr(r,"update",e,t),r.def&&r.def.componentUpdated&&l.push(r)):(Nr(r,"bind",e,t),r.def&&r.def.inserted&&u.push(r));if(u.length){var f=function(){for(var n=0;n<u.length;n++)Nr(u[n],"inserted",e,t)};o?Oe(e,"insert",f):f()}if(l.length&&Oe(e,"postpatch",(function(){for(var n=0;n<l.length;n++)Nr(l[n],"componentUpdated",e,t)})),!o)for(n in s)c[n]||Nr(s[n],"unbind",t,t,a)}var Pr=Object.create(null);function Dr(t,e){var n,i,r=Object.create(null);if(!t)return r;for(n=0;n<t.length;n++)i=t[n],i.modifiers||(i.modifiers=Pr),r[Rr(i)]=i,i.def=Zt(e.$options,"directives",i.name,!0);return r}function Rr(t){return t.rawName||t.name+"."+Object.keys(t.modifiers||{}).join(".")}function Nr(t,e,n,i,r){var o=t.def&&t.def[e];if(o)try{o(n.elm,t,n,i,r)}catch(Sa){ne(Sa,n.context,"directive "+t.name+" "+e+" hook")}}var Vr=[Cr,Ir];function Fr(t,e){var n=e.componentOptions;if((!r(n)||!1!==n.Ctor.options.inheritAttrs)&&(!i(t.data.attrs)||!i(e.data.attrs))){var o,a,s,c=e.elm,u=t.data.attrs||{},l=e.data.attrs||{};for(o in r(l.__ob__)&&(l=e.data.attrs=T({},l)),l)a=l[o],s=u[o],s!==a&&zr(c,o,a,e.data.pre);for(o in(tt||nt)&&l.value!==u.value&&zr(c,"value",l.value),u)i(l[o])&&(Gi(o)?c.removeAttributeNS(qi,Yi(o)):zi(o)||c.removeAttribute(o))}}function zr(t,e,n,i){i||t.tagName.indexOf("-")>-1?Hr(t,e,n):Ui(e)?Zi(n)?t.removeAttribute(e):(n="allowfullscreen"===e&&"EMBED"===t.tagName?"true":e,t.setAttribute(e,n)):zi(e)?t.setAttribute(e,Wi(e,n)):Gi(e)?Zi(n)?t.removeAttributeNS(qi,Yi(e)):t.setAttributeNS(qi,e,n):Hr(t,e,n)}function Hr(t,e,n){if(Zi(n))t.removeAttribute(e);else{if(tt&&!et&&"TEXTAREA"===t.tagName&&"placeholder"===e&&""!==n&&!t.__ieph){var i=function(e){e.stopImmediatePropagation(),t.removeEventListener("input",i)};t.addEventListener("input",i),t.__ieph=!0}t.setAttribute(e,n)}}var Wr={create:Fr,update:Fr};function Ur(t,e){var n=e.elm,o=e.data,a=t.data;if(!(i(o.staticClass)&&i(o.class)&&(i(a)||i(a.staticClass)&&i(a.class)))){var s=Ki(e),c=n._transitionClasses;r(c)&&(s=Qi(s,tr(c))),s!==n._prevClass&&(n.setAttribute("class",s),n._prevClass=s)}}var qr,Gr={create:Ur,update:Ur},Yr="__r",Zr="__c";function Kr(t){if(r(t[Yr])){var e=tt?"change":"input";t[e]=[].concat(t[Yr],t[e]||[]),delete t[Yr]}r(t[Zr])&&(t.change=[].concat(t[Zr],t.change||[]),delete t[Zr])}function Xr(t,e,n){var i=qr;return function r(){var o=e.apply(null,arguments);null!==o&&to(t,r,n,i)}}var Jr=se&&!(rt&&Number(rt[1])<=53);function Qr(t,e,n,i){if(Jr){var r=Yn,o=e;e=o._wrapper=function(t){if(t.target===t.currentTarget||t.timeStamp>=r||t.timeStamp<=0||t.target.ownerDocument!==document)return 
o.apply(this,arguments)}}qr.addEventListener(t,e,at?{capture:n,passive:i}:n)}function to(t,e,n,i){(i||qr).removeEventListener(t,e._wrapper||e,n)}function eo(t,e){if(!i(t.data.on)||!i(e.data.on)){var n=e.data.on||{},r=t.data.on||{};qr=e.elm,Kr(n),we(n,r,Qr,to,Xr,e.context),qr=void 0}}var no,io={create:eo,update:eo};function ro(t,e){if(!i(t.data.domProps)||!i(e.data.domProps)){var n,o,a=e.elm,s=t.data.domProps||{},c=e.data.domProps||{};for(n in r(c.__ob__)&&(c=e.data.domProps=T({},c)),s)n in c||(a[n]="");for(n in c){if(o=c[n],"textContent"===n||"innerHTML"===n){if(e.children&&(e.children.length=0),o===s[n])continue;1===a.childNodes.length&&a.removeChild(a.childNodes[0])}if("value"===n&&"PROGRESS"!==a.tagName){a._value=o;var u=i(o)?"":String(o);oo(a,u)&&(a.value=u)}else if("innerHTML"===n&&or(a.tagName)&&i(a.innerHTML)){no=no||document.createElement("div"),no.innerHTML="<svg>"+o+"</svg>";var l=no.firstChild;while(a.firstChild)a.removeChild(a.firstChild);while(l.firstChild)a.appendChild(l.firstChild)}else if(o!==s[n])try{a[n]=o}catch(Sa){}}}}function oo(t,e){return!t.composing&&("OPTION"===t.tagName||ao(t,e)||so(t,e))}function ao(t,e){var n=!0;try{n=document.activeElement!==t}catch(Sa){}return n&&t.value!==e}function so(t,e){var n=t.value,i=t._vModifiers;if(r(i)){if(i.number)return v(n)!==v(e);if(i.trim)return n.trim()!==e.trim()}return n!==e}var co={create:ro,update:ro},uo=w((function(t){var e={},n=/;(?![^(]*\))/g,i=/:(.+)/;return t.split(n).forEach((function(t){if(t){var n=t.split(i);n.length>1&&(e[n[0].trim()]=n[1].trim())}})),e}));function lo(t){var e=fo(t.style);return t.staticStyle?T(t.staticStyle,e):e}function fo(t){return Array.isArray(t)?L(t):"string"===typeof t?uo(t):t}function ho(t,e){var n,i={};if(e){var r=t;while(r.componentInstance)r=r.componentInstance._vnode,r&&r.data&&(n=lo(r.data))&&T(i,n)}(n=lo(t.data))&&T(i,n);var o=t;while(o=o.parent)o.data&&(n=lo(o.data))&&T(i,n);return i}var po,vo=/^--/,mo=/\s*!important$/,go=function(t,e,n){if(vo.test(e))t.style.setProperty(e,n);else if(mo.test(n))t.style.setProperty(k(e),n.replace(mo,""),"important");else{var i=yo(e);if(Array.isArray(n))for(var r=0,o=n.length;r<o;r++)t.style[i]=n[r];else t.style[i]=n}},bo=["Webkit","Moz","ms"],yo=w((function(t){if(po=po||document.createElement("div").style,t=_(t),"filter"!==t&&t in po)return t;for(var e=t.charAt(0).toUpperCase()+t.slice(1),n=0;n<bo.length;n++){var i=bo[n]+e;if(i in po)return i}}));function xo(t,e){var n=e.data,o=t.data;if(!(i(n.staticStyle)&&i(n.style)&&i(o.staticStyle)&&i(o.style))){var a,s,c=e.elm,u=o.staticStyle,l=o.normalizedStyle||o.style||{},f=u||l,h=fo(e.data.style)||{};e.data.normalizedStyle=r(h.__ob__)?T({},h):h;var d=ho(e,!0);for(s in f)i(d[s])&&go(c,s,"");for(s in d)a=d[s],a!==f[s]&&go(c,s,null==a?"":a)}}var wo={create:xo,update:xo},Oo=/\s+/;function _o(t,e){if(e&&(e=e.trim()))if(t.classList)e.indexOf(" ")>-1?e.split(Oo).forEach((function(e){return t.classList.add(e)})):t.classList.add(e);else{var n=" "+(t.getAttribute("class")||"")+" ";n.indexOf(" "+e+" ")<0&&t.setAttribute("class",(n+e).trim())}}function So(t,e){if(e&&(e=e.trim()))if(t.classList)e.indexOf(" ")>-1?e.split(Oo).forEach((function(e){return t.classList.remove(e)})):t.classList.remove(e),t.classList.length||t.removeAttribute("class");else{var n=" "+(t.getAttribute("class")||"")+" ",i=" "+e+" ";while(n.indexOf(i)>=0)n=n.replace(i," ");n=n.trim(),n?t.setAttribute("class",n):t.removeAttribute("class")}}function Co(t){if(t){if("object"===typeof t){var 
e={};return!1!==t.css&&T(e,ko(t.name||"v")),T(e,t),e}return"string"===typeof t?ko(t):void 0}}var ko=w((function(t){return{enterClass:t+"-enter",enterToClass:t+"-enter-to",enterActiveClass:t+"-enter-active",leaveClass:t+"-leave",leaveToClass:t+"-leave-to",leaveActiveClass:t+"-leave-active"}})),jo=K&&!et,$o="transition",Ao="animation",Eo="transition",To="transitionend",Lo="animation",Io="animationend";jo&&(void 0===window.ontransitionend&&void 0!==window.onwebkittransitionend&&(Eo="WebkitTransition",To="webkitTransitionEnd"),void 0===window.onanimationend&&void 0!==window.onwebkitanimationend&&(Lo="WebkitAnimation",Io="webkitAnimationEnd"));var Bo=K?window.requestAnimationFrame?window.requestAnimationFrame.bind(window):setTimeout:function(t){return t()};function Mo(t){Bo((function(){Bo(t)}))}function Po(t,e){var n=t._transitionClasses||(t._transitionClasses=[]);n.indexOf(e)<0&&(n.push(e),_o(t,e))}function Do(t,e){t._transitionClasses&&b(t._transitionClasses,e),So(t,e)}function Ro(t,e,n){var i=Vo(t,e),r=i.type,o=i.timeout,a=i.propCount;if(!r)return n();var s=r===$o?To:Io,c=0,u=function(){t.removeEventListener(s,l),n()},l=function(e){e.target===t&&++c>=a&&u()};setTimeout((function(){c<a&&u()}),o+1),t.addEventListener(s,l)}var No=/\b(transform|all)(,|$)/;function Vo(t,e){var n,i=window.getComputedStyle(t),r=(i[Eo+"Delay"]||"").split(", "),o=(i[Eo+"Duration"]||"").split(", "),a=Fo(r,o),s=(i[Lo+"Delay"]||"").split(", "),c=(i[Lo+"Duration"]||"").split(", "),u=Fo(s,c),l=0,f=0;e===$o?a>0&&(n=$o,l=a,f=o.length):e===Ao?u>0&&(n=Ao,l=u,f=c.length):(l=Math.max(a,u),n=l>0?a>u?$o:Ao:null,f=n?n===$o?o.length:c.length:0);var h=n===$o&&No.test(i[Eo+"Property"]);return{type:n,timeout:l,propCount:f,hasTransform:h}}function Fo(t,e){while(t.length<e.length)t=t.concat(t);return Math.max.apply(null,e.map((function(e,n){return zo(e)+zo(t[n])})))}function zo(t){return 1e3*Number(t.slice(0,-1).replace(",","."))}function Ho(t,e){var n=t.elm;r(n._leaveCb)&&(n._leaveCb.cancelled=!0,n._leaveCb());var o=Co(t.data.transition);if(!i(o)&&!r(n._enterCb)&&1===n.nodeType){var a=o.css,s=o.type,u=o.enterClass,l=o.enterToClass,f=o.enterActiveClass,h=o.appearClass,d=o.appearToClass,p=o.appearActiveClass,m=o.beforeEnter,g=o.enter,b=o.afterEnter,y=o.enterCancelled,x=o.beforeAppear,w=o.appear,O=o.afterAppear,_=o.appearCancelled,S=o.duration,C=Tn,k=Tn.$vnode;while(k&&k.parent)C=k.context,k=k.parent;var j=!C._isMounted||!t.isRootInsert;if(!j||w||""===w){var $=j&&h?h:u,A=j&&p?p:f,E=j&&d?d:l,T=j&&x||m,L=j&&"function"===typeof w?w:g,I=j&&O||b,B=j&&_||y,M=v(c(S)?S.enter:S);0;var P=!1!==a&&!et,D=qo(L),N=n._enterCb=R((function(){P&&(Do(n,E),Do(n,A)),N.cancelled?(P&&Do(n,$),B&&B(n)):I&&I(n),n._enterCb=null}));t.data.show||Oe(t,"insert",(function(){var e=n.parentNode,i=e&&e._pending&&e._pending[t.key];i&&i.tag===t.tag&&i.elm._leaveCb&&i.elm._leaveCb(),L&&L(n,N)})),T&&T(n),P&&(Po(n,$),Po(n,A),Mo((function(){Do(n,$),N.cancelled||(Po(n,E),D||(Uo(M)?setTimeout(N,M):Ro(n,s,N)))}))),t.data.show&&(e&&e(),L&&L(n,N)),P||D||N()}}}function Wo(t,e){var n=t.elm;r(n._enterCb)&&(n._enterCb.cancelled=!0,n._enterCb());var o=Co(t.data.transition);if(i(o)||1!==n.nodeType)return e();if(!r(n._leaveCb)){var a=o.css,s=o.type,u=o.leaveClass,l=o.leaveToClass,f=o.leaveActiveClass,h=o.beforeLeave,d=o.leave,p=o.afterLeave,m=o.leaveCancelled,g=o.delayLeave,b=o.duration,y=!1!==a&&!et,x=qo(d),w=v(c(b)?b.leave:b);0;var 
O=n._leaveCb=R((function(){n.parentNode&&n.parentNode._pending&&(n.parentNode._pending[t.key]=null),y&&(Do(n,l),Do(n,f)),O.cancelled?(y&&Do(n,u),m&&m(n)):(e(),p&&p(n)),n._leaveCb=null}));g?g(_):_()}function _(){O.cancelled||(!t.data.show&&n.parentNode&&((n.parentNode._pending||(n.parentNode._pending={}))[t.key]=t),h&&h(n),y&&(Po(n,u),Po(n,f),Mo((function(){Do(n,u),O.cancelled||(Po(n,l),x||(Uo(w)?setTimeout(O,w):Ro(n,s,O)))}))),d&&d(n,O),y||x||O())}}function Uo(t){return"number"===typeof t&&!isNaN(t)}function qo(t){if(i(t))return!1;var e=t.fns;return r(e)?qo(Array.isArray(e)?e[0]:e):(t._length||t.length)>1}function Go(t,e){!0!==e.data.show&&Ho(e)}var Yo=K?{create:Go,activate:Go,remove:function(t,e){!0!==t.data.show?Wo(t,e):e()}}:{},Zo=[Wr,Gr,io,co,wo,Yo],Ko=Zo.concat(Vr),Xo=Lr({nodeOps:Sr,modules:Ko});et&&document.addEventListener("selectionchange",(function(){var t=document.activeElement;t&&t.vmodel&&oa(t,"input")}));var Jo={inserted:function(t,e,n,i){"select"===n.tag?(i.elm&&!i.elm._vOptions?Oe(n,"postpatch",(function(){Jo.componentUpdated(t,e,n)})):Qo(t,e,n.context),t._vOptions=[].map.call(t.options,na)):("textarea"===n.tag||lr(t.type))&&(t._vModifiers=e.modifiers,e.modifiers.lazy||(t.addEventListener("compositionstart",ia),t.addEventListener("compositionend",ra),t.addEventListener("change",ra),et&&(t.vmodel=!0)))},componentUpdated:function(t,e,n){if("select"===n.tag){Qo(t,e,n.context);var i=t._vOptions,r=t._vOptions=[].map.call(t.options,na);if(r.some((function(t,e){return!P(t,i[e])}))){var o=t.multiple?e.value.some((function(t){return ea(t,r)})):e.value!==e.oldValue&&ea(e.value,r);o&&oa(t,"change")}}}};function Qo(t,e,n){ta(t,e,n),(tt||nt)&&setTimeout((function(){ta(t,e,n)}),0)}function ta(t,e,n){var i=e.value,r=t.multiple;if(!r||Array.isArray(i)){for(var o,a,s=0,c=t.options.length;s<c;s++)if(a=t.options[s],r)o=D(i,na(a))>-1,a.selected!==o&&(a.selected=o);else if(P(na(a),i))return void(t.selectedIndex!==s&&(t.selectedIndex=s));r||(t.selectedIndex=-1)}}function ea(t,e){return e.every((function(e){return!P(e,t)}))}function na(t){return"_value"in t?t._value:t.value}function ia(t){t.target.composing=!0}function ra(t){t.target.composing&&(t.target.composing=!1,oa(t.target,"input"))}function oa(t,e){var n=document.createEvent("HTMLEvents");n.initEvent(e,!0,!0),t.dispatchEvent(n)}function aa(t){return!t.componentInstance||t.data&&t.data.transition?t:aa(t.componentInstance._vnode)}var sa={bind:function(t,e,n){var i=e.value;n=aa(n);var r=n.data&&n.data.transition,o=t.__vOriginalDisplay="none"===t.style.display?"":t.style.display;i&&r?(n.data.show=!0,Ho(n,(function(){t.style.display=o}))):t.style.display=i?o:"none"},update:function(t,e,n){var i=e.value,r=e.oldValue;if(!i!==!r){n=aa(n);var o=n.data&&n.data.transition;o?(n.data.show=!0,i?Ho(n,(function(){t.style.display=t.__vOriginalDisplay})):Wo(n,(function(){t.style.display="none"}))):t.style.display=i?t.__vOriginalDisplay:"none"}},unbind:function(t,e,n,i,r){r||(t.style.display=t.__vOriginalDisplay)}},ca={model:Jo,show:sa},ua={name:String,appear:Boolean,css:Boolean,mode:String,type:String,enterClass:String,leaveClass:String,enterToClass:String,leaveToClass:String,enterActiveClass:String,leaveActiveClass:String,appearClass:String,appearActiveClass:String,appearToClass:String,duration:[Number,String,Object]};function la(t){var e=t&&t.componentOptions;return e&&e.Ctor.options.abstract?la(Sn(e.children)):t}function fa(t){var e={},n=t.$options;for(var i in n.propsData)e[i]=t[i];var r=n._parentListeners;for(var o in r)e[_(o)]=r[o];return e}function 
ha(t,e){if(/\d-keep-alive$/.test(e.tag))return t("keep-alive",{props:e.componentOptions.propsData})}function da(t){while(t=t.parent)if(t.data.transition)return!0}function pa(t,e){return e.key===t.key&&e.tag===t.tag}var va=function(t){return t.tag||Be(t)},ma=function(t){return"show"===t.name},ga={name:"transition",props:ua,abstract:!0,render:function(t){var e=this,n=this.$slots.default;if(n&&(n=n.filter(va),n.length)){0;var i=this.mode;0;var r=n[0];if(da(this.$vnode))return r;var o=la(r);if(!o)return r;if(this._leaving)return ha(t,r);var a="__transition-"+this._uid+"-";o.key=null==o.key?o.isComment?a+"comment":a+o.tag:s(o.key)?0===String(o.key).indexOf(a)?o.key:a+o.key:o.key;var c=(o.data||(o.data={})).transition=fa(this),u=this._vnode,l=la(u);if(o.data.directives&&o.data.directives.some(ma)&&(o.data.show=!0),l&&l.data&&!pa(o,l)&&!Be(l)&&(!l.componentInstance||!l.componentInstance._vnode.isComment)){var f=l.data.transition=T({},c);if("out-in"===i)return this._leaving=!0,Oe(f,"afterLeave",(function(){e._leaving=!1,e.$forceUpdate()})),ha(t,r);if("in-out"===i){if(Be(o))return u;var h,d=function(){h()};Oe(c,"afterEnter",d),Oe(c,"enterCancelled",d),Oe(f,"delayLeave",(function(t){h=t}))}}return r}}},ba=T({tag:String,moveClass:String},ua);delete ba.mode;var ya={props:ba,beforeMount:function(){var t=this,e=this._update;this._update=function(n,i){var r=Ln(t);t.__patch__(t._vnode,t.kept,!1,!0),t._vnode=t.kept,r(),e.call(t,n,i)}},render:function(t){for(var e=this.tag||this.$vnode.data.tag||"span",n=Object.create(null),i=this.prevChildren=this.children,r=this.$slots.default||[],o=this.children=[],a=fa(this),s=0;s<r.length;s++){var c=r[s];if(c.tag)if(null!=c.key&&0!==String(c.key).indexOf("__vlist"))o.push(c),n[c.key]=c,(c.data||(c.data={})).transition=a;else;}if(i){for(var u=[],l=[],f=0;f<i.length;f++){var h=i[f];h.data.transition=a,h.data.pos=h.elm.getBoundingClientRect(),n[h.key]?u.push(h):l.push(h)}this.kept=t(e,null,u),this.removed=l}return t(e,null,o)},updated:function(){var t=this.prevChildren,e=this.moveClass||(this.name||"v")+"-move";t.length&&this.hasMove(t[0].elm,e)&&(t.forEach(xa),t.forEach(wa),t.forEach(Oa),this._reflow=document.body.offsetHeight,t.forEach((function(t){if(t.data.moved){var n=t.elm,i=n.style;Po(n,e),i.transform=i.WebkitTransform=i.transitionDuration="",n.addEventListener(To,n._moveCb=function t(i){i&&i.target!==n||i&&!/transform$/.test(i.propertyName)||(n.removeEventListener(To,t),n._moveCb=null,Do(n,e))})}})))},methods:{hasMove:function(t,e){if(!jo)return!1;if(this._hasMove)return this._hasMove;var n=t.cloneNode();t._transitionClasses&&t._transitionClasses.forEach((function(t){So(n,t)})),_o(n,e),n.style.display="none",this.$el.appendChild(n);var i=Vo(n);return this.$el.removeChild(n),this._hasMove=i.hasTransform}}};function xa(t){t.elm._moveCb&&t.elm._moveCb(),t.elm._enterCb&&t.elm._enterCb()}function wa(t){t.data.newPos=t.elm.getBoundingClientRect()}function Oa(t){var e=t.data.pos,n=t.data.newPos,i=e.left-n.left,r=e.top-n.top;if(i||r){t.data.moved=!0;var o=t.elm.style;o.transform=o.WebkitTransform="translate("+i+"px,"+r+"px)",o.transitionDuration="0s"}}var _a={Transition:ga,TransitionGroup:ya};Si.config.mustUseProp=Fi,Si.config.isReservedTag=ar,Si.config.isReservedAttr=Ni,Si.config.getTagNamespace=sr,Si.config.isUnknownElement=ur,T(Si.options.directives,ca),T(Si.options.components,_a),Si.prototype.__patch__=K?Xo:I,Si.prototype.$mount=function(t,e){return t=t&&K?fr(t):void 
0,Mn(this,t,e)},K&&setTimeout((function(){z.devtools&&ut&&ut.emit("init",Si)}),0),e["a"]=Si}).call(this,n("c8ba"))},"2b19":function(t,e,n){var i=n("23e7"),r=n("129f");i({target:"Object",stat:!0},{is:r})},"2ba4":function(t,e,n){var i=n("40d5"),r=Function.prototype,o=r.apply,a=r.call;t.exports="object"==typeof Reflect&&Reflect.apply||(i?a.bind(o):function(){return a.apply(o,arguments)})},"2c3e":function(t,e,n){var i=n("da84"),r=n("83ab"),o=n("9f7f").MISSED_STICKY,a=n("c6b6"),s=n("edd0"),c=n("69f3").get,u=RegExp.prototype,l=i.TypeError;r&&o&&s(u,"sticky",{configurable:!0,get:function(){if(this!==u){if("RegExp"===a(this))return!!c(this).sticky;throw l("Incompatible receiver, RegExp required")}}})},"2ca0":function(t,e,n){"use strict";var i=n("23e7"),r=n("e330"),o=n("06cf").f,a=n("50c4"),s=n("577e"),c=n("5a34"),u=n("1d80"),l=n("ab13"),f=n("c430"),h=r("".startsWith),d=r("".slice),p=Math.min,v=l("startsWith"),m=!f&&!v&&!!function(){var t=o(String.prototype,"startsWith");return t&&!t.writable}();i({target:"String",proto:!0,forced:!m&&!v},{startsWith:function(t){var e=s(u(this));c(t);var n=a(p(arguments.length>1?arguments[1]:void 0,e.length)),i=s(t);return h?h(e,i,n):d(e,n,n+i.length)===i}})},"2caf":function(t,e,n){"use strict";n.d(e,"a",(function(){return a}));n("4ae1"),n("d3b7"),n("f8c9"),n("3410");function i(t){return i=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)},i(t)}function r(){if("undefined"===typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"===typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(t){return!1}}var o=n("99de");function a(t){var e=r();return function(){var n,r=i(t);if(e){var a=i(this).constructor;n=Reflect.construct(r,arguments,a)}else n=r.apply(this,arguments);return Object(o["a"])(this,n)}}},"2cf4":function(t,e,n){var i,r,o,a,s=n("da84"),c=n("2ba4"),u=n("0366"),l=n("1626"),f=n("1a2d"),h=n("d039"),d=n("1be4"),p=n("f36a"),v=n("cc12"),m=n("d6d6"),g=n("1cdc"),b=n("605d"),y=s.setImmediate,x=s.clearImmediate,w=s.process,O=s.Dispatch,_=s.Function,S=s.MessageChannel,C=s.String,k=0,j={},$="onreadystatechange";try{i=s.location}catch(I){}var A=function(t){if(f(j,t)){var e=j[t];delete j[t],e()}},E=function(t){return function(){A(t)}},T=function(t){A(t.data)},L=function(t){s.postMessage(C(t),i.protocol+"//"+i.host)};y&&x||(y=function(t){m(arguments.length,1);var e=l(t)?t:_(t),n=p(arguments,1);return j[++k]=function(){c(e,void 0,n)},r(k),k},x=function(t){delete j[t]},b?r=function(t){w.nextTick(E(t))}:O&&O.now?r=function(t){O.now(E(t))}:S&&!g?(o=new S,a=o.port2,o.port1.onmessage=T,r=u(a.postMessage,a)):s.addEventListener&&l(s.postMessage)&&!s.importScripts&&i&&"file:"!==i.protocol&&!h(L)?(r=L,s.addEventListener("message",T,!1)):r=$ in v("script")?function(t){d.appendChild(v("script"))[$]=function(){d.removeChild(this),A(t)}}:function(t){setTimeout(E(t),0)}),t.exports={set:y,clear:x}},"2d00":function(t,e,n){var i,r,o=n("da84"),a=n("342f"),s=o.process,c=o.Deno,u=s&&s.versions||c&&c.version,l=u&&u.v8;l&&(i=l.split("."),r=i[0]>0&&i[0]<4?1:+(i[0]+i[1])),!r&&a&&(i=a.match(/Edge\/(\d+)/),(!i||i[1]>=74)&&(i=a.match(/Chrome\/(\d+)/),i&&(r=+i[1]))),t.exports=r},"2d83":function(t,e,n){"use strict";var i=n("387f");t.exports=function(t,e,n,r,o){var a=new Error(t);return i(a,e,n,r,o)}},"2db4":function(t,e,n){"use strict";var 
i=n("ade3"),r=(n("a9e3"),n("caad"),n("ca71"),n("8dd9")),o=n("a9ad"),a=n("7560"),s=n("f2e7"),c=n("fe6c"),u=n("58df"),l=n("80d2"),f=n("d9bd");e["a"]=Object(u["a"])(r["a"],o["a"],s["a"],Object(c["b"])(["absolute","bottom","left","right","top"])).extend({name:"v-snackbar",props:{app:Boolean,centered:Boolean,contentClass:{type:String,default:""},multiLine:Boolean,text:Boolean,timeout:{type:[Number,String],default:5e3},transition:{type:[Boolean,String],default:"v-snack-transition",validator:function(t){return"string"===typeof t||!1===t}},vertical:Boolean},data:function(){return{activeTimeout:-1}},computed:{classes:function(){return{"v-snack--absolute":this.absolute,"v-snack--active":this.isActive,"v-snack--bottom":this.bottom||!this.top,"v-snack--centered":this.centered,"v-snack--has-background":this.hasBackground,"v-snack--left":this.left,"v-snack--multi-line":this.multiLine&&!this.vertical,"v-snack--right":this.right,"v-snack--text":this.text,"v-snack--top":this.top,"v-snack--vertical":this.vertical}},hasBackground:function(){return!this.text&&!this.outlined},isDark:function(){return this.hasBackground?!this.light:a["a"].options.computed.isDark.call(this)},styles:function(){if(this.absolute||!this.app)return{};var t=this.$vuetify.application,e=t.bar,n=t.bottom,i=t.footer,r=t.insetFooter,o=t.left,a=t.right,s=t.top;return{paddingBottom:Object(l["d"])(n+i+r),paddingLeft:Object(l["d"])(o),paddingRight:Object(l["d"])(a),paddingTop:Object(l["d"])(e+s)}}},watch:{isActive:"setTimeout",timeout:"setTimeout"},mounted:function(){this.isActive&&this.setTimeout()},created:function(){this.$attrs.hasOwnProperty("auto-height")&&Object(f["e"])("auto-height",this),0==this.timeout&&Object(f["d"])('timeout="0"',"-1",this)},methods:{genActions:function(){return this.$createElement("div",{staticClass:"v-snack__action "},[Object(l["l"])(this,"action",{attrs:{class:"v-snack__btn"}})])},genContent:function(){return this.$createElement("div",{staticClass:"v-snack__content",class:Object(i["a"])({},this.contentClass,!0),attrs:{role:"status","aria-live":"polite"}},[Object(l["l"])(this)])},genWrapper:function(){var t=this,e=this.hasBackground?this.setBackgroundColor:this.setTextColor,n=e(this.color,{staticClass:"v-snack__wrapper",class:r["a"].options.computed.classes.call(this),style:r["a"].options.computed.styles.call(this),directives:[{name:"show",value:this.isActive}],on:{pointerenter:function(){return window.clearTimeout(t.activeTimeout)},pointerleave:this.setTimeout}});return this.$createElement("div",n,[this.genContent(),this.genActions()])},genTransition:function(){return this.$createElement("transition",{props:{name:this.transition}},[this.genWrapper()])},setTimeout:function(){var t=this;window.clearTimeout(this.activeTimeout);var e=Number(this.timeout);this.isActive&&![0,-1].includes(e)&&(this.activeTimeout=window.setTimeout((function(){t.isActive=!1}),e))}},render:function(t){return t("div",{staticClass:"v-snack",class:this.classes,style:this.styles},[!1!==this.transition?this.genTransition():this.genWrapper()])}})},"2e67":function(t,e,n){"use strict";t.exports=function(t){return!(!t||!t.__CANCEL__)}},"2fa4":function(t,e,n){"use strict";n("20f6");var i=n("80d2");e["a"]=Object(i["e"])("spacer","div","v-spacer")},"30b5":function(t,e,n){"use strict";var i=n("c532");function r(t){return encodeURIComponent(t).replace(/%40/gi,"@").replace(/%3A/gi,":").replace(/%24/g,"$").replace(/%2C/gi,",").replace(/%20/g,"+").replace(/%5B/gi,"[").replace(/%5D/gi,"]")}t.exports=function(t,e,n){if(!e)return t;var o;if(n)o=n(e);else 
if(i.isURLSearchParams(e))o=e.toString();else{var a=[];i.forEach(e,(function(t,e){null!==t&&"undefined"!==typeof t&&(i.isArray(t)?e+="[]":t=[t],i.forEach(t,(function(t){i.isDate(t)?t=t.toISOString():i.isObject(t)&&(t=JSON.stringify(t)),a.push(r(e)+"="+r(t))})))})),o=a.join("&")}return o&&(t+=(-1===t.indexOf("?")?"?":"&")+o),t}},3206:function(t,e,n){"use strict";n.d(e,"a",(function(){return s}));var i=n("ade3"),r=(n("99af"),n("2b0e")),o=n("d9bd");function a(t,e){return function(){return Object(o["c"])("The ".concat(t," component must be used inside a ").concat(e))}}function s(t,e,n){var o=e&&n?{register:a(e,n),unregister:a(e,n)}:null;return r["a"].extend({name:"registrable-inject",inject:Object(i["a"])({},t,{default:o})})}},3408:function(t,e,n){},3410:function(t,e,n){var i=n("23e7"),r=n("d039"),o=n("7b0b"),a=n("e163"),s=n("e177"),c=r((function(){a(1)}));i({target:"Object",stat:!0,forced:c,sham:!s},{getPrototypeOf:function(t){return a(o(t))}})},"342f":function(t,e,n){var i=n("d066");t.exports=i("navigator","userAgent")||""},3529:function(t,e,n){"use strict";var i=n("23e7"),r=n("c65b"),o=n("59ed"),a=n("f069"),s=n("e667"),c=n("2266"),u=n("5eed");i({target:"Promise",stat:!0,forced:u},{race:function(t){var e=this,n=a.f(e),i=n.reject,u=s((function(){var a=o(e.resolve);c(t,(function(t){r(a,e,t).then(n.resolve,i)}))}));return u.error&&i(u.value),n.promise}})},"35a1":function(t,e,n){var i=n("f5df"),r=n("dc4a"),o=n("3f8c"),a=n("b622"),s=a("iterator");t.exports=function(t){if(void 0!=t)return r(t,s)||r(t,"@@iterator")||o[i(t)]}},"36a7":function(t,e,n){},"37e8":function(t,e,n){var i=n("83ab"),r=n("aed9"),o=n("9bf2"),a=n("825a"),s=n("fc6a"),c=n("df75");e.f=i&&!r?Object.defineProperties:function(t,e){a(t);var n,i=s(e),r=c(e),u=r.length,l=0;while(u>l)o.f(t,n=r[l++],i[n]);return t}},3835:function(t,e,n){"use strict";function i(t){if(Array.isArray(t))return t}n.d(e,"a",(function(){return s}));n("a4d3"),n("e01a"),n("d3b7"),n("d28b"),n("3ca3"),n("ddb0");function r(t,e){var n=null==t?null:"undefined"!==typeof Symbol&&t[Symbol.iterator]||t["@@iterator"];if(null!=n){var i,r,o=[],a=!0,s=!1;try{for(n=n.call(t);!(a=(i=n.next()).done);a=!0)if(o.push(i.value),e&&o.length===e)break}catch(c){s=!0,r=c}finally{try{a||null==n["return"]||n["return"]()}finally{if(s)throw r}}return o}}var o=n("06c5");n("d9e2");function a(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}function s(t,e){return i(t)||r(t,e)||Object(o["a"])(t,e)||a()}},"387f":function(t,e,n){"use strict";t.exports=function(t,e,n,i,r){return t.config=e,n&&(t.code=n),t.request=i,t.response=r,t}},"38cb":function(t,e,n){"use strict";var i=n("53ca"),r=(n("a9e3"),n("fb6a"),n("a9ad")),o=n("7560"),a=n("3206"),s=n("80d2"),c=n("d9bd"),u=n("58df"),l=Object(u["a"])(r["a"],Object(a["a"])("form"),o["a"]);e["a"]=l.extend({name:"validatable",props:{disabled:Boolean,error:Boolean,errorCount:{type:[Number,String],default:1},errorMessages:{type:[String,Array],default:function(){return[]}},messages:{type:[String,Array],default:function(){return[]}},readonly:Boolean,rules:{type:Array,default:function(){return[]}},success:Boolean,successMessages:{type:[String,Array],default:function(){return[]}},validateOnBlur:Boolean,value:{required:!1}},data:function(){return{errorBucket:[],hasColor:!1,hasFocused:!1,hasInput:!1,isFocused:!1,isResetting:!1,lazyValue:this.value,valid:!1}},computed:{computedColor:function(){if(!this.isDisabled)return 
this.color?this.color:this.isDark&&!this.appIsDark?"white":"primary"},hasError:function(){return this.internalErrorMessages.length>0||this.errorBucket.length>0||this.error},hasSuccess:function(){return this.internalSuccessMessages.length>0||this.success},externalError:function(){return this.internalErrorMessages.length>0||this.error},hasMessages:function(){return this.validationTarget.length>0},hasState:function(){return!this.isDisabled&&(this.hasSuccess||this.shouldValidate&&this.hasError)},internalErrorMessages:function(){return this.genInternalMessages(this.errorMessages)},internalMessages:function(){return this.genInternalMessages(this.messages)},internalSuccessMessages:function(){return this.genInternalMessages(this.successMessages)},internalValue:{get:function(){return this.lazyValue},set:function(t){this.lazyValue=t,this.$emit("input",t)}},isDisabled:function(){return this.disabled||!!this.form&&this.form.disabled},isInteractive:function(){return!this.isDisabled&&!this.isReadonly},isReadonly:function(){return this.readonly||!!this.form&&this.form.readonly},shouldValidate:function(){return!!this.externalError||!this.isResetting&&(this.validateOnBlur?this.hasFocused&&!this.isFocused:this.hasInput||this.hasFocused)},validations:function(){return this.validationTarget.slice(0,Number(this.errorCount))},validationState:function(){if(!this.isDisabled)return this.hasError&&this.shouldValidate?"error":this.hasSuccess?"success":this.hasColor?this.computedColor:void 0},validationTarget:function(){return this.internalErrorMessages.length>0?this.internalErrorMessages:this.successMessages&&this.successMessages.length>0?this.internalSuccessMessages:this.messages&&this.messages.length>0?this.internalMessages:this.shouldValidate?this.errorBucket:[]}},watch:{rules:{handler:function(t,e){Object(s["f"])(t,e)||this.validate()},deep:!0},internalValue:function(){this.hasInput=!0,this.validateOnBlur||this.$nextTick(this.validate)},isFocused:function(t){t||this.isDisabled||(this.hasFocused=!0,this.validateOnBlur&&this.$nextTick(this.validate))},isResetting:function(){var t=this;setTimeout((function(){t.hasInput=!1,t.hasFocused=!1,t.isResetting=!1,t.validate()}),0)},hasError:function(t){this.shouldValidate&&this.$emit("update:error",t)},value:function(t){this.lazyValue=t}},beforeMount:function(){this.validate()},created:function(){this.form&&this.form.register(this)},beforeDestroy:function(){this.form&&this.form.unregister(this)},methods:{genInternalMessages:function(t){return t?Array.isArray(t)?t:[t]:[]},reset:function(){this.isResetting=!0,this.internalValue=Array.isArray(this.internalValue)?[]:null},resetValidation:function(){this.isResetting=!0},validate:function(){var t=arguments.length>0&&void 0!==arguments[0]&&arguments[0],e=arguments.length>1?arguments[1]:void 0,n=[];e=e||this.internalValue,t&&(this.hasInput=this.hasFocused=!0);for(var r=0;r<this.rules.length;r++){var o=this.rules[r],a="function"===typeof o?o(e):o;!1===a||"string"===typeof a?n.push(a||""):"boolean"!==typeof a&&Object(c["b"])("Rules should return a string or boolean, received '".concat(Object(i["a"])(a),"' instead"),this)}return this.errorBucket=n,this.valid=0===n.length,this.valid}}})},"38cf":function(t,e,n){var i=n("23e7"),r=n("1148");i({target:"String",proto:!0},{repeat:r})},3934:function(t,e,n){"use strict";var i=n("c532");t.exports=i.isStandardBrowserEnv()?function(){var t,e=/(msie|trident)/i.test(navigator.userAgent),n=document.createElement("a");function r(t){var i=t;return 
e&&(n.setAttribute("href",i),i=n.href),n.setAttribute("href",i),{href:n.href,protocol:n.protocol?n.protocol.replace(/:$/,""):"",host:n.host,search:n.search?n.search.replace(/^\?/,""):"",hash:n.hash?n.hash.replace(/^#/,""):"",hostname:n.hostname,port:n.port,pathname:"/"===n.pathname.charAt(0)?n.pathname:"/"+n.pathname}}return t=r(window.location.href),function(e){var n=i.isString(e)?r(e):e;return n.protocol===t.protocol&&n.host===t.host}}():function(){return function(){return!0}}()},"3a66":function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var i=n("fe6c"),r=n("58df");function o(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];return Object(r["a"])(Object(i["b"])(["absolute","fixed"])).extend({name:"applicationable",props:{app:Boolean},computed:{applicationProperty:function(){return t}},watch:{app:function(t,e){e?this.removeApplication(!0):this.callUpdate()},applicationProperty:function(t,e){this.$vuetify.application.unregister(this._uid,e)}},activated:function(){this.callUpdate()},created:function(){for(var t=0,n=e.length;t<n;t++)this.$watch(e[t],this.callUpdate);this.callUpdate()},mounted:function(){this.callUpdate()},deactivated:function(){this.removeApplication()},destroyed:function(){this.removeApplication()},methods:{callUpdate:function(){this.app&&this.$vuetify.application.register(this._uid,this.applicationProperty,this.updateApplication())},removeApplication:function(){var t=arguments.length>0&&void 0!==arguments[0]&&arguments[0];(t||this.app)&&this.$vuetify.application.unregister(this._uid,this.applicationProperty)},updateApplication:function(){return 0}}})}},"3a9b":function(t,e,n){var i=n("e330");t.exports=i({}.isPrototypeOf)},"3ad0":function(t,e,n){},"3bbe":function(t,e,n){var i=n("da84"),r=n("1626"),o=i.String,a=i.TypeError;t.exports=function(t){if("object"==typeof t||r(t))return t;throw a("Can't set "+o(t)+" as a prototype")}},"3ca3":function(t,e,n){"use strict";var i=n("6547").charAt,r=n("577e"),o=n("69f3"),a=n("7dd0"),s="String Iterator",c=o.set,u=o.getterFor(s);a(String,"String",(function(t){c(this,{type:s,string:r(t),index:0})}),(function(){var t,e=u(this),n=e.string,r=e.index;return r>=n.length?{value:void 0,done:!0}:(t=i(n,r),e.index+=t.length,{value:t,done:!1})}))},"3d87":function(t,e,n){var i=n("4930");t.exports=i&&!!Symbol["for"]&&!!Symbol.keyFor},"3ea3":function(t,e,n){var i=n("23e7"),r=n("f748"),o=Math.abs,a=Math.pow;i({target:"Math",stat:!0},{cbrt:function(t){return r(t=+t)*a(o(t),1/3)}})},"3f8c":function(t,e){t.exports={}},4069:function(t,e,n){var i=n("44d2");i("flat")},"408a":function(t,e,n){var i=n("e330");t.exports=i(1..valueOf)},"40d5":function(t,e,n){var i=n("d039");t.exports=!i((function(){var t=function(){}.bind();return"function"!=typeof t||t.hasOwnProperty("prototype")}))},"40dc":function(t,e,n){"use strict";var i=n("5530"),r=(n("c7cd"),n("a9e3"),n("8b0d"),n("71d9")),o=n("53ca");function a(t,e,n){var i=e.modifiers||{},r=i.self,a=void 0!==r&&r,s=e.value,c="object"===Object(o["a"])(s)&&s.options||{passive:!0},u="function"===typeof s||"handleEvent"in s?s:s.handler,l=a?t:e.arg?document.querySelector(e.arg):window;l&&(l.addEventListener("scroll",u,c),t._onScroll=Object(t._onScroll),t._onScroll[n.context._uid]={handler:u,options:c,target:a?void 0:l})}function s(t,e,n){var i;if(null!=(i=t._onScroll)&&i[n.context._uid]){var r=t._onScroll[n.context._uid],o=r.handler,a=r.options,s=r.target,c=void 0===s?t:s;c.removeEventListener("scroll",o,a),delete t._onScroll[n.context._uid]}}var 
c={inserted:a,unbind:s},u=c,l=n("3a66"),f=n("d9bd"),h=n("2b0e"),d=h["a"].extend({name:"scrollable",directives:{Scroll:c},props:{scrollTarget:String,scrollThreshold:[String,Number]},data:function(){return{currentScroll:0,currentThreshold:0,isActive:!1,isScrollingUp:!1,previousScroll:0,savedScroll:0,target:null}},computed:{canScroll:function(){return"undefined"!==typeof window},computedScrollThreshold:function(){return this.scrollThreshold?Number(this.scrollThreshold):300}},watch:{isScrollingUp:function(){this.savedScroll=this.savedScroll||this.currentScroll},isActive:function(){this.savedScroll=0}},mounted:function(){this.scrollTarget&&(this.target=document.querySelector(this.scrollTarget),this.target||Object(f["c"])("Unable to locate element with identifier ".concat(this.scrollTarget),this))},methods:{onScroll:function(){var t=this;this.canScroll&&(this.previousScroll=this.currentScroll,this.currentScroll=this.target?this.target.scrollTop:window.pageYOffset,this.isScrollingUp=this.currentScroll<this.previousScroll,this.currentThreshold=Math.abs(this.currentScroll-this.computedScrollThreshold),this.$nextTick((function(){Math.abs(t.currentScroll-t.savedScroll)>t.computedScrollThreshold&&t.thresholdMet()})))},thresholdMet:function(){}}}),p=n("d10f"),v=n("f2e7"),m=n("80d2"),g=n("58df"),b=Object(g["a"])(r["a"],d,p["a"],v["a"],Object(l["a"])("top",["clippedLeft","clippedRight","computedHeight","invertedScroll","isExtended","isProminent","value"]));e["a"]=b.extend({name:"v-app-bar",directives:{Scroll:u},provide:function(){return{VAppBar:this}},props:{clippedLeft:Boolean,clippedRight:Boolean,collapseOnScroll:Boolean,elevateOnScroll:Boolean,fadeImgOnScroll:Boolean,hideOnScroll:Boolean,invertedScroll:Boolean,scrollOffScreen:Boolean,shrinkOnScroll:Boolean,value:{type:Boolean,default:!0}},data:function(){return{isActive:this.value}},computed:{applicationProperty:function(){return this.bottom?"bottom":"top"},canScroll:function(){return d.options.computed.canScroll.call(this)&&(this.invertedScroll||this.elevateOnScroll||this.hideOnScroll||this.collapseOnScroll||this.isBooted||!this.value)},classes:function(){return Object(i["a"])(Object(i["a"])({},r["a"].options.computed.classes.call(this)),{},{"v-toolbar--collapse":this.collapse||this.collapseOnScroll,"v-app-bar":!0,"v-app-bar--clipped":this.clippedLeft||this.clippedRight,"v-app-bar--fade-img-on-scroll":this.fadeImgOnScroll,"v-app-bar--elevate-on-scroll":this.elevateOnScroll,"v-app-bar--fixed":!this.absolute&&(this.app||this.fixed),"v-app-bar--hide-shadow":this.hideShadow,"v-app-bar--is-scrolled":this.currentScroll>0,"v-app-bar--shrink-on-scroll":this.shrinkOnScroll})},scrollRatio:function(){var t=this.computedScrollThreshold;return Math.max((t-this.currentScroll)/t,0)},computedContentHeight:function(){if(!this.shrinkOnScroll)return r["a"].options.computed.computedContentHeight.call(this);var t=this.dense?48:56,e=this.computedOriginalHeight;return t+(e-t)*this.scrollRatio},computedFontSize:function(){if(this.isProminent){var t=1.25,e=1.5;return t+(e-t)*this.scrollRatio}},computedLeft:function(){return!this.app||this.clippedLeft?0:this.$vuetify.application.left},computedMarginTop:function(){return this.app?this.$vuetify.application.bar:0},computedOpacity:function(){if(this.fadeImgOnScroll)return this.scrollRatio},computedOriginalHeight:function(){var t=r["a"].options.computed.computedContentHeight.call(this);return 
this.isExtended&&(t+=parseInt(this.extensionHeight)),t},computedRight:function(){return!this.app||this.clippedRight?0:this.$vuetify.application.right},computedScrollThreshold:function(){return this.scrollThreshold?Number(this.scrollThreshold):this.computedOriginalHeight-(this.dense?48:56)},computedTransform:function(){if(!this.canScroll||this.elevateOnScroll&&0===this.currentScroll&&this.isActive)return 0;if(this.isActive)return 0;var t=this.scrollOffScreen?this.computedHeight:this.computedContentHeight;return this.bottom?t:-t},hideShadow:function(){return this.elevateOnScroll&&this.isExtended?this.currentScroll<this.computedScrollThreshold:this.elevateOnScroll?0===this.currentScroll||this.computedTransform<0:(!this.isExtended||this.scrollOffScreen)&&0!==this.computedTransform},isCollapsed:function(){return this.collapseOnScroll?this.currentScroll>0:r["a"].options.computed.isCollapsed.call(this)},isProminent:function(){return r["a"].options.computed.isProminent.call(this)||this.shrinkOnScroll},styles:function(){return Object(i["a"])(Object(i["a"])({},r["a"].options.computed.styles.call(this)),{},{fontSize:Object(m["d"])(this.computedFontSize,"rem"),marginTop:Object(m["d"])(this.computedMarginTop),transform:"translateY(".concat(Object(m["d"])(this.computedTransform),")"),left:Object(m["d"])(this.computedLeft),right:Object(m["d"])(this.computedRight)})}},watch:{canScroll:"onScroll",computedTransform:function(){this.canScroll&&(this.clippedLeft||this.clippedRight)&&this.callUpdate()},invertedScroll:function(t){this.isActive=!t||0!==this.currentScroll},hideOnScroll:function(t){this.isActive=!t||this.currentScroll<this.computedScrollThreshold}},created:function(){this.invertedScroll&&(this.isActive=!1)},methods:{genBackground:function(){var t=r["a"].options.methods.genBackground.call(this);return t.data=this._b(t.data||{},t.tag,{style:{opacity:this.computedOpacity}}),t},updateApplication:function(){return this.invertedScroll?0:this.computedHeight+this.computedTransform},thresholdMet:function(){this.invertedScroll?this.isActive=this.currentScroll>this.computedScrollThreshold:(this.hideOnScroll&&(this.isActive=this.isScrollingUp||this.currentScroll<this.computedScrollThreshold),this.currentThreshold<this.computedScrollThreshold||(this.savedScroll=this.currentScroll))}},render:function(t){var e=r["a"].options.render.call(this,t);return e.data=e.data||{},this.canScroll&&(e.data.directives=e.data.directives||[],e.data.directives.push({arg:this.scrollTarget,name:"scroll",value:this.onScroll})),e}})},"428f":function(t,e,n){var i=n("da84");t.exports=i},4362:function(t,e,n){e.nextTick=function(t){var e=Array.prototype.slice.call(arguments);e.shift(),setTimeout((function(){t.apply(null,e)}),0)},e.platform=e.arch=e.execPath=e.title="browser",e.pid=1,e.browser=!0,e.env={},e.argv=[],e.binding=function(t){throw new Error("No such module. 
(Possibly not yet loaded)")},function(){var t,i="/";e.cwd=function(){return i},e.chdir=function(e){t||(t=n("df7c")),i=t.resolve(e,i)}}(),e.exit=e.kill=e.umask=e.dlopen=e.uptime=e.memoryUsage=e.uvCounters=function(){},e.features={}},"44ad":function(t,e,n){var i=n("da84"),r=n("e330"),o=n("d039"),a=n("c6b6"),s=i.Object,c=r("".split);t.exports=o((function(){return!s("z").propertyIsEnumerable(0)}))?function(t){return"String"==a(t)?c(t,""):s(t)}:s},"44d2":function(t,e,n){var i=n("b622"),r=n("7c73"),o=n("9bf2"),a=i("unscopables"),s=Array.prototype;void 0==s[a]&&o.f(s,a,{configurable:!0,value:r(null)}),t.exports=function(t){s[a][t]=!0}},"44de":function(t,e,n){var i=n("da84");t.exports=function(t,e){var n=i.console;n&&n.error&&(1==arguments.length?n.error(t):n.error(t,e))}},"44e7":function(t,e,n){var i=n("861d"),r=n("c6b6"),o=n("b622"),a=o("match");t.exports=function(t){var e;return i(t)&&(void 0!==(e=t[a])?!!e:"RegExp"==r(t))}},"466d":function(t,e,n){"use strict";var i=n("c65b"),r=n("d784"),o=n("825a"),a=n("50c4"),s=n("577e"),c=n("1d80"),u=n("dc4a"),l=n("8aa5"),f=n("14c3");r("match",(function(t,e,n){return[function(e){var n=c(this),r=void 0==e?void 0:u(e,t);return r?i(r,e,n):new RegExp(e)[t](s(n))},function(t){var i=o(this),r=s(t),c=n(e,i,r);if(c.done)return c.value;if(!i.global)return f(i,r);var u=i.unicode;i.lastIndex=0;var h,d=[],p=0;while(null!==(h=f(i,r))){var v=s(h[0]);d[p]=v,""===v&&(i.lastIndex=l(r,a(i.lastIndex),u)),p++}return 0===p?null:d}]}))},"467f":function(t,e,n){"use strict";var i=n("2d83");t.exports=function(t,e,n){var r=n.config.validateStatus;n.status&&r&&!r(n.status)?e(i("Request failed with status code "+n.status,n.config,null,n.request,n)):t(n)}},4738:function(t,e,n){var i=n("da84"),r=n("d256"),o=n("1626"),a=n("94ca"),s=n("8925"),c=n("b622"),u=n("6069"),l=n("c430"),f=n("2d00"),h=r&&r.prototype,d=c("species"),p=!1,v=o(i.PromiseRejectionEvent),m=a("Promise",(function(){var t=s(r),e=t!==String(r);if(!e&&66===f)return!0;if(l&&(!h["catch"]||!h["finally"]))return!0;if(f>=51&&/native code/.test(t))return!1;var n=new r((function(t){t(1)})),i=function(t){t((function(){}),(function(){}))},o=n.constructor={};return o[d]=i,p=n.then((function(){}))instanceof i,!p||!e&&u&&!v}));t.exports={CONSTRUCTOR:m,REJECTION_EVENT:v,SUBCLASSING:p}},4804:function(t,e,n){},4840:function(t,e,n){var i=n("825a"),r=n("5087"),o=n("b622"),a=o("species");t.exports=function(t,e){var n,o=i(t).constructor;return void 0===o||void 0==(n=i(o)[a])?e:r(n)}},"485a":function(t,e,n){var i=n("da84"),r=n("c65b"),o=n("1626"),a=n("861d"),s=i.TypeError;t.exports=function(t,e){var n,i;if("string"===e&&o(n=t.toString)&&!a(i=r(n,t)))return i;if(o(n=t.valueOf)&&!a(i=r(n,t)))return i;if("string"!==e&&o(n=t.toString)&&!a(i=r(n,t)))return i;throw s("Can't convert object to primitive value")}},"490a":function(t,e,n){"use strict";n("a9e3"),n("99af"),n("8d4f");var i=n("90a2"),r=n("a9ad"),o=n("80d2");e["a"]=r["a"].extend({name:"v-progress-circular",directives:{intersect:i["a"]},props:{button:Boolean,indeterminate:Boolean,rotate:{type:[Number,String],default:0},size:{type:[Number,String],default:32},width:{type:[Number,String],default:4},value:{type:[Number,String],default:0}},data:function(){return{radius:20,isVisible:!0}},computed:{calculatedSize:function(){return Number(this.size)+(this.button?8:0)},circumference:function(){return 
2*Math.PI*this.radius},classes:function(){return{"v-progress-circular--visible":this.isVisible,"v-progress-circular--indeterminate":this.indeterminate,"v-progress-circular--button":this.button}},normalizedValue:function(){return this.value<0?0:this.value>100?100:parseFloat(this.value)},strokeDashArray:function(){return Math.round(1e3*this.circumference)/1e3},strokeDashOffset:function(){return(100-this.normalizedValue)/100*this.circumference+"px"},strokeWidth:function(){return Number(this.width)/+this.size*this.viewBoxSize*2},styles:function(){return{height:Object(o["d"])(this.calculatedSize),width:Object(o["d"])(this.calculatedSize)}},svgStyles:function(){return{transform:"rotate(".concat(Number(this.rotate),"deg)")}},viewBoxSize:function(){return this.radius/(1-Number(this.width)/+this.size)}},methods:{genCircle:function(t,e){return this.$createElement("circle",{class:"v-progress-circular__".concat(t),attrs:{fill:"transparent",cx:2*this.viewBoxSize,cy:2*this.viewBoxSize,r:this.radius,"stroke-width":this.strokeWidth,"stroke-dasharray":this.strokeDashArray,"stroke-dashoffset":e}})},genSvg:function(){var t=[this.indeterminate||this.genCircle("underlay",0),this.genCircle("overlay",this.strokeDashOffset)];return this.$createElement("svg",{style:this.svgStyles,attrs:{xmlns:"http://www.w3.org/2000/svg",viewBox:"".concat(this.viewBoxSize," ").concat(this.viewBoxSize," ").concat(2*this.viewBoxSize," ").concat(2*this.viewBoxSize)}},t)},genInfo:function(){return this.$createElement("div",{staticClass:"v-progress-circular__info"},this.$slots.default)},onObserve:function(t,e,n){this.isVisible=n}},render:function(t){return t("div",this.setTextColor(this.color,{staticClass:"v-progress-circular",attrs:{role:"progressbar","aria-valuemin":0,"aria-valuemax":100,"aria-valuenow":this.indeterminate?void 0:this.normalizedValue},class:this.classes,directives:[{name:"intersect",value:this.onObserve}],style:this.styles,on:this.$listeners}),[this.genSvg(),this.genInfo()])}})},4930:function(t,e,n){var i=n("2d00"),r=n("d039");t.exports=!!Object.getOwnPropertySymbols&&!r((function(){var t=Symbol();return!String(t)||!(Object(t)instanceof Symbol)||!Symbol.sham&&i&&i<41}))},"498a":function(t,e,n){"use strict";var i=n("23e7"),r=n("58a8").trim,o=n("c8d2");i({target:"String",proto:!0,forced:o("trim")},{trim:function(){return r(this)}})},"4ae1":function(t,e,n){var i=n("23e7"),r=n("d066"),o=n("2ba4"),a=n("0538"),s=n("5087"),c=n("825a"),u=n("861d"),l=n("7c73"),f=n("d039"),h=r("Reflect","construct"),d=Object.prototype,p=[].push,v=f((function(){function t(){}return!(h((function(){}),[],t)instanceof t)})),m=!f((function(){h((function(){}))})),g=v||m;i({target:"Reflect",stat:!0,forced:g,sham:g},{construct:function(t,e){s(t),c(e);var n=arguments.length<3?t:s(arguments[2]);if(m&&!v)return h(t,e,n);if(t==n){switch(e.length){case 0:return new t;case 1:return new t(e[0]);case 2:return new t(e[0],e[1]);case 3:return new t(e[0],e[1],e[2]);case 4:return new t(e[0],e[1],e[2],e[3])}var i=[null];return o(p,i,e),new(o(a,t,i))}var r=n.prototype,f=l(u(r)?r:d),g=o(t,f,e);return u(g)?g:f}})},"4b85":function(t,e,n){},"4d63":function(t,e,n){var 
i=n("83ab"),r=n("da84"),o=n("e330"),a=n("94ca"),s=n("7156"),c=n("9112"),u=n("241c").f,l=n("3a9b"),f=n("44e7"),h=n("577e"),d=n("90d8"),p=n("9f7f"),v=n("aeb0"),m=n("cb2d"),g=n("d039"),b=n("1a2d"),y=n("69f3").enforce,x=n("2626"),w=n("b622"),O=n("fce3"),_=n("107c"),S=w("match"),C=r.RegExp,k=C.prototype,j=r.SyntaxError,$=o(k.exec),A=o("".charAt),E=o("".replace),T=o("".indexOf),L=o("".slice),I=/^\?<[^\s\d!#%&*+<=>@^][^\s!#%&*+<=>@^]*>/,B=/a/g,M=/a/g,P=new C(B)!==B,D=p.MISSED_STICKY,R=p.UNSUPPORTED_Y,N=i&&(!P||D||O||_||g((function(){return M[S]=!1,C(B)!=B||C(M)==M||"/a/i"!=C(B,"i")}))),V=function(t){for(var e,n=t.length,i=0,r="",o=!1;i<=n;i++)e=A(t,i),"\\"!==e?o||"."!==e?("["===e?o=!0:"]"===e&&(o=!1),r+=e):r+="[\\s\\S]":r+=e+A(t,++i);return r},F=function(t){for(var e,n=t.length,i=0,r="",o=[],a={},s=!1,c=!1,u=0,l="";i<=n;i++){if(e=A(t,i),"\\"===e)e+=A(t,++i);else if("]"===e)s=!1;else if(!s)switch(!0){case"["===e:s=!0;break;case"("===e:$(I,L(t,i+1))&&(i+=2,c=!0),r+=e,u++;continue;case">"===e&&c:if(""===l||b(a,l))throw new j("Invalid capture group name");a[l]=!0,o[o.length]=[l,u],c=!1,l="";continue}c?l+=e:r+=e}return[r,o]};if(a("RegExp",N)){for(var z=function(t,e){var n,i,r,o,a,u,p=l(k,this),v=f(t),m=void 0===e,g=[],b=t;if(!p&&v&&m&&t.constructor===z)return t;if((v||l(k,t))&&(t=t.source,m&&(e=d(b))),t=void 0===t?"":h(t),e=void 0===e?"":h(e),b=t,O&&"dotAll"in B&&(i=!!e&&T(e,"s")>-1,i&&(e=E(e,/s/g,""))),n=e,D&&"sticky"in B&&(r=!!e&&T(e,"y")>-1,r&&R&&(e=E(e,/y/g,""))),_&&(o=F(t),t=o[0],g=o[1]),a=s(C(t,e),p?this:k,z),(i||r||g.length)&&(u=y(a),i&&(u.dotAll=!0,u.raw=z(V(t),n)),r&&(u.sticky=!0),g.length&&(u.groups=g)),t!==b)try{c(a,"source",""===b?"(?:)":b)}catch(x){}return a},H=u(C),W=0;H.length>W;)v(z,C,H[W++]);k.constructor=z,z.prototype=k,m(r,"RegExp",z,{constructor:!0})}x("RegExp")},"4d64":function(t,e,n){var i=n("fc6a"),r=n("23cb"),o=n("07fa"),a=function(t){return function(e,n,a){var s,c=i(e),u=o(c),l=r(a,u);if(t&&n!=n){while(u>l)if(s=c[l++],s!=s)return!0}else for(;u>l;l++)if((t||l in c)&&c[l]===n)return t||l||0;return!t&&-1}};t.exports={includes:a(!0),indexOf:a(!1)}},"4dae":function(t,e,n){var i=n("da84"),r=n("23cb"),o=n("07fa"),a=n("8418"),s=i.Array,c=Math.max;t.exports=function(t,e,n){for(var i=o(t),u=r(e,i),l=r(void 0===n?i:n,i),f=s(c(l-u,0)),h=0;u<l;u++,h++)a(f,h,t[u]);return f.length=h,f}},"4de4":function(t,e,n){"use strict";var i=n("23e7"),r=n("b727").filter,o=n("1dde"),a=o("filter");i({target:"Array",proto:!0,forced:!a},{filter:function(t){return r(this,t,arguments.length>1?arguments[1]:void 0)}})},"4df4":function(t,e,n){"use strict";var i=n("da84"),r=n("0366"),o=n("c65b"),a=n("7b0b"),s=n("9bdd"),c=n("e95a"),u=n("68ee"),l=n("07fa"),f=n("8418"),h=n("9a1f"),d=n("35a1"),p=i.Array;t.exports=function(t){var e=a(t),n=u(this),i=arguments.length,v=i>1?arguments[1]:void 0,m=void 0!==v;m&&(v=r(v,i>2?arguments[2]:void 0));var g,b,y,x,w,O,_=d(e),S=0;if(!_||this==p&&c(_))for(g=l(e),b=n?new this(g):p(g);g>S;S++)O=m?v(e[S],S):e[S],f(b,S,O);else for(x=h(e,_),w=x.next,b=n?new this:[];!(y=o(w,x)).done;S++)O=m?s(x,v,[y.value,S],!0):y.value,f(b,S,O);return b.length=S,b}},"4e82":function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var i=n("ade3"),r=n("3206");function o(t,e,n){return Object(r["a"])(t,e,n).extend({name:"groupable",props:{activeClass:{type:String,default:function(){if(this[t])return this[t].activeClass}},disabled:Boolean},data:function(){return{isActive:!1}},computed:{groupClasses:function(){return 
this.activeClass?Object(i["a"])({},this.activeClass,this.isActive):{}}},created:function(){this[t]&&this[t].register(this)},beforeDestroy:function(){this[t]&&this[t].unregister(this)},methods:{toggle:function(){this.$emit("change")}}})}o("itemGroup")},"4e827":function(t,e,n){"use strict";var i=n("23e7"),r=n("e330"),o=n("59ed"),a=n("7b0b"),s=n("07fa"),c=n("577e"),u=n("d039"),l=n("addb"),f=n("a640"),h=n("04d1"),d=n("d998"),p=n("2d00"),v=n("512c"),m=[],g=r(m.sort),b=r(m.push),y=u((function(){m.sort(void 0)})),x=u((function(){m.sort(null)})),w=f("sort"),O=!u((function(){if(p)return p<70;if(!(h&&h>3)){if(d)return!0;if(v)return v<603;var t,e,n,i,r="";for(t=65;t<76;t++){switch(e=String.fromCharCode(t),t){case 66:case 69:case 70:case 72:n=3;break;case 68:case 71:n=4;break;default:n=2}for(i=0;i<47;i++)m.push({k:e+i,v:n})}for(m.sort((function(t,e){return e.v-t.v})),i=0;i<m.length;i++)e=m[i].k.charAt(0),r.charAt(r.length-1)!==e&&(r+=e);return"DGBEFHACIJK"!==r}})),_=y||!x||!w||!O,S=function(t){return function(e,n){return void 0===n?-1:void 0===e?1:void 0!==t?+t(e,n)||0:c(e)>c(n)?1:-1}};i({target:"Array",proto:!0,forced:_},{sort:function(t){void 0!==t&&o(t);var e=a(this);if(O)return void 0===t?g(e):g(e,t);var n,i,r=[],c=s(e);for(i=0;i<c;i++)i in e&&b(r,e[i]);l(r,S(t)),n=r.length,i=0;while(i<n)e[i]=r[i++];while(i<c)delete e[i++];return e}})},"4ec9":function(t,e,n){n("6f48")},"4fad":function(t,e,n){var i=n("d039"),r=n("861d"),o=n("c6b6"),a=n("d86b"),s=Object.isExtensible,c=i((function(){s(1)}));t.exports=c||a?function(t){return!!r(t)&&((!a||"ArrayBuffer"!=o(t))&&(!s||s(t)))}:s},"4ff9":function(t,e,n){},5087:function(t,e,n){var i=n("da84"),r=n("68ee"),o=n("0d51"),a=i.TypeError;t.exports=function(t){if(r(t))return t;throw a(o(t)+" is not a constructor")}},"50c4":function(t,e,n){var i=n("5926"),r=Math.min;t.exports=function(t){return t>0?r(i(t),9007199254740991):0}},"512c":function(t,e,n){var i=n("342f"),r=i.match(/AppleWebKit\/(\d+)\./);t.exports=!!r&&+r[1]},5270:function(t,e,n){"use strict";var i=n("c532"),r=n("c401"),o=n("2e67"),a=n("2444"),s=n("d925"),c=n("e683");function u(t){t.cancelToken&&t.cancelToken.throwIfRequested()}t.exports=function(t){u(t),t.baseURL&&!s(t.url)&&(t.url=c(t.baseURL,t.url)),t.headers=t.headers||{},t.data=r(t.data,t.headers,t.transformRequest),t.headers=i.merge(t.headers.common||{},t.headers[t.method]||{},t.headers||{}),i.forEach(["delete","get","head","post","put","patch","common"],(function(e){delete t.headers[e]}));var e=t.adapter||a.adapter;return e(t).then((function(e){return u(t),e.data=r(e.data,e.headers,t.transformResponse),e}),(function(e){return o(e)||(u(t),e&&e.response&&(e.response.data=r(e.response.data,e.response.headers,t.transformResponse))),Promise.reject(e)}))}},5319:function(t,e,n){"use strict";var i=n("2ba4"),r=n("c65b"),o=n("e330"),a=n("d784"),s=n("d039"),c=n("825a"),u=n("1626"),l=n("5926"),f=n("50c4"),h=n("577e"),d=n("1d80"),p=n("8aa5"),v=n("dc4a"),m=n("0cb2"),g=n("14c3"),b=n("b622"),y=b("replace"),x=Math.max,w=Math.min,O=o([].concat),_=o([].push),S=o("".indexOf),C=o("".slice),k=function(t){return void 0===t?t:String(t)},j=function(){return"$0"==="a".replace(/./,"$0")}(),$=function(){return!!/./[y]&&""===/./[y]("a","$0")}(),A=!s((function(){var t=/./;return t.exec=function(){var t=[];return t.groups={a:"7"},t},"7"!=="".replace(t,"$<a>")}));a("replace",(function(t,e,n){var o=$?"$":"$0";return[function(t,n){var i=d(this),o=void 0==t?void 0:v(t,y);return o?r(o,t,i,n):r(e,h(i),t,n)},function(t,r){var a=c(this),s=h(t);if("string"==typeof 
r&&-1===S(r,o)&&-1===S(r,"$<")){var d=n(e,a,s,r);if(d.done)return d.value}var v=u(r);v||(r=h(r));var b=a.global;if(b){var y=a.unicode;a.lastIndex=0}var j=[];while(1){var $=g(a,s);if(null===$)break;if(_(j,$),!b)break;var A=h($[0]);""===A&&(a.lastIndex=p(s,f(a.lastIndex),y))}for(var E="",T=0,L=0;L<j.length;L++){$=j[L];for(var I=h($[0]),B=x(w(l($.index),s.length),0),M=[],P=1;P<$.length;P++)_(M,k($[P]));var D=$.groups;if(v){var R=O([I],M,B,s);void 0!==D&&_(R,D);var N=h(i(r,void 0,R))}else N=m(I,s,B,M,D,r);B>=T&&(E+=C(s,T,B)+N,T=B+I.length)}return E+C(s,T)}]}),!A||!j||$)},"53ca":function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));n("a4d3"),n("e01a"),n("d3b7"),n("d28b"),n("3ca3"),n("ddb0");function i(t){return i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},i(t)}},5530:function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));n("b64b"),n("a4d3"),n("4de4"),n("d3b7"),n("e439"),n("159b"),n("dbb4");var i=n("ade3");function r(t,e){var n=Object.keys(t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(t);e&&(i=i.filter((function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable}))),n.push.apply(n,i)}return n}function o(t){for(var e=1;e<arguments.length;e++){var n=null!=arguments[e]?arguments[e]:{};e%2?r(Object(n),!0).forEach((function(e){Object(i["a"])(t,e,n[e])})):Object.getOwnPropertyDescriptors?Object.defineProperties(t,Object.getOwnPropertyDescriptors(n)):r(Object(n)).forEach((function(e){Object.defineProperty(t,e,Object.getOwnPropertyDescriptor(n,e))}))}return t}},"553a":function(t,e,n){"use strict";var i=n("5530"),r=(n("a9e3"),n("c7cd"),n("b5b6"),n("8dd9")),o=n("3a66"),a=n("d10f"),s=n("58df"),c=n("80d2");e["a"]=Object(s["a"])(r["a"],Object(o["a"])("footer",["height","inset"]),a["a"]).extend({name:"v-footer",props:{height:{default:"auto",type:[Number,String]},inset:Boolean,padless:Boolean,tag:{type:String,default:"footer"}},computed:{applicationProperty:function(){return this.inset?"insetFooter":"footer"},classes:function(){return Object(i["a"])(Object(i["a"])({},r["a"].options.computed.classes.call(this)),{},{"v-footer--absolute":this.absolute,"v-footer--fixed":!this.absolute&&(this.app||this.fixed),"v-footer--padless":this.padless,"v-footer--inset":this.inset})},computedBottom:function(){if(this.isPositioned)return this.app?this.$vuetify.application.bottom:0},computedLeft:function(){if(this.isPositioned)return this.app&&this.inset?this.$vuetify.application.left:0},computedRight:function(){if(this.isPositioned)return this.app&&this.inset?this.$vuetify.application.right:0},isPositioned:function(){return Boolean(this.absolute||this.fixed||this.app)},styles:function(){var t=parseInt(this.height);return Object(i["a"])(Object(i["a"])({},r["a"].options.computed.styles.call(this)),{},{height:isNaN(t)?t:Object(c["d"])(t),left:Object(c["d"])(this.computedLeft),right:Object(c["d"])(this.computedRight),bottom:Object(c["d"])(this.computedBottom)})}},methods:{updateApplication:function(){var t=parseInt(this.height);return isNaN(t)?this.$el?this.$el.clientHeight:0:t}},render:function(t){var e=this.setBackgroundColor(this.color,{staticClass:"v-footer",class:this.classes,style:this.styles});return t(this.tag,e,this.$slots.default)}})},5607:function(t,e,n){"use strict";n("b0c0"),n("99af"),n("a9e3"),n("7435");var i=n("80d2"),r=80;function o(t,e){t.style.transform=e,t.style.webkitTransform=e}function 
a(t){return"TouchEvent"===t.constructor.name}function s(t){return"KeyboardEvent"===t.constructor.name}var c=function(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},i=0,r=0;if(!s(t)){var o=e.getBoundingClientRect(),c=a(t)?t.touches[t.touches.length-1]:t;i=c.clientX-o.left,r=c.clientY-o.top}var u=0,l=.3;e._ripple&&e._ripple.circle?(l=.15,u=e.clientWidth/2,u=n.center?u:u+Math.sqrt(Math.pow(i-u,2)+Math.pow(r-u,2))/4):u=Math.sqrt(Math.pow(e.clientWidth,2)+Math.pow(e.clientHeight,2))/2;var f="".concat((e.clientWidth-2*u)/2,"px"),h="".concat((e.clientHeight-2*u)/2,"px"),d=n.center?f:"".concat(i-u,"px"),p=n.center?h:"".concat(r-u,"px");return{radius:u,scale:l,x:d,y:p,centerX:f,centerY:h}},u={show:function(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};if(e._ripple&&e._ripple.enabled){var i=document.createElement("span"),r=document.createElement("span");i.appendChild(r),i.className="v-ripple__container",n.class&&(i.className+=" ".concat(n.class));var a=c(t,e,n),s=a.radius,u=a.scale,l=a.x,f=a.y,h=a.centerX,d=a.centerY,p="".concat(2*s,"px");r.className="v-ripple__animation",r.style.width=p,r.style.height=p,e.appendChild(i);var v=window.getComputedStyle(e);v&&"static"===v.position&&(e.style.position="relative",e.dataset.previousPosition="static"),r.classList.add("v-ripple__animation--enter"),r.classList.add("v-ripple__animation--visible"),o(r,"translate(".concat(l,", ").concat(f,") scale3d(").concat(u,",").concat(u,",").concat(u,")")),r.dataset.activated=String(performance.now()),setTimeout((function(){r.classList.remove("v-ripple__animation--enter"),r.classList.add("v-ripple__animation--in"),o(r,"translate(".concat(h,", ").concat(d,") scale3d(1,1,1)"))}),0)}},hide:function(t){if(t&&t._ripple&&t._ripple.enabled){var e=t.getElementsByClassName("v-ripple__animation");if(0!==e.length){var n=e[e.length-1];if(!n.dataset.isHiding){n.dataset.isHiding="true";var i=performance.now()-Number(n.dataset.activated),r=Math.max(250-i,0);setTimeout((function(){n.classList.remove("v-ripple__animation--in"),n.classList.add("v-ripple__animation--out"),setTimeout((function(){var e=t.getElementsByClassName("v-ripple__animation");1===e.length&&t.dataset.previousPosition&&(t.style.position=t.dataset.previousPosition,delete t.dataset.previousPosition),n.parentNode&&t.removeChild(n.parentNode)}),300)}),r)}}}}};function l(t){return"undefined"===typeof t||!!t}function f(t){var e={},n=t.currentTarget;if(n&&n._ripple&&!n._ripple.touched&&!t.rippleStop){if(t.rippleStop=!0,a(t))n._ripple.touched=!0,n._ripple.isTouch=!0;else if(n._ripple.isTouch)return;if(e.center=n._ripple.centered||s(t),n._ripple.class&&(e.class=n._ripple.class),a(t)){if(n._ripple.showTimerCommit)return;n._ripple.showTimerCommit=function(){u.show(t,n,e)},n._ripple.showTimer=window.setTimeout((function(){n&&n._ripple&&n._ripple.showTimerCommit&&(n._ripple.showTimerCommit(),n._ripple.showTimerCommit=null)}),r)}else u.show(t,n,e)}}function h(t){var e=t.currentTarget;if(e&&e._ripple){if(window.clearTimeout(e._ripple.showTimer),"touchend"===t.type&&e._ripple.showTimerCommit)return e._ripple.showTimerCommit(),e._ripple.showTimerCommit=null,void(e._ripple.showTimer=setTimeout((function(){h(t)})));window.setTimeout((function(){e._ripple&&(e._ripple.touched=!1)})),u.hide(e)}}function d(t){var e=t.currentTarget;e&&e._ripple&&(e._ripple.showTimerCommit&&(e._ripple.showTimerCommit=null),window.clearTimeout(e._ripple.showTimer))}var p=!1;function v(t){p||t.keyCode!==i["p"].enter&&t.keyCode!==i["p"].space||(p=!0,f(t))}function 
m(t){p=!1,h(t)}function g(t){!0===p&&(p=!1,h(t))}function b(t,e,n){var i=l(e.value);i||u.hide(t),t._ripple=t._ripple||{},t._ripple.enabled=i;var r=e.value||{};r.center&&(t._ripple.centered=!0),r.class&&(t._ripple.class=e.value.class),r.circle&&(t._ripple.circle=r.circle),i&&!n?(t.addEventListener("touchstart",f,{passive:!0}),t.addEventListener("touchend",h,{passive:!0}),t.addEventListener("touchmove",d,{passive:!0}),t.addEventListener("touchcancel",h),t.addEventListener("mousedown",f),t.addEventListener("mouseup",h),t.addEventListener("mouseleave",h),t.addEventListener("keydown",v),t.addEventListener("keyup",m),t.addEventListener("blur",g),t.addEventListener("dragstart",h,{passive:!0})):!i&&n&&y(t)}function y(t){t.removeEventListener("mousedown",f),t.removeEventListener("touchstart",f),t.removeEventListener("touchend",h),t.removeEventListener("touchmove",d),t.removeEventListener("touchcancel",h),t.removeEventListener("mouseup",h),t.removeEventListener("mouseleave",h),t.removeEventListener("keydown",v),t.removeEventListener("keyup",m),t.removeEventListener("dragstart",h),t.removeEventListener("blur",g)}function x(t,e,n){b(t,e,!1)}function w(t){delete t._ripple,y(t)}function O(t,e){if(e.value!==e.oldValue){var n=l(e.oldValue);b(t,e,n)}}var _={bind:x,unbind:w,update:O};e["a"]=_},5692:function(t,e,n){var i=n("c430"),r=n("c6cd");(t.exports=function(t,e){return r[t]||(r[t]=void 0!==e?e:{})})("versions",[]).push({version:"3.22.5",mode:i?"pure":"global",copyright:"© 2014-2022 Denis Pushkarev (zloirock.ru)",license:"https://github.com/zloirock/core-js/blob/v3.22.5/LICENSE",source:"https://github.com/zloirock/core-js"})},"56ef":function(t,e,n){var i=n("d066"),r=n("e330"),o=n("241c"),a=n("7418"),s=n("825a"),c=r([].concat);t.exports=i("Reflect","ownKeys")||function(t){var e=o.f(s(t)),n=a.f;return n?c(e,n(t)):e}},"577e":function(t,e,n){var i=n("da84"),r=n("f5df"),o=i.String;t.exports=function(t){if("Symbol"===r(t))throw TypeError("Cannot convert a Symbol value to a string");return o(t)}},"57b9":function(t,e,n){var i=n("c65b"),r=n("d066"),o=n("b622"),a=n("cb2d");t.exports=function(){var t=r("Symbol"),e=t&&t.prototype,n=e&&e.valueOf,s=o("toPrimitive");e&&!e[s]&&a(e,s,(function(t){return i(n,this)}),{arity:1})}},5899:function(t,e){t.exports="\t\n\v\f\r                 \u2028\u2029\ufeff"},"58a8":function(t,e,n){var i=n("e330"),r=n("1d80"),o=n("577e"),a=n("5899"),s=i("".replace),c="["+a+"]",u=RegExp("^"+c+c+"*"),l=RegExp(c+c+"*$"),f=function(t){return function(e){var n=o(r(e));return 1&t&&(n=s(n,u,"")),2&t&&(n=s(n,l,"")),n}};t.exports={start:f(1),end:f(2),trim:f(3)}},"58df":function(t,e,n){"use strict";n.d(e,"a",(function(){return r}));var i=n("2b0e");function r(){for(var t=arguments.length,e=new Array(t),n=0;n<t;n++)e[n]=arguments[n];return i["a"].extend({mixins:e})}},5926:function(t,e){var n=Math.ceil,i=Math.floor;t.exports=function(t){var e=+t;return e!==e||0===e?0:(e>0?i:n)(e)}},"59ed":function(t,e,n){var i=n("da84"),r=n("1626"),o=n("0d51"),a=i.TypeError;t.exports=function(t){if(r(t))return t;throw a(o(t)+" is not a function")}},"5a34":function(t,e,n){var i=n("da84"),r=n("44e7"),o=i.TypeError;t.exports=function(t){if(r(t))throw o("The method doesn't accept regular expressions");return t}},"5a47":function(t,e,n){var i=n("23e7"),r=n("4930"),o=n("d039"),a=n("7418"),s=n("7b0b"),c=!r||o((function(){a.f(1)}));i({target:"Object",stat:!0,forced:c},{getOwnPropertySymbols:function(t){var e=a.f;return 
e?e(s(t)):[]}})},"5c6c":function(t,e){t.exports=function(t,e){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:e}}},"5e23":function(t,e,n){},"5e77":function(t,e,n){var i=n("83ab"),r=n("1a2d"),o=Function.prototype,a=i&&Object.getOwnPropertyDescriptor,s=r(o,"name"),c=s&&"something"===function(){}.name,u=s&&(!i||i&&a(o,"name").configurable);t.exports={EXISTS:s,PROPER:c,CONFIGURABLE:u}},"5e7e":function(t,e,n){"use strict";var i,r,o,a,s=n("23e7"),c=n("c430"),u=n("605d"),l=n("da84"),f=n("c65b"),h=n("cb2d"),d=n("d2bb"),p=n("d44e"),v=n("2626"),m=n("59ed"),g=n("1626"),b=n("861d"),y=n("19aa"),x=n("4840"),w=n("2cf4").set,O=n("b575"),_=n("44de"),S=n("e667"),C=n("01b4"),k=n("69f3"),j=n("d256"),$=n("4738"),A=n("f069"),E="Promise",T=$.CONSTRUCTOR,L=$.REJECTION_EVENT,I=$.SUBCLASSING,B=k.getterFor(E),M=k.set,P=j&&j.prototype,D=j,R=P,N=l.TypeError,V=l.document,F=l.process,z=A.f,H=z,W=!!(V&&V.createEvent&&l.dispatchEvent),U="unhandledrejection",q="rejectionhandled",G=0,Y=1,Z=2,K=1,X=2,J=function(t){var e;return!(!b(t)||!g(e=t.then))&&e},Q=function(t,e){var n,i,r,o=e.value,a=e.state==Y,s=a?t.ok:t.fail,c=t.resolve,u=t.reject,l=t.domain;try{s?(a||(e.rejection===X&&rt(e),e.rejection=K),!0===s?n=o:(l&&l.enter(),n=s(o),l&&(l.exit(),r=!0)),n===t.promise?u(N("Promise-chain cycle")):(i=J(n))?f(i,n,c,u):c(n)):u(o)}catch(h){l&&!r&&l.exit(),u(h)}},tt=function(t,e){t.notified||(t.notified=!0,O((function(){var n,i=t.reactions;while(n=i.get())Q(n,t);t.notified=!1,e&&!t.rejection&&nt(t)})))},et=function(t,e,n){var i,r;W?(i=V.createEvent("Event"),i.promise=e,i.reason=n,i.initEvent(t,!1,!0),l.dispatchEvent(i)):i={promise:e,reason:n},!L&&(r=l["on"+t])?r(i):t===U&&_("Unhandled promise rejection",n)},nt=function(t){f(w,l,(function(){var e,n=t.facade,i=t.value,r=it(t);if(r&&(e=S((function(){u?F.emit("unhandledRejection",i,n):et(U,n,i)})),t.rejection=u||it(t)?X:K,e.error))throw e.value}))},it=function(t){return t.rejection!==K&&!t.parent},rt=function(t){f(w,l,(function(){var e=t.facade;u?F.emit("rejectionHandled",e):et(q,e,t.value)}))},ot=function(t,e,n){return function(i){t(e,i,n)}},at=function(t,e,n){t.done||(t.done=!0,n&&(t=n),t.value=e,t.state=Z,tt(t,!0))},st=function(t,e,n){if(!t.done){t.done=!0,n&&(t=n);try{if(t.facade===e)throw N("Promise can't be resolved itself");var i=J(e);i?O((function(){var n={done:!1};try{f(i,e,ot(st,n,t),ot(at,n,t))}catch(r){at(n,r,t)}})):(t.value=e,t.state=Y,tt(t,!1))}catch(r){at({done:!1},r,t)}}};if(T&&(D=function(t){y(this,R),m(t),f(i,this);var e=B(this);try{t(ot(st,e),ot(at,e))}catch(n){at(e,n)}},R=D.prototype,i=function(t){M(this,{type:E,done:!1,notified:!1,parent:!1,reactions:new C,rejection:!1,state:G,value:void 0})},i.prototype=h(R,"then",(function(t,e){var n=B(this),i=z(x(this,D));return n.parent=!0,i.ok=!g(t)||t,i.fail=g(e)&&e,i.domain=u?F.domain:void 0,n.state==G?n.reactions.add(i):O((function(){Q(i,n)})),i.promise})),r=function(){var t=new i,e=B(t);this.promise=t,this.resolve=ot(st,e),this.reject=ot(at,e)},A.f=z=function(t){return t===D||t===o?new r(t):H(t)},!c&&g(j)&&P!==Object.prototype)){a=P.then,I||h(P,"then",(function(t,e){var n=this;return new D((function(t,e){f(a,n,t,e)})).then(t,e)}),{unsafe:!0});try{delete P.constructor}catch(ct){}d&&d(P,R)}s({global:!0,constructor:!0,wrap:!0,forced:T},{Promise:D}),p(D,E,!1,!0),v(E)},"5eed":function(t,e,n){var i=n("d256"),r=n("1c7e"),o=n("4738").CONSTRUCTOR;t.exports=o||!r((function(t){i.all(t).then(void 0,(function(){}))}))},"605d":function(t,e,n){var 
i=n("c6b6"),r=n("da84");t.exports="process"==i(r.process)},6069:function(t,e){t.exports="object"==typeof window&&"object"!=typeof Deno},"60da":function(t,e,n){"use strict";var i=n("83ab"),r=n("e330"),o=n("c65b"),a=n("d039"),s=n("df75"),c=n("7418"),u=n("d1e7"),l=n("7b0b"),f=n("44ad"),h=Object.assign,d=Object.defineProperty,p=r([].concat);t.exports=!h||a((function(){if(i&&1!==h({b:1},h(d({},"a",{enumerable:!0,get:function(){d(this,"b",{value:3,enumerable:!1})}}),{b:2})).b)return!0;var t={},e={},n=Symbol(),r="abcdefghijklmnopqrst";return t[n]=7,r.split("").forEach((function(t){e[t]=t})),7!=h({},t)[n]||s(h({},e)).join("")!=r}))?function(t,e){var n=l(t),r=arguments.length,a=1,h=c.f,d=u.f;while(r>a){var v,m=f(arguments[a++]),g=h?p(s(m),h(m)):s(m),b=g.length,y=0;while(b>y)v=g[y++],i&&!o(d,m,v)||(n[v]=m[v])}return n}:h},"61d2":function(t,e,n){},6544:function(t,e){t.exports=function(t,e){var n="function"===typeof t.exports?t.exports.extendOptions:t.options;for(var i in"function"===typeof t.exports&&(n.components=t.exports.options.components),n.components=n.components||{},e)n.components[i]=n.components[i]||e[i]}},6547:function(t,e,n){var i=n("e330"),r=n("5926"),o=n("577e"),a=n("1d80"),s=i("".charAt),c=i("".charCodeAt),u=i("".slice),l=function(t){return function(e,n){var i,l,f=o(a(e)),h=r(n),d=f.length;return h<0||h>=d?t?"":void 0:(i=c(f,h),i<55296||i>56319||h+1===d||(l=c(f,h+1))<56320||l>57343?t?s(f,h):i:t?u(f,h,h+2):l-56320+(i-55296<<10)+65536)}};t.exports={codeAt:l(!1),charAt:l(!0)}},6566:function(t,e,n){"use strict";var i=n("9bf2").f,r=n("7c73"),o=n("6964"),a=n("0366"),s=n("19aa"),c=n("2266"),u=n("7dd0"),l=n("2626"),f=n("83ab"),h=n("f183").fastKey,d=n("69f3"),p=d.set,v=d.getterFor;t.exports={getConstructor:function(t,e,n,u){var l=t((function(t,i){s(t,d),p(t,{type:e,index:r(null),first:void 0,last:void 0,size:0}),f||(t.size=0),void 0!=i&&c(i,t[u],{that:t,AS_ENTRIES:n})})),d=l.prototype,m=v(e),g=function(t,e,n){var i,r,o=m(t),a=b(t,e);return a?a.value=n:(o.last=a={index:r=h(e,!0),key:e,value:n,previous:i=o.last,next:void 0,removed:!1},o.first||(o.first=a),i&&(i.next=a),f?o.size++:t.size++,"F"!==r&&(o.index[r]=a)),t},b=function(t,e){var n,i=m(t),r=h(e);if("F"!==r)return i.index[r];for(n=i.first;n;n=n.next)if(n.key==e)return n};return o(d,{clear:function(){var t=this,e=m(t),n=e.index,i=e.first;while(i)i.removed=!0,i.previous&&(i.previous=i.previous.next=void 0),delete n[i.index],i=i.next;e.first=e.last=void 0,f?e.size=0:t.size=0},delete:function(t){var e=this,n=m(e),i=b(e,t);if(i){var r=i.next,o=i.previous;delete n.index[i.index],i.removed=!0,o&&(o.next=r),r&&(r.previous=o),n.first==i&&(n.first=r),n.last==i&&(n.last=o),f?n.size--:e.size--}return!!i},forEach:function(t){var e,n=m(this),i=a(t,arguments.length>1?arguments[1]:void 0);while(e=e?e.next:n.first){i(e.value,e.key,this);while(e&&e.removed)e=e.previous}},has:function(t){return!!b(this,t)}}),o(d,n?{get:function(t){var e=b(this,t);return e&&e.value},set:function(t,e){return g(this,0===t?0:t,e)}}:{add:function(t){return g(this,t=0===t?0:t,t)}}),f&&i(d,"size",{get:function(){return m(this).size}}),l},setStrong:function(t,e,n){var i=e+" Iterator",r=v(e),o=v(i);u(t,e,(function(t,e){p(this,{type:i,target:t,state:r(t),kind:e,last:void 0})}),(function(){var t=o(this),e=t.kind,n=t.last;while(n&&n.removed)n=n.previous;return t.target&&(t.last=n=n?n.next:t.state.first)?"keys"==e?{value:n.key,done:!1}:"values"==e?{value:n.value,done:!1}:{value:[n.key,n.value],done:!1}:(t.target=void 0,{value:void 
0,done:!0})}),n?"entries":"values",!n,!0),l(e)}}},"65f0":function(t,e,n){var i=n("0b42");t.exports=function(t,e){return new(i(t))(0===e?0:e)}},"68dd":function(t,e,n){},"68ee":function(t,e,n){var i=n("e330"),r=n("d039"),o=n("1626"),a=n("f5df"),s=n("d066"),c=n("8925"),u=function(){},l=[],f=s("Reflect","construct"),h=/^\s*(?:class|function)\b/,d=i(h.exec),p=!h.exec(u),v=function(t){if(!o(t))return!1;try{return f(u,l,t),!0}catch(e){return!1}},m=function(t){if(!o(t))return!1;switch(a(t)){case"AsyncFunction":case"GeneratorFunction":case"AsyncGeneratorFunction":return!1}try{return p||!!d(h,c(t))}catch(e){return!0}};m.sham=!0,t.exports=!f||r((function(){var t;return v(v.call)||!v(Object)||!v((function(){t=!0}))||t}))?m:v},6964:function(t,e,n){var i=n("cb2d");t.exports=function(t,e,n){for(var r in e)i(t,r,e[r],n);return t}},"69f3":function(t,e,n){var i,r,o,a=n("7f9a"),s=n("da84"),c=n("e330"),u=n("861d"),l=n("9112"),f=n("1a2d"),h=n("c6cd"),d=n("f772"),p=n("d012"),v="Object already initialized",m=s.TypeError,g=s.WeakMap,b=function(t){return o(t)?r(t):i(t,{})},y=function(t){return function(e){var n;if(!u(e)||(n=r(e)).type!==t)throw m("Incompatible receiver, "+t+" required");return n}};if(a||h.state){var x=h.state||(h.state=new g),w=c(x.get),O=c(x.has),_=c(x.set);i=function(t,e){if(O(x,t))throw new m(v);return e.facade=t,_(x,t,e),e},r=function(t){return w(x,t)||{}},o=function(t){return O(x,t)}}else{var S=d("state");p[S]=!0,i=function(t,e){if(f(t,S))throw new m(v);return e.facade=t,l(t,S,e),e},r=function(t){return f(t,S)?t[S]:{}},o=function(t){return f(t,S)}}t.exports={set:i,get:r,has:o,enforce:b,getterFor:y}},"6b75":function(t,e,n){"use strict";function i(t,e){(null==e||e>t.length)&&(e=t.length);for(var n=0,i=new Array(e);n<e;n++)i[n]=t[n];return i}n.d(e,"a",(function(){return i}))},"6ca7":function(t,e,n){},"6d61":function(t,e,n){"use strict";var i=n("23e7"),r=n("da84"),o=n("e330"),a=n("94ca"),s=n("cb2d"),c=n("f183"),u=n("2266"),l=n("19aa"),f=n("1626"),h=n("861d"),d=n("d039"),p=n("1c7e"),v=n("d44e"),m=n("7156");t.exports=function(t,e,n){var g=-1!==t.indexOf("Map"),b=-1!==t.indexOf("Weak"),y=g?"set":"add",x=r[t],w=x&&x.prototype,O=x,_={},S=function(t){var e=o(w[t]);s(w,t,"add"==t?function(t){return e(this,0===t?0:t),this}:"delete"==t?function(t){return!(b&&!h(t))&&e(this,0===t?0:t)}:"get"==t?function(t){return b&&!h(t)?void 0:e(this,0===t?0:t)}:"has"==t?function(t){return!(b&&!h(t))&&e(this,0===t?0:t)}:function(t,n){return e(this,0===t?0:t,n),this})},C=a(t,!f(x)||!(b||w.forEach&&!d((function(){(new x).entries().next()}))));if(C)O=n.getConstructor(e,t,g,y),c.enable();else if(a(t,!0)){var k=new O,j=k[y](b?{}:-0,1)!=k,$=d((function(){k.has(1)})),A=p((function(t){new x(t)})),E=!b&&d((function(){var t=new x,e=5;while(e--)t[y](e,e);return!t.has(-0)}));A||(O=e((function(t,e){l(t,w);var n=m(new x,t,O);return void 0!=e&&u(e,n[y],{that:n,AS_ENTRIES:g}),n})),O.prototype=w,w.constructor=O),($||E)&&(S("delete"),S("has"),g&&S("get")),(E||j)&&S(y),b&&w.clear&&delete w.clear}return _[t]=O,i({global:!0,constructor:!0,forced:O!=x},_),v(O,t),b||n.setStrong(O,t,g),O}},"6ece":function(t,e,n){},"6f48":function(t,e,n){"use strict";var i=n("6d61"),r=n("6566");i("Map",(function(t){return function(){return t(this,arguments.length?arguments[0]:void 0)}}),r)},"6f53":function(t,e,n){var i=n("83ab"),r=n("e330"),o=n("df75"),a=n("fc6a"),s=n("d1e7").f,c=r(s),u=r([].push),l=function(t){return function(e){var n,r=a(e),s=o(r),l=s.length,f=0,h=[];while(l>f)n=s[f++],i&&!c(r,n)||u(h,t?[n,r[n]]:r[n]);return 
h}};t.exports={entries:l(!0),values:l(!1)}},7149:function(t,e,n){"use strict";var i=n("23e7"),r=n("d066"),o=n("c430"),a=n("d256"),s=n("4738").CONSTRUCTOR,c=n("cdf9"),u=r("Promise"),l=o&&!s;i({target:"Promise",stat:!0,forced:o||s},{resolve:function(t){return c(l&&this===u?a:this,t)}})},7156:function(t,e,n){var i=n("1626"),r=n("861d"),o=n("d2bb");t.exports=function(t,e,n){var a,s;return o&&i(a=e.constructor)&&a!==n&&r(s=a.prototype)&&s!==n.prototype&&o(t,s),t}},"71d9":function(t,e,n){"use strict";var i=n("3835"),r=n("5530"),o=(n("a9e3"),n("0481"),n("4069"),n("d3b7"),n("5e23"),n("8dd9")),a=n("53ca"),s=(n("a15b"),n("8a79"),n("2ca0"),n("8efc"),n("90a2")),c=(n("36a7"),n("24b2")),u=n("58df"),l=n("80d2"),f=Object(u["a"])(c["a"]).extend({name:"v-responsive",props:{aspectRatio:[String,Number],contentClass:String},computed:{computedAspectRatio:function(){return Number(this.aspectRatio)},aspectStyle:function(){return this.computedAspectRatio?{paddingBottom:1/this.computedAspectRatio*100+"%"}:void 0},__cachedSizer:function(){return this.aspectStyle?this.$createElement("div",{style:this.aspectStyle,staticClass:"v-responsive__sizer"}):[]}},methods:{genContent:function(){return this.$createElement("div",{staticClass:"v-responsive__content",class:this.contentClass},Object(l["l"])(this))}},render:function(t){return t("div",{staticClass:"v-responsive",style:this.measurableStyles,on:this.$listeners},[this.__cachedSizer,this.genContent()])}}),h=f,d=n("7560"),p=n("d9f7"),v=n("d9bd"),m="undefined"!==typeof window&&"IntersectionObserver"in window,g=Object(u["a"])(h,d["a"]).extend({name:"v-img",directives:{intersect:s["a"]},props:{alt:String,contain:Boolean,eager:Boolean,gradient:String,lazySrc:String,options:{type:Object,default:function(){return{root:void 0,rootMargin:void 0,threshold:void 0}}},position:{type:String,default:"center center"},sizes:String,src:{type:[String,Object],default:""},srcset:String,transition:{type:[Boolean,String],default:"fade-transition"}},data:function(){return{currentSrc:"",image:null,isLoading:!0,calculatedAspectRatio:void 0,naturalWidth:void 0,hasError:!1}},computed:{computedAspectRatio:function(){return Number(this.normalisedSrc.aspect||this.calculatedAspectRatio)},normalisedSrc:function(){return this.src&&"object"===Object(a["a"])(this.src)?{src:this.src.src,srcset:this.srcset||this.src.srcset,lazySrc:this.lazySrc||this.src.lazySrc,aspect:Number(this.aspectRatio||this.src.aspect)}:{src:this.src,srcset:this.srcset,lazySrc:this.lazySrc,aspect:Number(this.aspectRatio||0)}},__cachedImage:function(){if(!(this.normalisedSrc.src||this.normalisedSrc.lazySrc||this.gradient))return[];var t=[],e=this.isLoading?this.normalisedSrc.lazySrc:this.currentSrc;this.gradient&&t.push("linear-gradient(".concat(this.gradient,")")),e&&t.push('url("'.concat(e,'")'));var n=this.$createElement("div",{staticClass:"v-image__image",class:{"v-image__image--preload":this.isLoading,"v-image__image--contain":this.contain,"v-image__image--cover":!this.contain},style:{backgroundImage:t.join(", "),backgroundPosition:this.position},key:+this.isLoading});return this.transition?this.$createElement("transition",{attrs:{name:this.transition,mode:"in-out"}},[n]):n}},watch:{src:function(){this.isLoading?this.loadImage():this.init(void 0,void 0,!0)},"$vuetify.breakpoint.width":"getSrc"},mounted:function(){this.init()},methods:{init:function(t,e,n){if(!m||n||this.eager){if(this.normalisedSrc.lazySrc){var i=new 
Image;i.src=this.normalisedSrc.lazySrc,this.pollForSize(i,null)}this.normalisedSrc.src&&this.loadImage()}},onLoad:function(){this.getSrc(),this.isLoading=!1,this.$emit("load",this.src),this.image&&(this.normalisedSrc.src.endsWith(".svg")||this.normalisedSrc.src.startsWith("data:image/svg+xml"))&&(this.image.naturalHeight&&this.image.naturalWidth?(this.naturalWidth=this.image.naturalWidth,this.calculatedAspectRatio=this.image.naturalWidth/this.image.naturalHeight):this.calculatedAspectRatio=1)},onError:function(){this.hasError=!0,this.$emit("error",this.src)},getSrc:function(){this.image&&(this.currentSrc=this.image.currentSrc||this.image.src)},loadImage:function(){var t=this,e=new Image;this.image=e,e.onload=function(){e.decode?e.decode().catch((function(e){Object(v["c"])("Failed to decode image, trying to render anyway\n\n"+"src: ".concat(t.normalisedSrc.src)+(e.message?"\nOriginal error: ".concat(e.message):""),t)})).then(t.onLoad):t.onLoad()},e.onerror=this.onError,this.hasError=!1,this.sizes&&(e.sizes=this.sizes),this.normalisedSrc.srcset&&(e.srcset=this.normalisedSrc.srcset),e.src=this.normalisedSrc.src,this.$emit("loadstart",this.normalisedSrc.src),this.aspectRatio||this.pollForSize(e),this.getSrc()},pollForSize:function(t){var e=this,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:100,i=function i(){var r=t.naturalHeight,o=t.naturalWidth;r||o?(e.naturalWidth=o,e.calculatedAspectRatio=o/r):t.complete||!e.isLoading||e.hasError||null==n||setTimeout(i,n)};i()},genContent:function(){var t=h.options.methods.genContent.call(this);return this.naturalWidth&&this._b(t.data,"div",{style:{width:"".concat(this.naturalWidth,"px")}}),t},__genPlaceholder:function(){var t=Object(l["l"])(this,"placeholder");if(t){var e=this.isLoading?[this.$createElement("div",{staticClass:"v-image__placeholder"},t)]:[];return this.transition?this.$createElement("transition",{props:{appear:!0,name:this.transition}},e):e[0]}}},render:function(t){var e=h.options.render.call(this,t),n=Object(p["a"])(e.data,{staticClass:"v-image",attrs:{"aria-label":this.alt,role:this.alt?"img":void 0},class:this.themeClasses,directives:m?[{name:"intersect",modifiers:{once:!0},value:{handler:this.init,options:this.options}}]:void 0});return e.children=[this.__cachedSizer,this.__cachedImage,this.__genPlaceholder(),this.genContent()],t(e.tag,n,e.children)}});e["a"]=o["a"].extend({name:"v-toolbar",props:{absolute:Boolean,bottom:Boolean,collapse:Boolean,dense:Boolean,extended:Boolean,extensionHeight:{default:48,type:[Number,String]},flat:Boolean,floating:Boolean,prominent:Boolean,short:Boolean,src:{type:[String,Object],default:""},tag:{type:String,default:"header"}},data:function(){return{isExtended:!1}},computed:{computedHeight:function(){var t=this.computedContentHeight;if(!this.isExtended)return t;var e=parseInt(this.extensionHeight);return this.isCollapsed?t:t+(isNaN(e)?0:e)},computedContentHeight:function(){return this.height?parseInt(this.height):this.isProminent&&this.dense?96:this.isProminent&&this.short?112:this.isProminent?128:this.dense?48:this.short||this.$vuetify.breakpoint.smAndDown?56:64},classes:function(){return 
Object(r["a"])(Object(r["a"])({},o["a"].options.computed.classes.call(this)),{},{"v-toolbar":!0,"v-toolbar--absolute":this.absolute,"v-toolbar--bottom":this.bottom,"v-toolbar--collapse":this.collapse,"v-toolbar--collapsed":this.isCollapsed,"v-toolbar--dense":this.dense,"v-toolbar--extended":this.isExtended,"v-toolbar--flat":this.flat,"v-toolbar--floating":this.floating,"v-toolbar--prominent":this.isProminent})},isCollapsed:function(){return this.collapse},isProminent:function(){return this.prominent},styles:function(){return Object(r["a"])(Object(r["a"])({},this.measurableStyles),{},{height:Object(l["d"])(this.computedHeight)})}},created:function(){var t=this,e=[["app","<v-app-bar app>"],["manual-scroll",'<v-app-bar :value="false">'],["clipped-left","<v-app-bar clipped-left>"],["clipped-right","<v-app-bar clipped-right>"],["inverted-scroll","<v-app-bar inverted-scroll>"],["scroll-off-screen","<v-app-bar scroll-off-screen>"],["scroll-target","<v-app-bar scroll-target>"],["scroll-threshold","<v-app-bar scroll-threshold>"],["card","<v-app-bar flat>"]];e.forEach((function(e){var n=Object(i["a"])(e,2),r=n[0],o=n[1];t.$attrs.hasOwnProperty(r)&&Object(v["a"])(r,o,t)}))},methods:{genBackground:function(){var t={height:Object(l["d"])(this.computedHeight),src:this.src},e=this.$scopedSlots.img?this.$scopedSlots.img({props:t}):this.$createElement(g,{props:t});return this.$createElement("div",{staticClass:"v-toolbar__image"},[e])},genContent:function(){return this.$createElement("div",{staticClass:"v-toolbar__content",style:{height:Object(l["d"])(this.computedContentHeight)}},Object(l["l"])(this))},genExtension:function(){return this.$createElement("div",{staticClass:"v-toolbar__extension",style:{height:Object(l["d"])(this.extensionHeight)}},Object(l["l"])(this,"extension"))}},render:function(t){this.isExtended=this.extended||!!this.$scopedSlots.extension;var e=[this.genContent()],n=this.setBackgroundColor(this.color,{class:this.classes,style:this.styles,on:this.$listeners});return this.isExtended&&e.push(this.genExtension()),(this.src||this.$scopedSlots.img)&&e.unshift(this.genBackground()),t(this.tag,n,e)}})},7418:function(t,e){e.f=Object.getOwnPropertySymbols},7435:function(t,e,n){},"746f":function(t,e,n){var i=n("428f"),r=n("1a2d"),o=n("e538"),a=n("9bf2").f;t.exports=function(t){var e=i.Symbol||(i.Symbol={});r(e,t)||a(e,t,{value:o.f(t)})}},7496:function(t,e,n){"use strict";var i=n("5530"),r=(n("d9e2"),n("df86"),n("7560")),o=n("58df");e["a"]=Object(o["a"])(r["a"]).extend({name:"v-app",props:{dark:{type:Boolean,default:void 0},id:{type:String,default:"app"},light:{type:Boolean,default:void 0}},computed:{isDark:function(){return this.$vuetify.theme.dark}},beforeCreate:function(){if(!this.$vuetify||this.$vuetify===this.$root)throw new Error("Vuetify is not properly initialized, see https://vuetifyjs.com/getting-started/quick-start#bootstrapping-the-vuetify-object")},render:function(t){var e=t("div",{staticClass:"v-application--wrap"},this.$slots.default);return t("div",{staticClass:"v-application",class:Object(i["a"])({"v-application--is-rtl":this.$vuetify.rtl,"v-application--is-ltr":!this.$vuetify.rtl},this.themeClasses),attrs:{"data-app":!0},domProps:{id:this.id}},[e])}})},7560:function(t,e,n){"use strict";n.d(e,"b",(function(){return a}));var 
i=n("5530"),r=n("2b0e"),o=r["a"].extend().extend({name:"themeable",provide:function(){return{theme:this.themeableProvide}},inject:{theme:{default:{isDark:!1}}},props:{dark:{type:Boolean,default:null},light:{type:Boolean,default:null}},data:function(){return{themeableProvide:{isDark:!1}}},computed:{appIsDark:function(){return this.$vuetify.theme.dark||!1},isDark:function(){return!0===this.dark||!0!==this.light&&this.theme.isDark},themeClasses:function(){return{"theme--dark":this.isDark,"theme--light":!this.isDark}},rootIsDark:function(){return!0===this.dark||!0!==this.light&&this.appIsDark},rootThemeClasses:function(){return{"theme--dark":this.rootIsDark,"theme--light":!this.rootIsDark}}},watch:{isDark:{handler:function(t,e){t!==e&&(this.themeableProvide.isDark=this.isDark)},immediate:!0}}});function a(t){var e=Object(i["a"])(Object(i["a"])({},t.props),t.injections),n=o.options.computed.isDark.call(e);return o.options.computed.themeClasses.call({isDark:n})}e["a"]=o},7839:function(t,e){t.exports=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"]},"785a":function(t,e,n){var i=n("cc12"),r=i("span").classList,o=r&&r.constructor&&r.constructor.prototype;t.exports=o===Object.prototype?void 0:o},"7a77":function(t,e,n){"use strict";function i(t){this.message=t}i.prototype.toString=function(){return"Cancel"+(this.message?": "+this.message:"")},i.prototype.__CANCEL__=!0,t.exports=i},"7aac":function(t,e,n){"use strict";var i=n("c532");t.exports=i.isStandardBrowserEnv()?function(){return{write:function(t,e,n,r,o,a){var s=[];s.push(t+"="+encodeURIComponent(e)),i.isNumber(n)&&s.push("expires="+new Date(n).toGMTString()),i.isString(r)&&s.push("path="+r),i.isString(o)&&s.push("domain="+o),!0===a&&s.push("secure"),document.cookie=s.join("; ")},read:function(t){var e=document.cookie.match(new RegExp("(^|;\\s*)("+t+")=([^;]*)"));return e?decodeURIComponent(e[3]):null},remove:function(t){this.write(t,"",Date.now()-864e5)}}}():function(){return{write:function(){},read:function(){return null},remove:function(){}}}()},"7b0b":function(t,e,n){var i=n("da84"),r=n("1d80"),o=i.Object;t.exports=function(t){return o(r(t))}},"7bc6":function(t,e,n){"use strict";n.d(e,"d",(function(){return r})),n.d(e,"b",(function(){return o})),n.d(e,"c",(function(){return a})),n.d(e,"a",(function(){return s}));n("5530"),n("3835"),n("ac1f"),n("466d"),n("a15b"),n("d81d"),n("1276"),n("d9e2"),n("b0c0"),n("5319"),n("498a"),n("d3b7"),n("25f0"),n("38cf"),n("99af"),n("fb6a"),n("2ca0"),n("07ac");var i=n("d9bd");n("80d2"),n("8da5");function r(t){return!!t&&!!t.match(/^(#|var\(--|(rgb|hsl)a?\()/)}function o(t){var e;if("number"===typeof t)e=t;else{if("string"!==typeof t)throw new TypeError("Colors can only be numbers or strings, recieved ".concat(null==t?t:t.constructor.name," instead"));var n="#"===t[0]?t.substring(1):t;3===n.length&&(n=n.split("").map((function(t){return t+t})).join("")),6!==n.length&&Object(i["c"])("'".concat(t,"' is not a valid rgb color")),e=parseInt(n,16)}return e<0?(Object(i["c"])("Colors cannot be negative: '".concat(t,"'")),e=0):(e>16777215||isNaN(e))&&(Object(i["c"])("'".concat(t,"' is not a valid rgb color")),e=16777215),e}function a(t){var e=t.toString(16);return e.length<6&&(e="0".repeat(6-e.length)+e),"#"+e}function s(t){return a(o(t))}},"7c73":function(t,e,n){var i,r=n("825a"),o=n("37e8"),a=n("7839"),s=n("d012"),c=n("1be4"),u=n("cc12"),l=n("f772"),f=">",h="<",d="prototype",p="script",v=l("IE_PROTO"),m=function(){},g=function(t){return 
h+p+f+t+h+"/"+p+f},b=function(t){t.write(g("")),t.close();var e=t.parentWindow.Object;return t=null,e},y=function(){var t,e=u("iframe"),n="java"+p+":";return e.style.display="none",c.appendChild(e),e.src=String(n),t=e.contentWindow.document,t.open(),t.write(g("document.F=Object")),t.close(),t.F},x=function(){try{i=new ActiveXObject("htmlfile")}catch(e){}x="undefined"!=typeof document?document.domain&&i?b(i):y():b(i);var t=a.length;while(t--)delete x[d][a[t]];return x()};s[v]=!0,t.exports=Object.create||function(t,e){var n;return null!==t?(m[d]=r(t),n=new m,m[d]=null,n[v]=t):n=x(),void 0===e?n:o.f(n,e)}},"7d8f":function(t,e,n){"use strict";n.d(e,"b",(function(){return $})),n.d(e,"a",(function(){return A}));var i={};n.r(i),n.d(i,"linear",(function(){return f})),n.d(i,"easeInQuad",(function(){return h})),n.d(i,"easeOutQuad",(function(){return d})),n.d(i,"easeInOutQuad",(function(){return p})),n.d(i,"easeInCubic",(function(){return v})),n.d(i,"easeOutCubic",(function(){return m})),n.d(i,"easeInOutCubic",(function(){return g})),n.d(i,"easeInQuart",(function(){return b})),n.d(i,"easeOutQuart",(function(){return y})),n.d(i,"easeInOutQuart",(function(){return x})),n.d(i,"easeInQuint",(function(){return w})),n.d(i,"easeOutQuint",(function(){return O})),n.d(i,"easeInOutQuint",(function(){return _}));var r=n("bee2"),o=n("d4ec"),a=n("99de"),s=n("262e"),c=n("2caf"),u=n("5530"),l=(n("d3b7"),n("d9e2"),n("fff9")),f=function(t){return t},h=function(t){return Math.pow(t,2)},d=function(t){return t*(2-t)},p=function(t){return t<.5?2*Math.pow(t,2):(4-2*t)*t-1},v=function(t){return Math.pow(t,3)},m=function(t){return Math.pow(--t,3)+1},g=function(t){return t<.5?4*Math.pow(t,3):(t-1)*(2*t-2)*(2*t-2)+1},b=function(t){return Math.pow(t,4)},y=function(t){return 1-Math.pow(--t,4)},x=function(t){return t<.5?8*t*t*t*t:1-8*--t*t*t*t},w=function(t){return Math.pow(t,5)},O=function(t){return 1+Math.pow(--t,5)},_=function(t){return t<.5?16*Math.pow(t,5):1+16*Math.pow(--t,5)};n("b0c0");function S(t){if("number"===typeof t)return t;var e=j(t);if(!e)throw"string"===typeof t?new Error('Target element "'.concat(t,'" not found.')):new TypeError("Target must be a Number/Selector/HTMLElement/VueComponent, received ".concat(k(t)," instead."));var n=0;while(e)n+=e.offsetTop,e=e.offsetParent;return n}function C(t){var e=j(t);if(e)return e;throw"string"===typeof t?new Error('Container element "'.concat(t,'" not found.')):new TypeError("Container must be a Selector/HTMLElement/VueComponent, received ".concat(k(t)," instead."))}function k(t){return null==t?t:t.constructor.name}function j(t){return"string"===typeof t?document.querySelector(t):t&&t._isVue?t.$el:t instanceof HTMLElement?t:null}function $(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=Object(u["a"])({container:document.scrollingElement||document.body||document.documentElement,duration:500,offset:0,easing:"easeInOutCubic",appOffset:!0},e),r=C(n.container);if(n.appOffset&&$.framework.application){var o=r.classList.contains("v-navigation-drawer"),a=r.classList.contains("v-navigation-drawer--clipped"),s=$.framework.application,c=s.bar,l=s.top;n.offset+=c,o&&!a||(n.offset+=l)}var f,h=performance.now();f="number"===typeof t?S(t)-n.offset:S(t)-S(r)-n.offset;var d=r.scrollTop;if(f===d)return Promise.resolve(f);var p="function"===typeof n.easing?n.easing:i[n.easing];if(!p)throw new TypeError('Easing function "'.concat(n.easing,'" not found.'));return new Promise((function(t){return requestAnimationFrame((function e(i){var 
o=i-h,a=Math.abs(n.duration?Math.min(o/n.duration,1):1);r.scrollTop=Math.floor(d+(f-d)*p(a));var s=r===document.body?document.documentElement.clientHeight:r.clientHeight,c=s+r.scrollTop>=r.scrollHeight;if(1===a||f>r.scrollTop&&c)return t(f);requestAnimationFrame(e)}))}))}$.framework={},$.init=function(){};var A=function(t){Object(s["a"])(n,t);var e=Object(c["a"])(n);function n(){var t;return Object(o["a"])(this,n),t=e.call(this),Object(a["a"])(t,$)}return Object(r["a"])(n)}(l["a"]);A.property="goTo"},"7db0":function(t,e,n){"use strict";var i=n("23e7"),r=n("b727").find,o=n("44d2"),a="find",s=!0;a in[]&&Array(1)[a]((function(){s=!1})),i({target:"Array",proto:!0,forced:s},{find:function(t){return r(this,t,arguments.length>1?arguments[1]:void 0)}}),o(a)},"7dd0":function(t,e,n){"use strict";var i=n("23e7"),r=n("c65b"),o=n("c430"),a=n("5e77"),s=n("1626"),c=n("9ed3"),u=n("e163"),l=n("d2bb"),f=n("d44e"),h=n("9112"),d=n("cb2d"),p=n("b622"),v=n("3f8c"),m=n("ae93"),g=a.PROPER,b=a.CONFIGURABLE,y=m.IteratorPrototype,x=m.BUGGY_SAFARI_ITERATORS,w=p("iterator"),O="keys",_="values",S="entries",C=function(){return this};t.exports=function(t,e,n,a,p,m,k){c(n,e,a);var j,$,A,E=function(t){if(t===p&&M)return M;if(!x&&t in I)return I[t];switch(t){case O:return function(){return new n(this,t)};case _:return function(){return new n(this,t)};case S:return function(){return new n(this,t)}}return function(){return new n(this)}},T=e+" Iterator",L=!1,I=t.prototype,B=I[w]||I["@@iterator"]||p&&I[p],M=!x&&B||E(p),P="Array"==e&&I.entries||B;if(P&&(j=u(P.call(new t)),j!==Object.prototype&&j.next&&(o||u(j)===y||(l?l(j,y):s(j[w])||d(j,w,C)),f(j,T,!0,!0),o&&(v[T]=C))),g&&p==_&&B&&B.name!==_&&(!o&&b?h(I,"name",_):(L=!0,M=function(){return r(B,this)})),p)if($={values:E(_),keys:m?M:E(O),entries:E(S)},k)for(A in $)(x||L||!(A in I))&&d(I,A,$[A]);else i({target:e,proto:!0,forced:x||L},$);return o&&!k||I[w]===M||d(I,w,M,{name:p}),v[e]=M,$}},"7e2b":function(t,e,n){"use strict";var i=n("2b0e");function r(t){return function(e,n){for(var i in n)Object.prototype.hasOwnProperty.call(e,i)||this.$delete(this.$data[t],i);for(var r in e)this.$set(this.$data[t],r,e[r])}}e["a"]=i["a"].extend({data:function(){return{attrs$:{},listeners$:{}}},created:function(){this.$watch("$attrs",r("attrs$"),{immediate:!0}),this.$watch("$listeners",r("listeners$"),{immediate:!0})}})},"7f9a":function(t,e,n){var i=n("da84"),r=n("1626"),o=n("8925"),a=i.WeakMap;t.exports=r(a)&&/native code/.test(o(a))},"80d2":function(t,e,n){"use strict";n.d(e,"e",(function(){return o})),n.d(e,"i",(function(){return s})),n.d(e,"f",(function(){return c})),n.d(e,"j",(function(){return u})),n.d(e,"k",(function(){return l})),n.d(e,"n",(function(){return f})),n.d(e,"g",(function(){return d})),n.d(e,"h",(function(){return p})),n.d(e,"d",(function(){return v})),n.d(e,"o",(function(){return m})),n.d(e,"p",(function(){return b})),n.d(e,"t",(function(){return y})),n.d(e,"q",(function(){return x})),n.d(e,"a",(function(){return O})),n.d(e,"u",(function(){return _})),n.d(e,"v",(function(){return S})),n.d(e,"m",(function(){return C})),n.d(e,"l",(function(){return k})),n.d(e,"c",(function(){return j})),n.d(e,"s",(function(){return $})),n.d(e,"b",(function(){return A})),n.d(e,"r",(function(){return E}));n("3835");var i=n("53ca"),r=(n("5530"),n("ac1f"),n("5319"),n("498a"),n("99af"),n("b64b"),n("d3b7"),n("1276"),n("a630"),n("3ca3"),n("a9e3"),n("dca8"),n("2ca0"),n("fb6a"),n("4e827"),n("d81d"),n("25f0"),n("4de4"),n("b0c0"),n("38cf"),n("b680"),n("cb29"),n("2b0e"));function o(t){var 
e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"div",n=arguments.length>2?arguments[2]:void 0;return r["a"].extend({name:n||t.replace(/__/g,"-"),functional:!0,props:{tag:{type:String,default:e}},render:function(e,n){var i=n.data,r=n.props,o=n.children;return i.staticClass="".concat(t," ").concat(i.staticClass||"").trim(),e(r.tag,i,o)}})}try{if("undefined"!==typeof window){var a=Object.defineProperty({},"passive",{get:function(){!0}});window.addEventListener("testListener",a,a),window.removeEventListener("testListener",a,a)}}catch(T){console.warn(T)}function s(t,e,n){var i=e.length-1;if(i<0)return void 0===t?n:t;for(var r=0;r<i;r++){if(null==t)return n;t=t[e[r]]}return null==t||void 0===t[e[i]]?n:t[e[i]]}function c(t,e){if(t===e)return!0;if(t instanceof Date&&e instanceof Date&&t.getTime()!==e.getTime())return!1;if(t!==Object(t)||e!==Object(e))return!1;var n=Object.keys(t);return n.length===Object.keys(e).length&&n.every((function(n){return c(t[n],e[n])}))}function u(t,e,n){return null!=t&&e&&"string"===typeof e?void 0!==t[e]?t[e]:(e=e.replace(/\[(\w+)\]/g,".$1"),e=e.replace(/^\./,""),s(t,e.split("."),n)):n}function l(t,e,n){if(null==e)return void 0===t?n:t;if(t!==Object(t))return void 0===n?t:n;if("string"===typeof e)return u(t,e,n);if(Array.isArray(e))return s(t,e,n);if("function"!==typeof e)return n;var i=e(t,n);return"undefined"===typeof i?n:i}function f(t){if(!t||t.nodeType!==Node.ELEMENT_NODE)return 0;var e=+window.getComputedStyle(t).getPropertyValue("z-index");return e||f(t.parentNode)}var h={"&":"&amp;","<":"&lt;",">":"&gt;"};function d(t){return t.replace(/[&<>]/g,(function(t){return h[t]||t}))}function p(t,e){for(var n={},i=0;i<e.length;i++){var r=e[i];"undefined"!==typeof t[r]&&(n[r]=t[r])}return n}function v(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"px";return null==t||""===t?void 0:isNaN(+t)?String(t):"".concat(Number(t)).concat(e)}function m(t){return(t||"").replace(/([a-z])([A-Z])/g,"$1-$2").toLowerCase()}function g(t){return null!==t&&"object"===Object(i["a"])(t)}var b=Object.freeze({enter:13,tab:9,delete:46,esc:27,space:32,up:38,down:40,left:37,right:39,end:35,home:36,del:46,backspace:8,insert:45,pageup:33,pagedown:34,shift:16});function y(t,e){var n=t.$vuetify.icons.component;if(e.startsWith("$")){var i="$vuetify.icons.values.".concat(e.split("$").pop().split(".").pop()),r=u(t,i,e);if("string"!==typeof r)return r;e=r}return null==n?e:{component:n,props:{icon:e}}}function x(t){return Object.keys(t)}var w=/-(\w)/g,O=function(t){return t.replace(w,(function(t,e){return e?e.toUpperCase():""}))};function _(t){return t.charAt(0).toUpperCase()+t.slice(1)}function S(t){return null!=t?Array.isArray(t)?t:[t]:[]}function C(t,e,n){return t.$slots.hasOwnProperty(e)&&t.$scopedSlots.hasOwnProperty(e)&&t.$scopedSlots[e].name?n?"v-slot":"scoped":t.$slots.hasOwnProperty(e)?"normal":t.$scopedSlots.hasOwnProperty(e)?"scoped":void 0}function k(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"default",n=arguments.length>2?arguments[2]:void 0,i=arguments.length>3&&void 0!==arguments[3]&&arguments[3];return t.$scopedSlots.hasOwnProperty(e)?t.$scopedSlots[e](n instanceof Function?n():n):!t.$slots.hasOwnProperty(e)||n&&!i?void 0:t.$slots[e]}function j(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0,n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1;return Math.max(e,Math.min(n,t))}function $(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"0";return t+n.repeat(Math.max(0,e-t.length))}function A(t){var 
e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,n=[],i=0;while(i<t.length)n.push(t.substr(i,e)),i+=e;return n}function E(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};for(var n in e){var i=t[n],r=e[n];g(i)&&g(r)?t[n]=E(i,r):t[n]=r}return t}},"81d5":function(t,e,n){"use strict";var i=n("7b0b"),r=n("23cb"),o=n("07fa");t.exports=function(t){var e=i(this),n=o(e),a=arguments.length,s=r(a>1?arguments[1]:void 0,n),c=a>2?arguments[2]:void 0,u=void 0===c?n:r(c,n);while(u>s)e[s++]=t;return e}},"825a":function(t,e,n){var i=n("da84"),r=n("861d"),o=i.String,a=i.TypeError;t.exports=function(t){if(r(t))return t;throw a(o(t)+" is not an object")}},8336:function(t,e,n){"use strict";var i=n("53ca"),r=n("3835"),o=n("5530"),a=(n("c7cd"),n("a9e3"),n("d3b7"),n("caad"),n("e9c4"),n("86cc"),n("8dd9")),s=a["a"],c=n("22da"),u=n("4e82"),l=n("f2e7"),f=n("c995"),h=n("fe6c"),d=n("1c87"),p=n("af2b"),v=n("58df"),m=n("d9bd"),g=Object(v["a"])(s,d["a"],h["a"],p["a"],Object(u["a"])("btnToggle"),Object(l["b"])("inputValue"));e["a"]=g.extend().extend({name:"v-btn",props:{activeClass:{type:String,default:function(){return this.btnToggle?this.btnToggle.activeClass:""}},block:Boolean,depressed:Boolean,fab:Boolean,icon:Boolean,loading:Boolean,outlined:Boolean,plain:Boolean,retainFocusOnClick:Boolean,rounded:Boolean,tag:{type:String,default:"button"},text:Boolean,tile:Boolean,type:{type:String,default:"button"},value:null},data:function(){return{proxyClass:"v-btn--active"}},computed:{classes:function(){return Object(o["a"])(Object(o["a"])(Object(o["a"])(Object(o["a"])(Object(o["a"])({"v-btn":!0},d["a"].options.computed.classes.call(this)),{},{"v-btn--absolute":this.absolute,"v-btn--block":this.block,"v-btn--bottom":this.bottom,"v-btn--disabled":this.disabled,"v-btn--is-elevated":this.isElevated,"v-btn--fab":this.fab,"v-btn--fixed":this.fixed,"v-btn--has-bg":this.hasBg,"v-btn--icon":this.icon,"v-btn--left":this.left,"v-btn--loading":this.loading,"v-btn--outlined":this.outlined,"v-btn--plain":this.plain,"v-btn--right":this.right,"v-btn--round":this.isRound,"v-btn--rounded":this.rounded,"v-btn--router":this.to,"v-btn--text":this.text,"v-btn--tile":this.tile,"v-btn--top":this.top},this.themeClasses),this.groupClasses),this.elevationClasses),this.sizeableClasses)},computedElevation:function(){if(!this.disabled)return f["a"].options.computed.computedElevation.call(this)},computedRipple:function(){var t,e=!this.icon&&!this.fab||{circle:!0};return!this.disabled&&(null!=(t=this.ripple)?t:e)},hasBg:function(){return!this.text&&!this.plain&&!this.outlined&&!this.icon},isElevated:function(){return Boolean(!this.icon&&!this.text&&!this.outlined&&!this.depressed&&!this.disabled&&!this.plain&&(null==this.elevation||Number(this.elevation)>0))},isRound:function(){return Boolean(this.icon||this.fab)},styles:function(){return Object(o["a"])({},this.measurableStyles)}},created:function(){var t=this,e=[["flat","text"],["outline","outlined"],["round","rounded"]];e.forEach((function(e){var n=Object(r["a"])(e,2),i=n[0],o=n[1];t.$attrs.hasOwnProperty(i)&&Object(m["a"])(i,o,t)}))},methods:{click:function(t){!this.retainFocusOnClick&&!this.fab&&t.detail&&this.$el.blur(),this.$emit("click",t),this.btnToggle&&this.toggle()},genContent:function(){return this.$createElement("span",{staticClass:"v-btn__content"},this.$slots.default)},genLoader:function(){return 
this.$createElement("span",{class:"v-btn__loader"},this.$slots.loader||[this.$createElement(c["a"],{props:{indeterminate:!0,size:23,width:2}})])}},render:function(t){var e=[this.genContent(),this.loading&&this.genLoader()],n=this.generateRouteLink(),r=n.tag,o=n.data,a=this.hasBg?this.setBackgroundColor:this.setTextColor;return"button"===r&&(o.attrs.type=this.type,o.attrs.disabled=this.disabled),o.attrs.value=["string","number"].includes(Object(i["a"])(this.value))?this.value:JSON.stringify(this.value),t(r,this.disabled?o:a(this.color,o),e)}})},"83ab":function(t,e,n){var i=n("d039");t.exports=!i((function(){return 7!=Object.defineProperty({},1,{get:function(){return 7}})[1]}))},8418:function(t,e,n){"use strict";var i=n("a04b"),r=n("9bf2"),o=n("5c6c");t.exports=function(t,e,n){var a=i(e);a in t?r.f(t,a,o(0,n)):t[a]=n}},8547:function(t,e,n){"use strict";var i=n("2b0e"),r=n("80d2");e["a"]=i["a"].extend({name:"comparable",props:{valueComparator:{type:Function,default:r["f"]}}})},"857a":function(t,e,n){var i=n("e330"),r=n("1d80"),o=n("577e"),a=/"/g,s=i("".replace);t.exports=function(t,e,n,i){var c=o(r(t)),u="<"+e;return""!==n&&(u+=" "+n+'="'+s(o(i),a,"&quot;")+'"'),u+">"+c+"</"+e+">"}},"861d":function(t,e,n){var i=n("1626");t.exports=function(t){return"object"==typeof t?null!==t:i(t)}},8654:function(t,e,n){"use strict";var i=n("15fd"),r=n("2909"),o=n("5530"),a=(n("a9e3"),n("0481"),n("4069"),n("d3b7"),n("25f0"),n("caad"),n("2b19"),n("4ff9"),n("c37a")),s=(n("99af"),n("e9b1"),n("7560")),c=n("58df"),u=Object(c["a"])(s["a"]).extend({name:"v-counter",functional:!0,props:{value:{type:[Number,String],default:""},max:[Number,String]},render:function(t,e){var n=e.props,i=parseInt(n.max,10),r=parseInt(n.value,10),a=i?"".concat(r," / ").concat(i):String(n.value),c=i&&r>i;return t("div",{staticClass:"v-counter",class:Object(o["a"])({"error--text":c},Object(s["b"])(e))},a)}}),l=u,f=n("ba87"),h=n("90a2"),d=n("d9bd"),p=n("2b0e");function v(t){return p["a"].extend({name:"intersectable",data:function(){return{isIntersecting:!1}},mounted:function(){h["a"].inserted(this.$el,{name:"intersect",value:this.onObserve},this.$vnode)},destroyed:function(){h["a"].unbind(this.$el,{name:"intersect",value:this.onObserve},this.$vnode)},methods:{onObserve:function(e,n,i){if(this.isIntersecting=i,i)for(var r=0,o=t.onVisible.length;r<o;r++){var a=this[t.onVisible[r]];"function"!==typeof a?Object(d["c"])(t.onVisible[r]+" method is not available on the instance but referenced in intersectable mixin options"):a()}}}})}var m=n("ade3"),g=(n("c7cd"),n("6ece"),n("0789")),b=n("a9ad"),y=n("fe6c"),x=n("a452"),w=n("80d2"),O=Object(c["a"])(b["a"],Object(y["b"])(["absolute","fixed","top","bottom"]),x["a"],s["a"]),_=O.extend({name:"v-progress-linear",directives:{intersect:h["a"]},props:{active:{type:Boolean,default:!0},backgroundColor:{type:String,default:null},backgroundOpacity:{type:[Number,String],default:null},bufferValue:{type:[Number,String],default:100},color:{type:String,default:"primary"},height:{type:[Number,String],default:4},indeterminate:Boolean,query:Boolean,reverse:Boolean,rounded:Boolean,stream:Boolean,striped:Boolean,value:{type:[Number,String],default:0}},data:function(){return{internalLazyValue:this.value||0,isVisible:!0}},computed:{__cachedBackground:function(){return this.$createElement("div",this.setBackgroundColor(this.backgroundColor||this.color,{staticClass:"v-progress-linear__background",style:this.backgroundStyle}))},__cachedBar:function(){return 
this.$createElement(this.computedTransition,[this.__cachedBarType])},__cachedBarType:function(){return this.indeterminate?this.__cachedIndeterminate:this.__cachedDeterminate},__cachedBuffer:function(){return this.$createElement("div",{staticClass:"v-progress-linear__buffer",style:this.styles})},__cachedDeterminate:function(){return this.$createElement("div",this.setBackgroundColor(this.color,{staticClass:"v-progress-linear__determinate",style:{width:Object(w["d"])(this.normalizedValue,"%")}}))},__cachedIndeterminate:function(){return this.$createElement("div",{staticClass:"v-progress-linear__indeterminate",class:{"v-progress-linear__indeterminate--active":this.active}},[this.genProgressBar("long"),this.genProgressBar("short")])},__cachedStream:function(){return this.stream?this.$createElement("div",this.setTextColor(this.color,{staticClass:"v-progress-linear__stream",style:{width:Object(w["d"])(100-this.normalizedBuffer,"%")}})):null},backgroundStyle:function(){var t,e=null==this.backgroundOpacity?this.backgroundColor?1:.3:parseFloat(this.backgroundOpacity);return t={opacity:e},Object(m["a"])(t,this.isReversed?"right":"left",Object(w["d"])(this.normalizedValue,"%")),Object(m["a"])(t,"width",Object(w["d"])(Math.max(0,this.normalizedBuffer-this.normalizedValue),"%")),t},classes:function(){return Object(o["a"])({"v-progress-linear--absolute":this.absolute,"v-progress-linear--fixed":this.fixed,"v-progress-linear--query":this.query,"v-progress-linear--reactive":this.reactive,"v-progress-linear--reverse":this.isReversed,"v-progress-linear--rounded":this.rounded,"v-progress-linear--striped":this.striped,"v-progress-linear--visible":this.isVisible},this.themeClasses)},computedTransition:function(){return this.indeterminate?g["c"]:g["e"]},isReversed:function(){return this.$vuetify.rtl!==this.reverse},normalizedBuffer:function(){return this.normalize(this.bufferValue)},normalizedValue:function(){return this.normalize(this.internalLazyValue)},reactive:function(){return Boolean(this.$listeners.change)},styles:function(){var t={};return this.active||(t.height=0),this.indeterminate||100===parseFloat(this.normalizedBuffer)||(t.width=Object(w["d"])(this.normalizedBuffer,"%")),t}},methods:{genContent:function(){var t=Object(w["l"])(this,"default",{value:this.internalLazyValue});return t?this.$createElement("div",{staticClass:"v-progress-linear__content"},t):null},genListeners:function(){var t=this.$listeners;return this.reactive&&(t.click=this.onClick),t},genProgressBar:function(t){return this.$createElement("div",this.setBackgroundColor(this.color,{staticClass:"v-progress-linear__indeterminate",class:Object(m["a"])({},t,!0)}))},onClick:function(t){if(this.reactive){var e=this.$el.getBoundingClientRect(),n=e.width;this.internalValue=t.offsetX/n*100}},onObserve:function(t,e,n){this.isVisible=n},normalize:function(t){return t<0?0:t>100?100:parseFloat(t)}},render:function(t){var e={staticClass:"v-progress-linear",attrs:{role:"progressbar","aria-valuemin":0,"aria-valuemax":this.normalizedBuffer,"aria-valuenow":this.indeterminate?void 0:this.normalizedValue},class:this.classes,directives:[{name:"intersect",value:this.onObserve}],style:{bottom:this.bottom?0:void 0,height:this.active?Object(w["d"])(this.height):0,top:this.top?0:void 0},on:this.genListeners()};return 
t("div",e,[this.__cachedStream,this.__cachedBackground,this.__cachedBuffer,this.__cachedBar,this.genContent()])}}),S=_,C=p["a"].extend().extend({name:"loadable",props:{loading:{type:[Boolean,String],default:!1},loaderHeight:{type:[Number,String],default:2}},methods:{genProgress:function(){return!1===this.loading?null:this.$slots.progress||this.$createElement(S,{props:{absolute:!0,color:!0===this.loading||""===this.loading?this.color||"primary":this.loading,height:this.loaderHeight,indeterminate:!0}})}}}),k=n("38cb"),j=n("dc22"),$=n("5607"),A=n("dd89"),E=["title"],T=Object(c["a"])(a["a"],v({onVisible:["onResize","tryAutofocus"]}),C),L=["color","file","time","date","datetime-local","week","month"];e["a"]=T.extend().extend({name:"v-text-field",directives:{resize:j["a"],ripple:$["a"]},inheritAttrs:!1,props:{appendOuterIcon:String,autofocus:Boolean,clearable:Boolean,clearIcon:{type:String,default:"$clear"},counter:[Boolean,Number,String],counterValue:Function,filled:Boolean,flat:Boolean,fullWidth:Boolean,label:String,outlined:Boolean,placeholder:String,prefix:String,prependInnerIcon:String,persistentPlaceholder:Boolean,reverse:Boolean,rounded:Boolean,shaped:Boolean,singleLine:Boolean,solo:Boolean,soloInverted:Boolean,suffix:String,type:{type:String,default:"text"}},data:function(){return{badInput:!1,labelWidth:0,prefixWidth:0,prependWidth:0,initialValue:null,isBooted:!1,isClearing:!1}},computed:{classes:function(){return Object(o["a"])(Object(o["a"])({},a["a"].options.computed.classes.call(this)),{},{"v-text-field":!0,"v-text-field--full-width":this.fullWidth,"v-text-field--prefix":this.prefix,"v-text-field--single-line":this.isSingle,"v-text-field--solo":this.isSolo,"v-text-field--solo-inverted":this.soloInverted,"v-text-field--solo-flat":this.flat,"v-text-field--filled":this.filled,"v-text-field--is-booted":this.isBooted,"v-text-field--enclosed":this.isEnclosed,"v-text-field--reverse":this.reverse,"v-text-field--outlined":this.outlined,"v-text-field--placeholder":this.placeholder,"v-text-field--rounded":this.rounded,"v-text-field--shaped":this.shaped})},computedColor:function(){var t=k["a"].options.computed.computedColor.call(this);return this.soloInverted&&this.isFocused?this.color||"primary":t},computedCounterValue:function(){return"function"===typeof this.counterValue?this.counterValue(this.internalValue):Object(r["a"])((this.internalValue||"").toString()).length},hasCounter:function(){return!1!==this.counter&&null!=this.counter},hasDetails:function(){return a["a"].options.computed.hasDetails.call(this)||this.hasCounter},internalValue:{get:function(){return this.lazyValue},set:function(t){this.lazyValue=t,this.$emit("input",this.lazyValue)}},isDirty:function(){var t;return(null==(t=this.lazyValue)?void 0:t.toString().length)>0||this.badInput},isEnclosed:function(){return this.filled||this.isSolo||this.outlined},isLabelActive:function(){return this.isDirty||L.includes(this.type)},isSingle:function(){return this.isSolo||this.singleLine||this.fullWidth||this.filled&&!this.hasLabel},isSolo:function(){return this.solo||this.soloInverted},labelPosition:function(){var t=this.prefix&&!this.labelValue?this.prefixWidth:0;return this.labelValue&&this.prependWidth&&(t-=this.prependWidth),this.$vuetify.rtl===this.reverse?{left:t,right:"auto"}:{left:"auto",right:t}},showLabel:function(){return this.hasLabel&&!(this.isSingle&&this.labelValue)},labelValue:function(){return 
this.isFocused||this.isLabelActive||this.persistentPlaceholder}},watch:{outlined:"setLabelWidth",label:function(){this.$nextTick(this.setLabelWidth)},prefix:function(){this.$nextTick(this.setPrefixWidth)},isFocused:"updateValue",value:function(t){this.lazyValue=t}},created:function(){this.$attrs.hasOwnProperty("box")&&Object(d["a"])("box","filled",this),this.$attrs.hasOwnProperty("browser-autocomplete")&&Object(d["a"])("browser-autocomplete","autocomplete",this),this.shaped&&!(this.filled||this.outlined||this.isSolo)&&Object(d["c"])("shaped should be used with either filled or outlined",this)},mounted:function(){var t=this;this.$watch((function(){return t.labelValue}),this.setLabelWidth),this.autofocus&&this.tryAutofocus(),requestAnimationFrame((function(){t.isBooted=!0,requestAnimationFrame((function(){t.isIntersecting||t.onResize()}))}))},methods:{focus:function(){this.onFocus()},blur:function(t){var e=this;window.requestAnimationFrame((function(){e.$refs.input&&e.$refs.input.blur()}))},clearableCallback:function(){var t=this;this.$refs.input&&this.$refs.input.focus(),this.$nextTick((function(){return t.internalValue=null}))},genAppendSlot:function(){var t=[];return this.$slots["append-outer"]?t.push(this.$slots["append-outer"]):this.appendOuterIcon&&t.push(this.genIcon("appendOuter")),this.genSlot("append","outer",t)},genPrependInnerSlot:function(){var t=[];return this.$slots["prepend-inner"]?t.push(this.$slots["prepend-inner"]):this.prependInnerIcon&&t.push(this.genIcon("prependInner")),this.genSlot("prepend","inner",t)},genIconSlot:function(){var t=[];return this.$slots.append?t.push(this.$slots.append):this.appendIcon&&t.push(this.genIcon("append")),this.genSlot("append","inner",t)},genInputSlot:function(){var t=a["a"].options.methods.genInputSlot.call(this),e=this.genPrependInnerSlot();return e&&(t.children=t.children||[],t.children.unshift(e)),t},genClearIcon:function(){return this.clearable?this.isDirty?this.genSlot("append","inner",[this.genIcon("clear",this.clearableCallback)]):this.genSlot("append","inner",[this.$createElement("div")]):null},genCounter:function(){var t,e,n;if(!this.hasCounter)return null;var i=!0===this.counter?this.attrs$.maxlength:this.counter,r={dark:this.dark,light:this.light,max:i,value:this.computedCounterValue};return null!=(t=null==(e=(n=this.$scopedSlots).counter)?void 0:e.call(n,{props:r}))?t:this.$createElement(l,{props:r})},genControl:function(){return a["a"].options.methods.genControl.call(this)},genDefaultSlot:function(){return[this.genFieldset(),this.genTextFieldSlot(),this.genClearIcon(),this.genIconSlot(),this.genProgress()]},genFieldset:function(){return this.outlined?this.$createElement("fieldset",{attrs:{"aria-hidden":!0}},[this.genLegend()]):null},genLabel:function(){if(!this.showLabel)return null;var t={props:{absolute:!0,color:this.validationState,dark:this.dark,disabled:this.isDisabled,focused:!this.isSingle&&(this.isFocused||!!this.validationState),for:this.computedId,left:this.labelPosition.left,light:this.light,right:this.labelPosition.right,value:this.labelValue}};return this.$createElement(f["a"],t,this.$slots.label||this.label)},genLegend:function(){var t=this.singleLine||!this.labelValue&&!this.isDirty?0:this.labelWidth,e=this.$createElement("span",{domProps:{innerHTML:"&#8203;"},staticClass:"notranslate"});return this.$createElement("legend",{style:{width:this.isSingle?void 0:Object(w["d"])(t)}},[e])},genInput:function(){var t=Object.assign({},this.listeners$);delete t.change;var 
e=this.attrs$,n=(e.title,Object(i["a"])(e,E));return this.$createElement("input",{style:{},domProps:{value:"number"===this.type&&Object.is(this.lazyValue,-0)?"-0":this.lazyValue},attrs:Object(o["a"])(Object(o["a"])({},n),{},{autofocus:this.autofocus,disabled:this.isDisabled,id:this.computedId,placeholder:this.persistentPlaceholder||this.isFocused||!this.hasLabel?this.placeholder:void 0,readonly:this.isReadonly,type:this.type}),on:Object.assign(t,{blur:this.onBlur,input:this.onInput,focus:this.onFocus,keydown:this.onKeyDown}),ref:"input",directives:[{name:"resize",modifiers:{quiet:!0},value:this.onResize}]})},genMessages:function(){if(!this.showDetails)return null;var t=a["a"].options.methods.genMessages.call(this),e=this.genCounter();return this.$createElement("div",{staticClass:"v-text-field__details"},[t,e])},genTextFieldSlot:function(){return this.$createElement("div",{staticClass:"v-text-field__slot"},[this.genLabel(),this.prefix?this.genAffix("prefix"):null,this.genInput(),this.suffix?this.genAffix("suffix"):null])},genAffix:function(t){return this.$createElement("div",{class:"v-text-field__".concat(t),ref:t},this[t])},onBlur:function(t){var e=this;this.isFocused=!1,t&&this.$nextTick((function(){return e.$emit("blur",t)}))},onClick:function(){this.isFocused||this.isDisabled||!this.$refs.input||this.$refs.input.focus()},onFocus:function(t){if(this.$refs.input){var e=Object(A["a"])(this.$el);if(e)return e.activeElement!==this.$refs.input?this.$refs.input.focus():void(this.isFocused||(this.isFocused=!0,t&&this.$emit("focus",t)))}},onInput:function(t){var e=t.target;this.internalValue=e.value,this.badInput=e.validity&&e.validity.badInput},onKeyDown:function(t){t.keyCode===w["p"].enter&&this.lazyValue!==this.initialValue&&(this.initialValue=this.lazyValue,this.$emit("change",this.initialValue)),this.$emit("keydown",t)},onMouseDown:function(t){t.target!==this.$refs.input&&(t.preventDefault(),t.stopPropagation()),a["a"].options.methods.onMouseDown.call(this,t)},onMouseUp:function(t){this.hasMouseDown&&this.focus(),a["a"].options.methods.onMouseUp.call(this,t)},setLabelWidth:function(){this.outlined&&(this.labelWidth=this.$refs.label?Math.min(.75*this.$refs.label.scrollWidth+6,this.$el.offsetWidth-24):0)},setPrefixWidth:function(){this.$refs.prefix&&(this.prefixWidth=this.$refs.prefix.offsetWidth)},setPrependWidth:function(){this.outlined&&this.$refs["prepend-inner"]&&(this.prependWidth=this.$refs["prepend-inner"].offsetWidth)},tryAutofocus:function(){if(!this.autofocus||"undefined"===typeof document||!this.$refs.input)return!1;var t=Object(A["a"])(this.$el);return!(!t||t.activeElement===this.$refs.input)&&(this.$refs.input.focus(),!0)},updateValue:function(t){this.hasColor=t,t?this.initialValue=this.lazyValue:this.initialValue!==this.lazyValue&&this.$emit("change",this.lazyValue)},onResize:function(){this.setLabelWidth(),this.setPrefixWidth(),this.setPrependWidth()}}})},"86cc":function(t,e,n){},8925:function(t,e,n){var i=n("e330"),r=n("1626"),o=n("c6cd"),a=i(Function.toString);r(o.inspectSource)||(o.inspectSource=function(t){return a(t)}),t.exports=o.inspectSource},"899c":function(t,e,n){},"8a79":function(t,e,n){"use strict";var i=n("23e7"),r=n("e330"),o=n("06cf").f,a=n("50c4"),s=n("577e"),c=n("5a34"),u=n("1d80"),l=n("ab13"),f=n("c430"),h=r("".endsWith),d=r("".slice),p=Math.min,v=l("endsWith"),m=!f&&!v&&!!function(){var t=o(String.prototype,"endsWith");return t&&!t.writable}();i({target:"String",proto:!0,forced:!m&&!v},{endsWith:function(t){var e=s(u(this));c(t);var 
n=arguments.length>1?arguments[1]:void 0,i=e.length,r=void 0===n?i:p(a(n),i),o=s(t);return h?h(e,o,r):d(e,r-o.length,r)===o}})},"8aa5":function(t,e,n){"use strict";var i=n("6547").charAt;t.exports=function(t,e,n){return e+(n?i(t,e).length:1)}},"8adc":function(t,e,n){},"8b0d":function(t,e,n){},"8b37":function(t,e,n){},"8c4f":function(t,e,n){"use strict"; -/*! - * vue-router v3.5.3 - * (c) 2021 Evan You - * @license MIT - */function i(t,e){for(var n in e)t[n]=e[n];return t}var r=/[!'()*]/g,o=function(t){return"%"+t.charCodeAt(0).toString(16)},a=/%2C/g,s=function(t){return encodeURIComponent(t).replace(r,o).replace(a,",")};function c(t){try{return decodeURIComponent(t)}catch(e){0}return t}function u(t,e,n){void 0===e&&(e={});var i,r=n||f;try{i=r(t||"")}catch(s){i={}}for(var o in e){var a=e[o];i[o]=Array.isArray(a)?a.map(l):l(a)}return i}var l=function(t){return null==t||"object"===typeof t?t:String(t)};function f(t){var e={};return t=t.trim().replace(/^(\?|#|&)/,""),t?(t.split("&").forEach((function(t){var n=t.replace(/\+/g," ").split("="),i=c(n.shift()),r=n.length>0?c(n.join("=")):null;void 0===e[i]?e[i]=r:Array.isArray(e[i])?e[i].push(r):e[i]=[e[i],r]})),e):e}function h(t){var e=t?Object.keys(t).map((function(e){var n=t[e];if(void 0===n)return"";if(null===n)return s(e);if(Array.isArray(n)){var i=[];return n.forEach((function(t){void 0!==t&&(null===t?i.push(s(e)):i.push(s(e)+"="+s(t)))})),i.join("&")}return s(e)+"="+s(n)})).filter((function(t){return t.length>0})).join("&"):null;return e?"?"+e:""}var d=/\/?$/;function p(t,e,n,i){var r=i&&i.options.stringifyQuery,o=e.query||{};try{o=v(o)}catch(s){}var a={name:e.name||t&&t.name,meta:t&&t.meta||{},path:e.path||"/",hash:e.hash||"",query:o,params:e.params||{},fullPath:b(e,r),matched:t?g(t):[]};return n&&(a.redirectedFrom=b(n,r)),Object.freeze(a)}function v(t){if(Array.isArray(t))return t.map(v);if(t&&"object"===typeof t){var e={};for(var n in t)e[n]=v(t[n]);return e}return t}var m=p(null,{path:"/"});function g(t){var e=[];while(t)e.unshift(t),t=t.parent;return e}function b(t,e){var n=t.path,i=t.query;void 0===i&&(i={});var r=t.hash;void 0===r&&(r="");var o=e||h;return(n||"/")+o(i)+r}function y(t,e,n){return e===m?t===e:!!e&&(t.path&&e.path?t.path.replace(d,"")===e.path.replace(d,"")&&(n||t.hash===e.hash&&x(t.query,e.query)):!(!t.name||!e.name)&&(t.name===e.name&&(n||t.hash===e.hash&&x(t.query,e.query)&&x(t.params,e.params))))}function x(t,e){if(void 0===t&&(t={}),void 0===e&&(e={}),!t||!e)return t===e;var n=Object.keys(t).sort(),i=Object.keys(e).sort();return n.length===i.length&&n.every((function(n,r){var o=t[n],a=i[r];if(a!==n)return!1;var s=e[n];return null==o||null==s?o===s:"object"===typeof o&&"object"===typeof s?x(o,s):String(o)===String(s)}))}function w(t,e){return 0===t.path.replace(d,"/").indexOf(e.path.replace(d,"/"))&&(!e.hash||t.hash===e.hash)&&O(t.query,e.query)}function O(t,e){for(var n in e)if(!(n in t))return!1;return!0}function _(t){for(var e=0;e<t.matched.length;e++){var n=t.matched[e];for(var i in n.instances){var r=n.instances[i],o=n.enteredCbs[i];if(r&&o){delete n.enteredCbs[i];for(var a=0;a<o.length;a++)r._isBeingDestroyed||o[a](r)}}}}var S={name:"RouterView",functional:!0,props:{name:{type:String,default:"default"}},render:function(t,e){var n=e.props,r=e.children,o=e.parent,a=e.data;a.routerView=!0;var s=o.$createElement,c=n.name,u=o.$route,l=o._routerViewCache||(o._routerViewCache={}),f=0,h=!1;while(o&&o._routerRoot!==o){var 
d=o.$vnode?o.$vnode.data:{};d.routerView&&f++,d.keepAlive&&o._directInactive&&o._inactive&&(h=!0),o=o.$parent}if(a.routerViewDepth=f,h){var p=l[c],v=p&&p.component;return v?(p.configProps&&C(v,a,p.route,p.configProps),s(v,a,r)):s()}var m=u.matched[f],g=m&&m.components[c];if(!m||!g)return l[c]=null,s();l[c]={component:g},a.registerRouteInstance=function(t,e){var n=m.instances[c];(e&&n!==t||!e&&n===t)&&(m.instances[c]=e)},(a.hook||(a.hook={})).prepatch=function(t,e){m.instances[c]=e.componentInstance},a.hook.init=function(t){t.data.keepAlive&&t.componentInstance&&t.componentInstance!==m.instances[c]&&(m.instances[c]=t.componentInstance),_(u)};var b=m.props&&m.props[c];return b&&(i(l[c],{route:u,configProps:b}),C(g,a,u,b)),s(g,a,r)}};function C(t,e,n,r){var o=e.props=k(n,r);if(o){o=e.props=i({},o);var a=e.attrs=e.attrs||{};for(var s in o)t.props&&s in t.props||(a[s]=o[s],delete o[s])}}function k(t,e){switch(typeof e){case"undefined":return;case"object":return e;case"function":return e(t);case"boolean":return e?t.params:void 0;default:0}}function j(t,e,n){var i=t.charAt(0);if("/"===i)return t;if("?"===i||"#"===i)return e+t;var r=e.split("/");n&&r[r.length-1]||r.pop();for(var o=t.replace(/^\//,"").split("/"),a=0;a<o.length;a++){var s=o[a];".."===s?r.pop():"."!==s&&r.push(s)}return""!==r[0]&&r.unshift(""),r.join("/")}function $(t){var e="",n="",i=t.indexOf("#");i>=0&&(e=t.slice(i),t=t.slice(0,i));var r=t.indexOf("?");return r>=0&&(n=t.slice(r+1),t=t.slice(0,r)),{path:t,query:n,hash:e}}function A(t){return t.replace(/\/+/g,"/")}var E=Array.isArray||function(t){return"[object Array]"==Object.prototype.toString.call(t)},T=K,L=D,I=R,B=F,M=Z,P=new RegExp(["(\\\\.)","([\\/.])?(?:(?:\\:(\\w+)(?:\\(((?:\\\\.|[^\\\\()])+)\\))?|\\(((?:\\\\.|[^\\\\()])+)\\))([+*?])?|(\\*))"].join("|"),"g");function D(t,e){var n,i=[],r=0,o=0,a="",s=e&&e.delimiter||"/";while(null!=(n=P.exec(t))){var c=n[0],u=n[1],l=n.index;if(a+=t.slice(o,l),o=l+c.length,u)a+=u[1];else{var f=t[o],h=n[2],d=n[3],p=n[4],v=n[5],m=n[6],g=n[7];a&&(i.push(a),a="");var b=null!=h&&null!=f&&f!==h,y="+"===m||"*"===m,x="?"===m||"*"===m,w=n[2]||s,O=p||v;i.push({name:d||r++,prefix:h||"",delimiter:w,optional:x,repeat:y,partial:b,asterisk:!!g,pattern:O?H(O):g?".*":"[^"+z(w)+"]+?"})}}return o<t.length&&(a+=t.substr(o)),a&&i.push(a),i}function R(t,e){return F(D(t,e),e)}function N(t){return encodeURI(t).replace(/[\/?#]/g,(function(t){return"%"+t.charCodeAt(0).toString(16).toUpperCase()}))}function V(t){return encodeURI(t).replace(/[?#]/g,(function(t){return"%"+t.charCodeAt(0).toString(16).toUpperCase()}))}function F(t,e){for(var n=new Array(t.length),i=0;i<t.length;i++)"object"===typeof t[i]&&(n[i]=new RegExp("^(?:"+t[i].pattern+")$",U(e)));return function(e,i){for(var r="",o=e||{},a=i||{},s=a.pretty?N:encodeURIComponent,c=0;c<t.length;c++){var u=t[c];if("string"!==typeof u){var l,f=o[u.name];if(null==f){if(u.optional){u.partial&&(r+=u.prefix);continue}throw new TypeError('Expected "'+u.name+'" to be defined')}if(E(f)){if(!u.repeat)throw new TypeError('Expected "'+u.name+'" to not repeat, but received `'+JSON.stringify(f)+"`");if(0===f.length){if(u.optional)continue;throw new TypeError('Expected "'+u.name+'" to not be empty')}for(var h=0;h<f.length;h++){if(l=s(f[h]),!n[c].test(l))throw new TypeError('Expected all "'+u.name+'" to match "'+u.pattern+'", but received `'+JSON.stringify(l)+"`");r+=(0===h?u.prefix:u.delimiter)+l}}else{if(l=u.asterisk?V(f):s(f),!n[c].test(l))throw new TypeError('Expected "'+u.name+'" to match "'+u.pattern+'", but received 
"'+l+'"');r+=u.prefix+l}}else r+=u}return r}}function z(t){return t.replace(/([.+*?=^!:${}()[\]|\/\\])/g,"\\$1")}function H(t){return t.replace(/([=!:$\/()])/g,"\\$1")}function W(t,e){return t.keys=e,t}function U(t){return t&&t.sensitive?"":"i"}function q(t,e){var n=t.source.match(/\((?!\?)/g);if(n)for(var i=0;i<n.length;i++)e.push({name:i,prefix:null,delimiter:null,optional:!1,repeat:!1,partial:!1,asterisk:!1,pattern:null});return W(t,e)}function G(t,e,n){for(var i=[],r=0;r<t.length;r++)i.push(K(t[r],e,n).source);var o=new RegExp("(?:"+i.join("|")+")",U(n));return W(o,e)}function Y(t,e,n){return Z(D(t,n),e,n)}function Z(t,e,n){E(e)||(n=e||n,e=[]),n=n||{};for(var i=n.strict,r=!1!==n.end,o="",a=0;a<t.length;a++){var s=t[a];if("string"===typeof s)o+=z(s);else{var c=z(s.prefix),u="(?:"+s.pattern+")";e.push(s),s.repeat&&(u+="(?:"+c+u+")*"),u=s.optional?s.partial?c+"("+u+")?":"(?:"+c+"("+u+"))?":c+"("+u+")",o+=u}}var l=z(n.delimiter||"/"),f=o.slice(-l.length)===l;return i||(o=(f?o.slice(0,-l.length):o)+"(?:"+l+"(?=$))?"),o+=r?"$":i&&f?"":"(?="+l+"|$)",W(new RegExp("^"+o,U(n)),e)}function K(t,e,n){return E(e)||(n=e||n,e=[]),n=n||{},t instanceof RegExp?q(t,e):E(t)?G(t,e,n):Y(t,e,n)}T.parse=L,T.compile=I,T.tokensToFunction=B,T.tokensToRegExp=M;var X=Object.create(null);function J(t,e,n){e=e||{};try{var i=X[t]||(X[t]=T.compile(t));return"string"===typeof e.pathMatch&&(e[0]=e.pathMatch),i(e,{pretty:!0})}catch(r){return""}finally{delete e[0]}}function Q(t,e,n,r){var o="string"===typeof t?{path:t}:t;if(o._normalized)return o;if(o.name){o=i({},t);var a=o.params;return a&&"object"===typeof a&&(o.params=i({},a)),o}if(!o.path&&o.params&&e){o=i({},o),o._normalized=!0;var s=i(i({},e.params),o.params);if(e.name)o.name=e.name,o.params=s;else if(e.matched.length){var c=e.matched[e.matched.length-1].path;o.path=J(c,s,"path "+e.path)}else 0;return o}var l=$(o.path||""),f=e&&e.path||"/",h=l.path?j(l.path,f,n||o.append):f,d=u(l.query,o.query,r&&r.options.parseQuery),p=o.hash||l.hash;return p&&"#"!==p.charAt(0)&&(p="#"+p),{_normalized:!0,path:h,query:d,hash:p}}var tt,et=[String,Object],nt=[String,Array],it=function(){},rt={name:"RouterLink",props:{to:{type:et,required:!0},tag:{type:String,default:"a"},custom:Boolean,exact:Boolean,exactPath:Boolean,append:Boolean,replace:Boolean,activeClass:String,exactActiveClass:String,ariaCurrentValue:{type:String,default:"page"},event:{type:nt,default:"click"}},render:function(t){var e=this,n=this.$router,r=this.$route,o=n.resolve(this.to,r,this.append),a=o.location,s=o.route,c=o.href,u={},l=n.options.linkActiveClass,f=n.options.linkExactActiveClass,h=null==l?"router-link-active":l,d=null==f?"router-link-exact-active":f,v=null==this.activeClass?h:this.activeClass,m=null==this.exactActiveClass?d:this.exactActiveClass,g=s.redirectedFrom?p(null,Q(s.redirectedFrom),null,n):s;u[m]=y(r,g,this.exactPath),u[v]=this.exact||this.exactPath?u[m]:w(r,g);var b=u[m]?this.ariaCurrentValue:null,x=function(t){ot(t)&&(e.replace?n.replace(a,it):n.push(a,it))},O={click:ot};Array.isArray(this.event)?this.event.forEach((function(t){O[t]=x})):O[this.event]=x;var _={class:u},S=!this.$scopedSlots.$hasNormal&&this.$scopedSlots.default&&this.$scopedSlots.default({href:c,route:s,navigate:x,isActive:u[v],isExactActive:u[m]});if(S){if(1===S.length)return S[0];if(S.length>1||!S.length)return 0===S.length?t():t("span",{},S)}if("a"===this.tag)_.on=O,_.attrs={href:c,"aria-current":b};else{var C=at(this.$slots.default);if(C){C.isStatic=!1;var k=C.data=i({},C.data);for(var j in k.on=k.on||{},k.on){var $=k.on[j];j 
in O&&(k.on[j]=Array.isArray($)?$:[$])}for(var A in O)A in k.on?k.on[A].push(O[A]):k.on[A]=x;var E=C.data.attrs=i({},C.data.attrs);E.href=c,E["aria-current"]=b}else _.on=O}return t(this.tag,_,this.$slots.default)}};function ot(t){if(!(t.metaKey||t.altKey||t.ctrlKey||t.shiftKey)&&!t.defaultPrevented&&(void 0===t.button||0===t.button)){if(t.currentTarget&&t.currentTarget.getAttribute){var e=t.currentTarget.getAttribute("target");if(/\b_blank\b/i.test(e))return}return t.preventDefault&&t.preventDefault(),!0}}function at(t){if(t)for(var e,n=0;n<t.length;n++){if(e=t[n],"a"===e.tag)return e;if(e.children&&(e=at(e.children)))return e}}function st(t){if(!st.installed||tt!==t){st.installed=!0,tt=t;var e=function(t){return void 0!==t},n=function(t,n){var i=t.$options._parentVnode;e(i)&&e(i=i.data)&&e(i=i.registerRouteInstance)&&i(t,n)};t.mixin({beforeCreate:function(){e(this.$options.router)?(this._routerRoot=this,this._router=this.$options.router,this._router.init(this),t.util.defineReactive(this,"_route",this._router.history.current)):this._routerRoot=this.$parent&&this.$parent._routerRoot||this,n(this,this)},destroyed:function(){n(this)}}),Object.defineProperty(t.prototype,"$router",{get:function(){return this._routerRoot._router}}),Object.defineProperty(t.prototype,"$route",{get:function(){return this._routerRoot._route}}),t.component("RouterView",S),t.component("RouterLink",rt);var i=t.config.optionMergeStrategies;i.beforeRouteEnter=i.beforeRouteLeave=i.beforeRouteUpdate=i.created}}var ct="undefined"!==typeof window;function ut(t,e,n,i,r){var o=e||[],a=n||Object.create(null),s=i||Object.create(null);t.forEach((function(t){lt(o,a,s,t,r)}));for(var c=0,u=o.length;c<u;c++)"*"===o[c]&&(o.push(o.splice(c,1)[0]),u--,c--);return{pathList:o,pathMap:a,nameMap:s}}function lt(t,e,n,i,r,o){var a=i.path,s=i.name;var c=i.pathToRegexpOptions||{},u=ht(a,r,c.strict);"boolean"===typeof i.caseSensitive&&(c.sensitive=i.caseSensitive);var l={path:u,regex:ft(u,c),components:i.components||{default:i.component},alias:i.alias?"string"===typeof i.alias?[i.alias]:i.alias:[],instances:{},enteredCbs:{},name:s,parent:r,matchAs:o,redirect:i.redirect,beforeEnter:i.beforeEnter,meta:i.meta||{},props:null==i.props?{}:i.components?i.props:{default:i.props}};if(i.children&&i.children.forEach((function(i){var r=o?A(o+"/"+i.path):void 0;lt(t,e,n,i,l,r)})),e[l.path]||(t.push(l.path),e[l.path]=l),void 0!==i.alias)for(var f=Array.isArray(i.alias)?i.alias:[i.alias],h=0;h<f.length;++h){var d=f[h];0;var p={path:d,children:i.children};lt(t,e,n,p,r,l.path||"/")}s&&(n[s]||(n[s]=l))}function ft(t,e){var n=T(t,[],e);return n}function ht(t,e,n){return n||(t=t.replace(/\/$/,"")),"/"===t[0]||null==e?t:A(e.path+"/"+t)}function dt(t,e){var n=ut(t),i=n.pathList,r=n.pathMap,o=n.nameMap;function a(t){ut(t,i,r,o)}function s(t,e){var n="object"!==typeof t?o[t]:void 0;ut([e||t],i,r,o,n),n&&n.alias.length&&ut(n.alias.map((function(t){return{path:t,children:[e]}})),i,r,o,n)}function c(){return i.map((function(t){return r[t]}))}function u(t,n,a){var s=Q(t,n,!1,e),c=s.name;if(c){var u=o[c];if(!u)return h(null,s);var l=u.regex.keys.filter((function(t){return!t.optional})).map((function(t){return t.name}));if("object"!==typeof s.params&&(s.params={}),n&&"object"===typeof n.params)for(var f in n.params)!(f in s.params)&&l.indexOf(f)>-1&&(s.params[f]=n.params[f]);return s.path=J(u.path,s.params,'named route "'+c+'"'),h(u,s,a)}if(s.path){s.params={};for(var d=0;d<i.length;d++){var p=i[d],v=r[p];if(pt(v.regex,s.path,s.params))return h(v,s,a)}}return 
h(null,s)}function l(t,n){var i=t.redirect,r="function"===typeof i?i(p(t,n,null,e)):i;if("string"===typeof r&&(r={path:r}),!r||"object"!==typeof r)return h(null,n);var a=r,s=a.name,c=a.path,l=n.query,f=n.hash,d=n.params;if(l=a.hasOwnProperty("query")?a.query:l,f=a.hasOwnProperty("hash")?a.hash:f,d=a.hasOwnProperty("params")?a.params:d,s){o[s];return u({_normalized:!0,name:s,query:l,hash:f,params:d},void 0,n)}if(c){var v=vt(c,t),m=J(v,d,'redirect route with path "'+v+'"');return u({_normalized:!0,path:m,query:l,hash:f},void 0,n)}return h(null,n)}function f(t,e,n){var i=J(n,e.params,'aliased route with path "'+n+'"'),r=u({_normalized:!0,path:i});if(r){var o=r.matched,a=o[o.length-1];return e.params=r.params,h(a,e)}return h(null,e)}function h(t,n,i){return t&&t.redirect?l(t,i||n):t&&t.matchAs?f(t,n,t.matchAs):p(t,n,i,e)}return{match:u,addRoute:s,getRoutes:c,addRoutes:a}}function pt(t,e,n){var i=e.match(t);if(!i)return!1;if(!n)return!0;for(var r=1,o=i.length;r<o;++r){var a=t.keys[r-1];a&&(n[a.name||"pathMatch"]="string"===typeof i[r]?c(i[r]):i[r])}return!0}function vt(t,e){return j(t,e.parent?e.parent.path:"/",!0)}var mt=ct&&window.performance&&window.performance.now?window.performance:Date;function gt(){return mt.now().toFixed(3)}var bt=gt();function yt(){return bt}function xt(t){return bt=t}var wt=Object.create(null);function Ot(){"scrollRestoration"in window.history&&(window.history.scrollRestoration="manual");var t=window.location.protocol+"//"+window.location.host,e=window.location.href.replace(t,""),n=i({},window.history.state);return n.key=yt(),window.history.replaceState(n,"",e),window.addEventListener("popstate",Ct),function(){window.removeEventListener("popstate",Ct)}}function _t(t,e,n,i){if(t.app){var r=t.options.scrollBehavior;r&&t.app.$nextTick((function(){var o=kt(),a=r.call(t,e,n,i?o:null);a&&("function"===typeof a.then?a.then((function(t){It(t,o)})).catch((function(t){0})):It(a,o))}))}}function St(){var t=yt();t&&(wt[t]={x:window.pageXOffset,y:window.pageYOffset})}function Ct(t){St(),t.state&&t.state.key&&xt(t.state.key)}function kt(){var t=yt();if(t)return wt[t]}function jt(t,e){var n=document.documentElement,i=n.getBoundingClientRect(),r=t.getBoundingClientRect();return{x:r.left-i.left-e.x,y:r.top-i.top-e.y}}function $t(t){return Tt(t.x)||Tt(t.y)}function At(t){return{x:Tt(t.x)?t.x:window.pageXOffset,y:Tt(t.y)?t.y:window.pageYOffset}}function Et(t){return{x:Tt(t.x)?t.x:0,y:Tt(t.y)?t.y:0}}function Tt(t){return"number"===typeof t}var Lt=/^#\d/;function It(t,e){var n="object"===typeof t;if(n&&"string"===typeof t.selector){var i=Lt.test(t.selector)?document.getElementById(t.selector.slice(1)):document.querySelector(t.selector);if(i){var r=t.offset&&"object"===typeof t.offset?t.offset:{};r=Et(r),e=jt(i,r)}else $t(t)&&(e=At(t))}else n&&$t(t)&&(e=At(t));e&&("scrollBehavior"in document.documentElement.style?window.scrollTo({left:e.x,top:e.y,behavior:t.behavior}):window.scrollTo(e.x,e.y))}var Bt=ct&&function(){var t=window.navigator.userAgent;return(-1===t.indexOf("Android 2.")&&-1===t.indexOf("Android 4.0")||-1===t.indexOf("Mobile Safari")||-1!==t.indexOf("Chrome")||-1!==t.indexOf("Windows Phone"))&&(window.history&&"function"===typeof window.history.pushState)}();function Mt(t,e){St();var n=window.history;try{if(e){var r=i({},n.state);r.key=yt(),n.replaceState(r,"",t)}else n.pushState({key:xt(gt())},"",t)}catch(o){window.location[e?"replace":"assign"](t)}}function Pt(t){Mt(t,!0)}function Dt(t,e,n){var i=function(r){r>=t.length?n():t[r]?e(t[r],(function(){i(r+1)})):i(r+1)};i(0)}var 
Rt={redirected:2,aborted:4,cancelled:8,duplicated:16};function Nt(t,e){return Ht(t,e,Rt.redirected,'Redirected when going from "'+t.fullPath+'" to "'+Ut(e)+'" via a navigation guard.')}function Vt(t,e){var n=Ht(t,e,Rt.duplicated,'Avoided redundant navigation to current location: "'+t.fullPath+'".');return n.name="NavigationDuplicated",n}function Ft(t,e){return Ht(t,e,Rt.cancelled,'Navigation cancelled from "'+t.fullPath+'" to "'+e.fullPath+'" with a new navigation.')}function zt(t,e){return Ht(t,e,Rt.aborted,'Navigation aborted from "'+t.fullPath+'" to "'+e.fullPath+'" via a navigation guard.')}function Ht(t,e,n,i){var r=new Error(i);return r._isRouter=!0,r.from=t,r.to=e,r.type=n,r}var Wt=["params","query","hash"];function Ut(t){if("string"===typeof t)return t;if("path"in t)return t.path;var e={};return Wt.forEach((function(n){n in t&&(e[n]=t[n])})),JSON.stringify(e,null,2)}function qt(t){return Object.prototype.toString.call(t).indexOf("Error")>-1}function Gt(t,e){return qt(t)&&t._isRouter&&(null==e||t.type===e)}function Yt(t){return function(e,n,i){var r=!1,o=0,a=null;Zt(t,(function(t,e,n,s){if("function"===typeof t&&void 0===t.cid){r=!0,o++;var c,u=Qt((function(e){Jt(e)&&(e=e.default),t.resolved="function"===typeof e?e:tt.extend(e),n.components[s]=e,o--,o<=0&&i()})),l=Qt((function(t){var e="Failed to resolve async component "+s+": "+t;a||(a=qt(t)?t:new Error(e),i(a))}));try{c=t(u,l)}catch(h){l(h)}if(c)if("function"===typeof c.then)c.then(u,l);else{var f=c.component;f&&"function"===typeof f.then&&f.then(u,l)}}})),r||i()}}function Zt(t,e){return Kt(t.map((function(t){return Object.keys(t.components).map((function(n){return e(t.components[n],t.instances[n],t,n)}))})))}function Kt(t){return Array.prototype.concat.apply([],t)}var Xt="function"===typeof Symbol&&"symbol"===typeof Symbol.toStringTag;function Jt(t){return t.__esModule||Xt&&"Module"===t[Symbol.toStringTag]}function Qt(t){var e=!1;return function(){var n=[],i=arguments.length;while(i--)n[i]=arguments[i];if(!e)return e=!0,t.apply(this,n)}}var te=function(t,e){this.router=t,this.base=ee(e),this.current=m,this.pending=null,this.ready=!1,this.readyCbs=[],this.readyErrorCbs=[],this.errorCbs=[],this.listeners=[]};function ee(t){if(!t)if(ct){var e=document.querySelector("base");t=e&&e.getAttribute("href")||"/",t=t.replace(/^https?:\/\/[^\/]+/,"")}else t="/";return"/"!==t.charAt(0)&&(t="/"+t),t.replace(/\/$/,"")}function ne(t,e){var n,i=Math.max(t.length,e.length);for(n=0;n<i;n++)if(t[n]!==e[n])break;return{updated:e.slice(0,n),activated:e.slice(n),deactivated:t.slice(n)}}function ie(t,e,n,i){var r=Zt(t,(function(t,i,r,o){var a=re(t,e);if(a)return Array.isArray(a)?a.map((function(t){return n(t,i,r,o)})):n(a,i,r,o)}));return Kt(i?r.reverse():r)}function re(t,e){return"function"!==typeof t&&(t=tt.extend(t)),t.options[e]}function oe(t){return ie(t,"beforeRouteLeave",se,!0)}function ae(t){return ie(t,"beforeRouteUpdate",se)}function se(t,e){if(e)return function(){return t.apply(e,arguments)}}function ce(t){return ie(t,"beforeRouteEnter",(function(t,e,n,i){return ue(t,n,i)}))}function ue(t,e,n){return function(i,r,o){return t(i,r,(function(t){"function"===typeof t&&(e.enteredCbs[n]||(e.enteredCbs[n]=[]),e.enteredCbs[n].push(t)),o(t)}))}}te.prototype.listen=function(t){this.cb=t},te.prototype.onReady=function(t,e){this.ready?t():(this.readyCbs.push(t),e&&this.readyErrorCbs.push(e))},te.prototype.onError=function(t){this.errorCbs.push(t)},te.prototype.transitionTo=function(t,e,n){var 
i,r=this;try{i=this.router.match(t,this.current)}catch(a){throw this.errorCbs.forEach((function(t){t(a)})),a}var o=this.current;this.confirmTransition(i,(function(){r.updateRoute(i),e&&e(i),r.ensureURL(),r.router.afterHooks.forEach((function(t){t&&t(i,o)})),r.ready||(r.ready=!0,r.readyCbs.forEach((function(t){t(i)})))}),(function(t){n&&n(t),t&&!r.ready&&(Gt(t,Rt.redirected)&&o===m||(r.ready=!0,r.readyErrorCbs.forEach((function(e){e(t)}))))}))},te.prototype.confirmTransition=function(t,e,n){var i=this,r=this.current;this.pending=t;var o=function(t){!Gt(t)&&qt(t)&&(i.errorCbs.length?i.errorCbs.forEach((function(e){e(t)})):console.error(t)),n&&n(t)},a=t.matched.length-1,s=r.matched.length-1;if(y(t,r)&&a===s&&t.matched[a]===r.matched[s])return this.ensureURL(),t.hash&&_t(this.router,r,t,!1),o(Vt(r,t));var c=ne(this.current.matched,t.matched),u=c.updated,l=c.deactivated,f=c.activated,h=[].concat(oe(l),this.router.beforeHooks,ae(u),f.map((function(t){return t.beforeEnter})),Yt(f)),d=function(e,n){if(i.pending!==t)return o(Ft(r,t));try{e(t,r,(function(e){!1===e?(i.ensureURL(!0),o(zt(r,t))):qt(e)?(i.ensureURL(!0),o(e)):"string"===typeof e||"object"===typeof e&&("string"===typeof e.path||"string"===typeof e.name)?(o(Nt(r,t)),"object"===typeof e&&e.replace?i.replace(e):i.push(e)):n(e)}))}catch(a){o(a)}};Dt(h,d,(function(){var n=ce(f),a=n.concat(i.router.resolveHooks);Dt(a,d,(function(){if(i.pending!==t)return o(Ft(r,t));i.pending=null,e(t),i.router.app&&i.router.app.$nextTick((function(){_(t)}))}))}))},te.prototype.updateRoute=function(t){this.current=t,this.cb&&this.cb(t)},te.prototype.setupListeners=function(){},te.prototype.teardown=function(){this.listeners.forEach((function(t){t()})),this.listeners=[],this.current=m,this.pending=null};var le=function(t){function e(e,n){t.call(this,e,n),this._startLocation=fe(this.base)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.setupListeners=function(){var t=this;if(!(this.listeners.length>0)){var e=this.router,n=e.options.scrollBehavior,i=Bt&&n;i&&this.listeners.push(Ot());var r=function(){var n=t.current,r=fe(t.base);t.current===m&&r===t._startLocation||t.transitionTo(r,(function(t){i&&_t(e,t,n,!0)}))};window.addEventListener("popstate",r),this.listeners.push((function(){window.removeEventListener("popstate",r)}))}},e.prototype.go=function(t){window.history.go(t)},e.prototype.push=function(t,e,n){var i=this,r=this,o=r.current;this.transitionTo(t,(function(t){Mt(A(i.base+t.fullPath)),_t(i.router,t,o,!1),e&&e(t)}),n)},e.prototype.replace=function(t,e,n){var i=this,r=this,o=r.current;this.transitionTo(t,(function(t){Pt(A(i.base+t.fullPath)),_t(i.router,t,o,!1),e&&e(t)}),n)},e.prototype.ensureURL=function(t){if(fe(this.base)!==this.current.fullPath){var e=A(this.base+this.current.fullPath);t?Mt(e):Pt(e)}},e.prototype.getCurrentLocation=function(){return fe(this.base)},e}(te);function fe(t){var e=window.location.pathname,n=e.toLowerCase(),i=t.toLowerCase();return!t||n!==i&&0!==n.indexOf(A(i+"/"))||(e=e.slice(t.length)),(e||"/")+window.location.search+window.location.hash}var he=function(t){function e(e,n,i){t.call(this,e,n),i&&de(this.base)||pe()}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.setupListeners=function(){var t=this;if(!(this.listeners.length>0)){var e=this.router,n=e.options.scrollBehavior,i=Bt&&n;i&&this.listeners.push(Ot());var r=function(){var 
e=t.current;pe()&&t.transitionTo(ve(),(function(n){i&&_t(t.router,n,e,!0),Bt||be(n.fullPath)}))},o=Bt?"popstate":"hashchange";window.addEventListener(o,r),this.listeners.push((function(){window.removeEventListener(o,r)}))}},e.prototype.push=function(t,e,n){var i=this,r=this,o=r.current;this.transitionTo(t,(function(t){ge(t.fullPath),_t(i.router,t,o,!1),e&&e(t)}),n)},e.prototype.replace=function(t,e,n){var i=this,r=this,o=r.current;this.transitionTo(t,(function(t){be(t.fullPath),_t(i.router,t,o,!1),e&&e(t)}),n)},e.prototype.go=function(t){window.history.go(t)},e.prototype.ensureURL=function(t){var e=this.current.fullPath;ve()!==e&&(t?ge(e):be(e))},e.prototype.getCurrentLocation=function(){return ve()},e}(te);function de(t){var e=fe(t);if(!/^\/#/.test(e))return window.location.replace(A(t+"/#"+e)),!0}function pe(){var t=ve();return"/"===t.charAt(0)||(be("/"+t),!1)}function ve(){var t=window.location.href,e=t.indexOf("#");return e<0?"":(t=t.slice(e+1),t)}function me(t){var e=window.location.href,n=e.indexOf("#"),i=n>=0?e.slice(0,n):e;return i+"#"+t}function ge(t){Bt?Mt(me(t)):window.location.hash=t}function be(t){Bt?Pt(me(t)):window.location.replace(me(t))}var ye=function(t){function e(e,n){t.call(this,e,n),this.stack=[],this.index=-1}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.push=function(t,e,n){var i=this;this.transitionTo(t,(function(t){i.stack=i.stack.slice(0,i.index+1).concat(t),i.index++,e&&e(t)}),n)},e.prototype.replace=function(t,e,n){var i=this;this.transitionTo(t,(function(t){i.stack=i.stack.slice(0,i.index).concat(t),e&&e(t)}),n)},e.prototype.go=function(t){var e=this,n=this.index+t;if(!(n<0||n>=this.stack.length)){var i=this.stack[n];this.confirmTransition(i,(function(){var t=e.current;e.index=n,e.updateRoute(i),e.router.afterHooks.forEach((function(e){e&&e(i,t)}))}),(function(t){Gt(t,Rt.duplicated)&&(e.index=n)}))}},e.prototype.getCurrentLocation=function(){var t=this.stack[this.stack.length-1];return t?t.fullPath:"/"},e.prototype.ensureURL=function(){},e}(te),xe=function(t){void 0===t&&(t={}),this.app=null,this.apps=[],this.options=t,this.beforeHooks=[],this.resolveHooks=[],this.afterHooks=[],this.matcher=dt(t.routes||[],this);var e=t.mode||"hash";switch(this.fallback="history"===e&&!Bt&&!1!==t.fallback,this.fallback&&(e="hash"),ct||(e="abstract"),this.mode=e,e){case"history":this.history=new le(this,t.base);break;case"hash":this.history=new he(this,t.base,this.fallback);break;case"abstract":this.history=new ye(this,t.base);break;default:0}},we={currentRoute:{configurable:!0}};function Oe(t,e){return t.push(e),function(){var n=t.indexOf(e);n>-1&&t.splice(n,1)}}function _e(t,e,n){var i="hash"===n?"#"+e:e;return t?A(t+"/"+i):i}xe.prototype.match=function(t,e,n){return this.matcher.match(t,e,n)},we.currentRoute.get=function(){return this.history&&this.history.current},xe.prototype.init=function(t){var e=this;if(this.apps.push(t),t.$once("hook:destroyed",(function(){var n=e.apps.indexOf(t);n>-1&&e.apps.splice(n,1),e.app===t&&(e.app=e.apps[0]||null),e.app||e.history.teardown()})),!this.app){this.app=t;var n=this.history;if(n instanceof le||n instanceof he){var i=function(t){var i=n.current,r=e.options.scrollBehavior,o=Bt&&r;o&&"fullPath"in t&&_t(e,t,i,!1)},r=function(t){n.setupListeners(),i(t)};n.transitionTo(n.getCurrentLocation(),r,r)}n.listen((function(t){e.apps.forEach((function(e){e._route=t}))}))}},xe.prototype.beforeEach=function(t){return Oe(this.beforeHooks,t)},xe.prototype.beforeResolve=function(t){return 
Oe(this.resolveHooks,t)},xe.prototype.afterEach=function(t){return Oe(this.afterHooks,t)},xe.prototype.onReady=function(t,e){this.history.onReady(t,e)},xe.prototype.onError=function(t){this.history.onError(t)},xe.prototype.push=function(t,e,n){var i=this;if(!e&&!n&&"undefined"!==typeof Promise)return new Promise((function(e,n){i.history.push(t,e,n)}));this.history.push(t,e,n)},xe.prototype.replace=function(t,e,n){var i=this;if(!e&&!n&&"undefined"!==typeof Promise)return new Promise((function(e,n){i.history.replace(t,e,n)}));this.history.replace(t,e,n)},xe.prototype.go=function(t){this.history.go(t)},xe.prototype.back=function(){this.go(-1)},xe.prototype.forward=function(){this.go(1)},xe.prototype.getMatchedComponents=function(t){var e=t?t.matched?t:this.resolve(t).route:this.currentRoute;return e?[].concat.apply([],e.matched.map((function(t){return Object.keys(t.components).map((function(e){return t.components[e]}))}))):[]},xe.prototype.resolve=function(t,e,n){e=e||this.history.current;var i=Q(t,e,n,this),r=this.match(i,e),o=r.redirectedFrom||r.fullPath,a=this.history.base,s=_e(a,o,this.mode);return{location:i,route:r,href:s,normalizedTo:i,resolved:r}},xe.prototype.getRoutes=function(){return this.matcher.getRoutes()},xe.prototype.addRoute=function(t,e){this.matcher.addRoute(t,e),this.history.current!==m&&this.history.transitionTo(this.history.getCurrentLocation())},xe.prototype.addRoutes=function(t){this.matcher.addRoutes(t),this.history.current!==m&&this.history.transitionTo(this.history.getCurrentLocation())},Object.defineProperties(xe.prototype,we),xe.install=st,xe.version="3.5.3",xe.isNavigationFailure=Gt,xe.NavigationFailureType=Rt,xe.START_LOCATION=m,ct&&window.Vue&&window.Vue.use(xe),e["a"]=xe},"8ce9":function(t,e,n){},"8d4f":function(t,e,n){},"8da5":function(t,e,n){"use strict";n.d(e,"a",(function(){return c})),n.d(e,"b",(function(){return u}));var i=n("80d2"),r=[[3.2406,-1.5372,-.4986],[-.9689,1.8758,.0415],[.0557,-.204,1.057]],o=function(t){return t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055},a=[[.4124,.3576,.1805],[.2126,.7152,.0722],[.0193,.1192,.9505]],s=function(t){return t<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)};function c(t){for(var e=Array(3),n=o,a=r,s=0;s<3;++s)e[s]=Math.round(255*Object(i["c"])(n(a[s][0]*t[0]+a[s][1]*t[1]+a[s][2]*t[2])));return(e[0]<<16)+(e[1]<<8)+(e[2]<<0)}function u(t){for(var e=[0,0,0],n=s,i=a,r=n((t>>16&255)/255),o=n((t>>8&255)/255),c=n((t>>0&255)/255),u=0;u<3;++u)e[u]=i[u][0]*r+i[u][1]*o+i[u][2]*c;return e}},"8dd9":function(t,e,n){"use strict";var i=n("5530"),r=(n("25a8"),n("7e2b")),o=n("a9ad"),a=n("c995"),s=n("24b2"),c=n("a236"),u=n("7560"),l=n("58df");e["a"]=Object(l["a"])(r["a"],o["a"],a["a"],s["a"],c["a"],u["a"]).extend({name:"v-sheet",props:{outlined:Boolean,shaped:Boolean,tag:{type:String,default:"div"}},computed:{classes:function(){return Object(i["a"])(Object(i["a"])(Object(i["a"])({"v-sheet":!0,"v-sheet--outlined":this.outlined,"v-sheet--shaped":this.shaped},this.themeClasses),this.elevationClasses),this.roundedClasses)},styles:function(){return this.measurableStyles}},render:function(t){var e={class:this.classes,style:this.styles,on:this.listeners$};return t(this.tag,this.setBackgroundColor(this.color,e),this.$slots.default)}})},"8df4":function(t,e,n){"use strict";var i=n("7a77");function r(t){if("function"!==typeof t)throw new TypeError("executor must be a function.");var e;this.promise=new Promise((function(t){e=t}));var n=this;t((function(t){n.reason||(n.reason=new 
i(t),e(n.reason))}))}r.prototype.throwIfRequested=function(){if(this.reason)throw this.reason},r.source=function(){var t,e=new r((function(e){t=e}));return{token:e,cancel:t}},t.exports=r},"8efc":function(t,e,n){},"8ff2":function(t,e,n){},"90a2":function(t,e,n){"use strict";var i=n("53ca");n("d3b7");function r(t,e,n){if("undefined"!==typeof window&&"IntersectionObserver"in window){var r=e.modifiers||{},a=e.value,s="object"===Object(i["a"])(a)?a:{handler:a,options:{}},c=s.handler,u=s.options,l=new IntersectionObserver((function(){var i,a=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],s=arguments.length>1?arguments[1]:void 0,u=null==(i=t._observe)?void 0:i[n.context._uid];if(u){var l=a.some((function(t){return t.isIntersecting}));!c||r.quiet&&!u.init||r.once&&!l&&!u.init||c(a,s,l),l&&r.once?o(t,e,n):u.init=!0}}),u);t._observe=Object(t._observe),t._observe[n.context._uid]={init:!1,observer:l},l.observe(t)}}function o(t,e,n){var i,r=null==(i=t._observe)?void 0:i[n.context._uid];r&&(r.observer.unobserve(t),delete t._observe[n.context._uid])}var a={inserted:r,unbind:o};e["a"]=a},"90d8":function(t,e,n){var i=n("c65b"),r=n("1a2d"),o=n("3a9b"),a=n("ad6d"),s=RegExp.prototype;t.exports=function(t){var e=t.flags;return void 0!==e||"flags"in s||r(t,"flags")||!o(s,t)?e:i(a,t)}},"90e3":function(t,e,n){var i=n("e330"),r=0,o=Math.random(),a=i(1..toString);t.exports=function(t){return"Symbol("+(void 0===t?"":t)+")_"+a(++r+o,36)}},9112:function(t,e,n){var i=n("83ab"),r=n("9bf2"),o=n("5c6c");t.exports=i?function(t,e,n){return r.f(t,e,o(1,n))}:function(t,e,n){return t[e]=n,t}},9263:function(t,e,n){"use strict";var i=n("c65b"),r=n("e330"),o=n("577e"),a=n("ad6d"),s=n("9f7f"),c=n("5692"),u=n("7c73"),l=n("69f3").get,f=n("fce3"),h=n("107c"),d=c("native-string-replace",String.prototype.replace),p=RegExp.prototype.exec,v=p,m=r("".charAt),g=r("".indexOf),b=r("".replace),y=r("".slice),x=function(){var t=/a/,e=/b*/g;return i(p,t,"a"),i(p,e,"a"),0!==t.lastIndex||0!==e.lastIndex}(),w=s.BROKEN_CARET,O=void 0!==/()??/.exec("")[1],_=x||O||w||f||h;_&&(v=function(t){var e,n,r,s,c,f,h,_=this,S=l(_),C=o(t),k=S.raw;if(k)return k.lastIndex=_.lastIndex,e=i(v,k,C),_.lastIndex=k.lastIndex,e;var j=S.groups,$=w&&_.sticky,A=i(a,_),E=_.source,T=0,L=C;if($&&(A=b(A,"y",""),-1===g(A,"g")&&(A+="g"),L=y(C,_.lastIndex),_.lastIndex>0&&(!_.multiline||_.multiline&&"\n"!==m(C,_.lastIndex-1))&&(E="(?: "+E+")",L=" "+L,T++),n=new RegExp("^(?:"+E+")",A)),O&&(n=new RegExp("^"+E+"$(?!\\s)",A)),x&&(r=_.lastIndex),s=i(p,$?n:_,L),$?s?(s.input=y(s.input,T),s[0]=y(s[0],T),s.index=_.lastIndex,_.lastIndex+=s[0].length):_.lastIndex=0:x&&s&&(_.lastIndex=_.global?s.index+s[0].length:r),O&&s&&s.length>1&&i(d,s[0],n,(function(){for(c=1;c<arguments.length-2;c++)void 0===arguments[c]&&(s[c]=void 0)})),s&&j)for(s.groups=f=u(null),c=0;c<j.length;c++)h=j[c],f[h[0]]=s[h[1]];return s}),t.exports=v},"94ca":function(t,e,n){var i=n("d039"),r=n("1626"),o=/#|\.prototype\./,a=function(t,e){var n=c[s(t)];return n==l||n!=u&&(r(e)?i(e):!!e)},s=a.normalize=function(t){return String(t).replace(o,".").toLowerCase()},c=a.data={},u=a.NATIVE="N",l=a.POLYFILL="P";t.exports=a},"95ed":function(t,e,n){},"96cf":function(t,e,n){var i=function(t){"use strict";var e,n=Object.prototype,i=n.hasOwnProperty,r="function"===typeof Symbol?Symbol:{},o=r.iterator||"@@iterator",a=r.asyncIterator||"@@asyncIterator",s=r.toStringTag||"@@toStringTag";function c(t,e,n){return 
Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}),t[e]}try{c({},"")}catch(L){c=function(t,e,n){return t[e]=n}}function u(t,e,n,i){var r=e&&e.prototype instanceof m?e:m,o=Object.create(r.prototype),a=new A(i||[]);return o._invoke=C(t,n,a),o}function l(t,e,n){try{return{type:"normal",arg:t.call(e,n)}}catch(L){return{type:"throw",arg:L}}}t.wrap=u;var f="suspendedStart",h="suspendedYield",d="executing",p="completed",v={};function m(){}function g(){}function b(){}var y={};c(y,o,(function(){return this}));var x=Object.getPrototypeOf,w=x&&x(x(E([])));w&&w!==n&&i.call(w,o)&&(y=w);var O=b.prototype=m.prototype=Object.create(y);function _(t){["next","throw","return"].forEach((function(e){c(t,e,(function(t){return this._invoke(e,t)}))}))}function S(t,e){function n(r,o,a,s){var c=l(t[r],t,o);if("throw"!==c.type){var u=c.arg,f=u.value;return f&&"object"===typeof f&&i.call(f,"__await")?e.resolve(f.__await).then((function(t){n("next",t,a,s)}),(function(t){n("throw",t,a,s)})):e.resolve(f).then((function(t){u.value=t,a(u)}),(function(t){return n("throw",t,a,s)}))}s(c.arg)}var r;function o(t,i){function o(){return new e((function(e,r){n(t,i,e,r)}))}return r=r?r.then(o,o):o()}this._invoke=o}function C(t,e,n){var i=f;return function(r,o){if(i===d)throw new Error("Generator is already running");if(i===p){if("throw"===r)throw o;return T()}n.method=r,n.arg=o;while(1){var a=n.delegate;if(a){var s=k(a,n);if(s){if(s===v)continue;return s}}if("next"===n.method)n.sent=n._sent=n.arg;else if("throw"===n.method){if(i===f)throw i=p,n.arg;n.dispatchException(n.arg)}else"return"===n.method&&n.abrupt("return",n.arg);i=d;var c=l(t,e,n);if("normal"===c.type){if(i=n.done?p:h,c.arg===v)continue;return{value:c.arg,done:n.done}}"throw"===c.type&&(i=p,n.method="throw",n.arg=c.arg)}}}function k(t,n){var i=t.iterator[n.method];if(i===e){if(n.delegate=null,"throw"===n.method){if(t.iterator["return"]&&(n.method="return",n.arg=e,k(t,n),"throw"===n.method))return v;n.method="throw",n.arg=new TypeError("The iterator does not provide a 'throw' method")}return v}var r=l(i,t.iterator,n.arg);if("throw"===r.type)return n.method="throw",n.arg=r.arg,n.delegate=null,v;var o=r.arg;return o?o.done?(n[t.resultName]=o.value,n.next=t.nextLoc,"return"!==n.method&&(n.method="next",n.arg=e),n.delegate=null,v):o:(n.method="throw",n.arg=new TypeError("iterator result is not an object"),n.delegate=null,v)}function j(t){var e={tryLoc:t[0]};1 in t&&(e.catchLoc=t[1]),2 in t&&(e.finallyLoc=t[2],e.afterLoc=t[3]),this.tryEntries.push(e)}function $(t){var e=t.completion||{};e.type="normal",delete e.arg,t.completion=e}function A(t){this.tryEntries=[{tryLoc:"root"}],t.forEach(j,this),this.reset(!0)}function E(t){if(t){var n=t[o];if(n)return n.call(t);if("function"===typeof t.next)return t;if(!isNaN(t.length)){var r=-1,a=function n(){while(++r<t.length)if(i.call(t,r))return n.value=t[r],n.done=!1,n;return n.value=e,n.done=!0,n};return a.next=a}}return{next:T}}function T(){return{value:e,done:!0}}return g.prototype=b,c(O,"constructor",b),c(b,"constructor",g),g.displayName=c(b,s,"GeneratorFunction"),t.isGeneratorFunction=function(t){var e="function"===typeof t&&t.constructor;return!!e&&(e===g||"GeneratorFunction"===(e.displayName||e.name))},t.mark=function(t){return Object.setPrototypeOf?Object.setPrototypeOf(t,b):(t.__proto__=b,c(t,s,"GeneratorFunction")),t.prototype=Object.create(O),t},t.awrap=function(t){return{__await:t}},_(S.prototype),c(S.prototype,a,(function(){return this})),t.AsyncIterator=S,t.async=function(e,n,i,r,o){void 
0===o&&(o=Promise);var a=new S(u(e,n,i,r),o);return t.isGeneratorFunction(n)?a:a.next().then((function(t){return t.done?t.value:a.next()}))},_(O),c(O,s,"Generator"),c(O,o,(function(){return this})),c(O,"toString",(function(){return"[object Generator]"})),t.keys=function(t){var e=[];for(var n in t)e.push(n);return e.reverse(),function n(){while(e.length){var i=e.pop();if(i in t)return n.value=i,n.done=!1,n}return n.done=!0,n}},t.values=E,A.prototype={constructor:A,reset:function(t){if(this.prev=0,this.next=0,this.sent=this._sent=e,this.done=!1,this.delegate=null,this.method="next",this.arg=e,this.tryEntries.forEach($),!t)for(var n in this)"t"===n.charAt(0)&&i.call(this,n)&&!isNaN(+n.slice(1))&&(this[n]=e)},stop:function(){this.done=!0;var t=this.tryEntries[0],e=t.completion;if("throw"===e.type)throw e.arg;return this.rval},dispatchException:function(t){if(this.done)throw t;var n=this;function r(i,r){return s.type="throw",s.arg=t,n.next=i,r&&(n.method="next",n.arg=e),!!r}for(var o=this.tryEntries.length-1;o>=0;--o){var a=this.tryEntries[o],s=a.completion;if("root"===a.tryLoc)return r("end");if(a.tryLoc<=this.prev){var c=i.call(a,"catchLoc"),u=i.call(a,"finallyLoc");if(c&&u){if(this.prev<a.catchLoc)return r(a.catchLoc,!0);if(this.prev<a.finallyLoc)return r(a.finallyLoc)}else if(c){if(this.prev<a.catchLoc)return r(a.catchLoc,!0)}else{if(!u)throw new Error("try statement without catch or finally");if(this.prev<a.finallyLoc)return r(a.finallyLoc)}}}},abrupt:function(t,e){for(var n=this.tryEntries.length-1;n>=0;--n){var r=this.tryEntries[n];if(r.tryLoc<=this.prev&&i.call(r,"finallyLoc")&&this.prev<r.finallyLoc){var o=r;break}}o&&("break"===t||"continue"===t)&&o.tryLoc<=e&&e<=o.finallyLoc&&(o=null);var a=o?o.completion:{};return a.type=t,a.arg=e,o?(this.method="next",this.next=o.finallyLoc,v):this.complete(a)},complete:function(t,e){if("throw"===t.type)throw t.arg;return"break"===t.type||"continue"===t.type?this.next=t.arg:"return"===t.type?(this.rval=this.arg=t.arg,this.method="return",this.next="end"):"normal"===t.type&&e&&(this.next=e),v},finish:function(t){for(var e=this.tryEntries.length-1;e>=0;--e){var n=this.tryEntries[e];if(n.finallyLoc===t)return this.complete(n.completion,n.afterLoc),$(n),v}},catch:function(t){for(var e=this.tryEntries.length-1;e>=0;--e){var n=this.tryEntries[e];if(n.tryLoc===t){var i=n.completion;if("throw"===i.type){var r=i.arg;$(n)}return r}}throw new Error("illegal catch attempt")},delegateYield:function(t,n,i){return this.delegate={iterator:E(t),resultName:n,nextLoc:i},"next"===this.method&&(this.arg=e),v}},t}(t.exports);try{regeneratorRuntime=i}catch(r){"object"===typeof globalThis?globalThis.regeneratorRuntime=i:Function("r","regeneratorRuntime = r")(i)}},9911:function(t,e,n){"use strict";var i=n("23e7"),r=n("857a"),o=n("af03");i({target:"String",proto:!0,forced:o("link")},{link:function(t){return r(this,"a","href",t)}})},"99af":function(t,e,n){"use strict";var i=n("23e7"),r=n("da84"),o=n("d039"),a=n("e8b5"),s=n("861d"),c=n("7b0b"),u=n("07fa"),l=n("8418"),f=n("65f0"),h=n("1dde"),d=n("b622"),p=n("2d00"),v=d("isConcatSpreadable"),m=9007199254740991,g="Maximum allowed index exceeded",b=r.TypeError,y=p>=51||!o((function(){var t=[];return t[v]=!1,t.concat()[0]!==t})),x=h("concat"),w=function(t){if(!s(t))return!1;var e=t[v];return void 0!==e?!!e:a(t)},O=!y||!x;i({target:"Array",proto:!0,arity:1,forced:O},{concat:function(t){var e,n,i,r,o,a=c(this),s=f(a,0),h=0;for(e=-1,i=arguments.length;e<i;e++)if(o=-1===e?a:arguments[e],w(o)){if(r=u(o),h+r>m)throw 
b(g);for(n=0;n<r;n++,h++)n in o&&l(s,h,o[n])}else{if(h>=m)throw b(g);l(s,h++,o)}return s.length=h,s}})},"99de":function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));n("d9e2");var i=n("53ca");function r(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}function o(t,e){if(e&&("object"===Object(i["a"])(e)||"function"===typeof e))return e;if(void 0!==e)throw new TypeError("Derived constructors may only return object or undefined");return r(t)}},"9a1f":function(t,e,n){var i=n("da84"),r=n("c65b"),o=n("59ed"),a=n("825a"),s=n("0d51"),c=n("35a1"),u=i.TypeError;t.exports=function(t,e){var n=arguments.length<2?c(t):e;if(o(n))return a(r(n,t));throw u(s(t)+" is not iterable")}},"9bdd":function(t,e,n){var i=n("825a"),r=n("2a62");t.exports=function(t,e,n,o){try{return o?e(i(n)[0],n[1]):e(n)}catch(a){r(t,"throw",a)}}},"9bf2":function(t,e,n){var i=n("da84"),r=n("83ab"),o=n("0cfb"),a=n("aed9"),s=n("825a"),c=n("a04b"),u=i.TypeError,l=Object.defineProperty,f=Object.getOwnPropertyDescriptor,h="enumerable",d="configurable",p="writable";e.f=r?a?function(t,e,n){if(s(t),e=c(e),s(n),"function"===typeof t&&"prototype"===e&&"value"in n&&p in n&&!n[p]){var i=f(t,e);i&&i[p]&&(t[e]=n.value,n={configurable:d in n?n[d]:i[d],enumerable:h in n?n[h]:i[h],writable:!1})}return l(t,e,n)}:l:function(t,e,n){if(s(t),e=c(e),s(n),o)try{return l(t,e,n)}catch(i){}if("get"in n||"set"in n)throw u("Accessors not supported");return"value"in n&&(t[e]=n.value),t}},"9d26":function(t,e,n){"use strict";var i=n("132d");e["a"]=i["a"]},"9ed3":function(t,e,n){"use strict";var i=n("ae93").IteratorPrototype,r=n("7c73"),o=n("5c6c"),a=n("d44e"),s=n("3f8c"),c=function(){return this};t.exports=function(t,e,n,u){var l=e+" Iterator";return t.prototype=r(i,{next:o(+!u,n)}),a(t,l,!1,!0),s[l]=c,t}},"9f7f":function(t,e,n){var i=n("d039"),r=n("da84"),o=r.RegExp,a=i((function(){var t=o("a","y");return t.lastIndex=2,null!=t.exec("abcd")})),s=a||i((function(){return!o("a","y").sticky})),c=a||i((function(){var t=o("^r","gy");return t.lastIndex=2,null!=t.exec("str")}));t.exports={BROKEN_CARET:c,MISSED_STICKY:s,UNSUPPORTED_Y:a}},a04b:function(t,e,n){var i=n("c04e"),r=n("d9b5");t.exports=function(t){var e=i(t,"string");return r(e)?e:e+""}},a15b:function(t,e,n){"use strict";var i=n("23e7"),r=n("e330"),o=n("44ad"),a=n("fc6a"),s=n("a640"),c=r([].join),u=o!=Object,l=s("join",",");i({target:"Array",proto:!0,forced:u||!l},{join:function(t){return c(a(this),void 0===t?",":t)}})},a236:function(t,e,n){"use strict";var i=n("ade3"),r=n("b85c"),o=(n("ac1f"),n("1276"),n("a15b"),n("2b0e"));e["a"]=o["a"].extend({name:"roundable",props:{rounded:[Boolean,String],tile:Boolean},computed:{roundedClasses:function(){var t=[],e="string"===typeof this.rounded?String(this.rounded):!0===this.rounded;if(this.tile)t.push("rounded-0");else if("string"===typeof e){var n,o=e.split(" "),a=Object(r["a"])(o);try{for(a.s();!(n=a.n()).done;){var s=n.value;t.push("rounded-".concat(s))}}catch(c){a.e(c)}finally{a.f()}}else e&&t.push("rounded");return t.length>0?Object(i["a"])({},t.join(" "),!0):{}}}})},a2bf:function(t,e,n){"use strict";var i=n("da84"),r=n("e8b5"),o=n("07fa"),a=n("0366"),s=i.TypeError,c=function(t,e,n,i,u,l,f,h){var d,p,v=u,m=0,g=!!f&&a(f,h);while(m<i){if(m in n){if(d=g?g(n[m],m,e):n[m],l>0&&r(d))p=o(d),v=c(t,e,d,p,v,l-1)-1;else{if(v>=9007199254740991)throw s("Exceed the acceptable array length");t[v]=d}v++}m++}return v};t.exports=c},a434:function(t,e,n){"use strict";var 
i=n("23e7"),r=n("da84"),o=n("23cb"),a=n("5926"),s=n("07fa"),c=n("7b0b"),u=n("65f0"),l=n("8418"),f=n("1dde"),h=f("splice"),d=r.TypeError,p=Math.max,v=Math.min,m=9007199254740991,g="Maximum allowed length exceeded";i({target:"Array",proto:!0,forced:!h},{splice:function(t,e){var n,i,r,f,h,b,y=c(this),x=s(y),w=o(t,x),O=arguments.length;if(0===O?n=i=0:1===O?(n=0,i=x-w):(n=O-2,i=v(p(a(e),0),x-w)),x+n-i>m)throw d(g);for(r=u(y,i),f=0;f<i;f++)h=w+f,h in y&&l(r,f,y[h]);if(r.length=i,n<i){for(f=w;f<x-i;f++)h=f+i,b=f+n,h in y?y[b]=y[h]:delete y[b];for(f=x;f>x-i+n;f--)delete y[f-1]}else if(n>i)for(f=x-i;f>w;f--)h=f+i-1,b=f+n-1,h in y?y[b]=y[h]:delete y[b];for(f=0;f<n;f++)y[f+w]=arguments[f+2];return y.length=x-i+n,r}})},a452:function(t,e,n){"use strict";var i=n("ade3"),r=n("2b0e");function o(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"value",e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"change";return r["a"].extend({name:"proxyable",model:{prop:t,event:e},props:Object(i["a"])({},t,{required:!1}),data:function(){return{internalLazyValue:this[t]}},computed:{internalValue:{get:function(){return this.internalLazyValue},set:function(t){t!==this.internalLazyValue&&(this.internalLazyValue=t,this.$emit(e,t))}}},watch:Object(i["a"])({},t,(function(t){this.internalLazyValue=t}))})}var a=o();e["a"]=a},a4b4:function(t,e,n){var i=n("342f");t.exports=/web0s(?!.*chrome)/i.test(i)},a4d3:function(t,e,n){n("d9f5"),n("b4f8"),n("c513"),n("e9c4"),n("5a47")},a630:function(t,e,n){var i=n("23e7"),r=n("4df4"),o=n("1c7e"),a=!o((function(t){Array.from(t)}));i({target:"Array",stat:!0,forced:a},{from:r})},a640:function(t,e,n){"use strict";var i=n("d039");t.exports=function(t,e){var n=[][t];return!!n&&i((function(){n.call(null,e||function(){return 1},1)}))}},a79d:function(t,e,n){"use strict";var i=n("23e7"),r=n("c430"),o=n("d256"),a=n("d039"),s=n("d066"),c=n("1626"),u=n("4840"),l=n("cdf9"),f=n("cb2d"),h=o&&o.prototype,d=!!o&&a((function(){h["finally"].call({then:function(){}},(function(){}))}));if(i({target:"Promise",proto:!0,real:!0,forced:d},{finally:function(t){var e=u(this,s("Promise")),n=c(t);return this.then(n?function(n){return l(e,t()).then((function(){return n}))}:t,n?function(n){return l(e,t()).then((function(){throw n}))}:t)}}),!r&&c(o)){var p=s("Promise").prototype["finally"];h["finally"]!==p&&f(h,"finally",p,{unsafe:!0})}},a844:function(t,e,n){"use strict";var i=n("5530"),r=(n("a9e3"),n("1681"),n("8654")),o=n("58df"),a=Object(o["a"])(r["a"]);e["a"]=a.extend({name:"v-textarea",props:{autoGrow:Boolean,noResize:Boolean,rowHeight:{type:[Number,String],default:24,validator:function(t){return!isNaN(parseFloat(t))}},rows:{type:[Number,String],default:5,validator:function(t){return!isNaN(parseInt(t,10))}}},computed:{classes:function(){return Object(i["a"])({"v-textarea":!0,"v-textarea--auto-grow":this.autoGrow,"v-textarea--no-resize":this.noResizeHandle},r["a"].options.computed.classes.call(this))},noResizeHandle:function(){return this.noResize||this.autoGrow}},watch:{autoGrow:function(t){var e=this;this.$nextTick((function(){var n;t?e.calculateInputHeight():null==(n=e.$refs.input)||n.style.removeProperty("height")}))},lazyValue:function(){this.autoGrow&&this.$nextTick(this.calculateInputHeight)},rowHeight:function(){this.autoGrow&&this.$nextTick(this.calculateInputHeight)}},mounted:function(){var t=this;setTimeout((function(){t.autoGrow&&t.calculateInputHeight()}),0)},methods:{calculateInputHeight:function(){var t=this.$refs.input;if(t){t.style.height="0";var 
e=t.scrollHeight,n=parseInt(this.rows,10)*parseFloat(this.rowHeight);t.style.height=Math.max(n,e)+"px"}},genInput:function(){var t=r["a"].options.methods.genInput.call(this);return t.tag="textarea",delete t.data.attrs.type,t.data.attrs.rows=this.rows,t},onInput:function(t){r["a"].options.methods.onInput.call(this,t),this.autoGrow&&this.calculateInputHeight()},onKeyDown:function(t){this.isFocused&&13===t.keyCode&&t.stopPropagation(),this.$emit("keydown",t)}}})},a9ad:function(t,e,n){"use strict";var i=n("3835"),r=n("ade3"),o=n("5530"),a=(n("ac1f"),n("1276"),n("498a"),n("d3b7"),n("25f0"),n("2b0e")),s=n("d9bd"),c=n("7bc6");e["a"]=a["a"].extend({name:"colorable",props:{color:String},methods:{setBackgroundColor:function(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return"string"===typeof e.style?(Object(s["b"])("style must be an object",this),e):"string"===typeof e.class?(Object(s["b"])("class must be an object",this),e):(Object(c["d"])(t)?e.style=Object(o["a"])(Object(o["a"])({},e.style),{},{"background-color":"".concat(t),"border-color":"".concat(t)}):t&&(e.class=Object(o["a"])(Object(o["a"])({},e.class),{},Object(r["a"])({},t,!0))),e)},setTextColor:function(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if("string"===typeof e.style)return Object(s["b"])("style must be an object",this),e;if("string"===typeof e.class)return Object(s["b"])("class must be an object",this),e;if(Object(c["d"])(t))e.style=Object(o["a"])(Object(o["a"])({},e.style),{},{color:"".concat(t),"caret-color":"".concat(t)});else if(t){var n=t.toString().trim().split(" ",2),a=Object(i["a"])(n,2),u=a[0],l=a[1];e.class=Object(o["a"])(Object(o["a"])({},e.class),{},Object(r["a"])({},u+"--text",!0)),l&&(e.class["text--"+l]=!0)}return e}}})},a9e3:function(t,e,n){"use strict";var i=n("83ab"),r=n("da84"),o=n("e330"),a=n("94ca"),s=n("cb2d"),c=n("1a2d"),u=n("7156"),l=n("3a9b"),f=n("d9b5"),h=n("c04e"),d=n("d039"),p=n("241c").f,v=n("06cf").f,m=n("9bf2").f,g=n("408a"),b=n("58a8").trim,y="Number",x=r[y],w=x.prototype,O=r.TypeError,_=o("".slice),S=o("".charCodeAt),C=function(t){var e=h(t,"number");return"bigint"==typeof e?e:k(e)},k=function(t){var e,n,i,r,o,a,s,c,u=h(t,"number");if(f(u))throw O("Cannot convert a Symbol value to a number");if("string"==typeof u&&u.length>2)if(u=b(u),e=S(u,0),43===e||45===e){if(n=S(u,2),88===n||120===n)return NaN}else if(48===e){switch(S(u,1)){case 66:case 98:i=2,r=49;break;case 79:case 111:i=8,r=55;break;default:return+u}for(o=_(u,2),a=o.length,s=0;s<a;s++)if(c=S(o,s),c<48||c>r)return NaN;return parseInt(o,i)}return+u};if(a(y,!x(" 0o1")||!x("0b1")||x("+0x1"))){for(var j,$=function(t){var e=arguments.length<1?0:x(C(t)),n=this;return l(w,n)&&d((function(){g(n)}))?u(Object(e),n,$):e},A=i?p(x):"MAX_VALUE,MIN_VALUE,NaN,NEGATIVE_INFINITY,POSITIVE_INFINITY,EPSILON,MAX_SAFE_INTEGER,MIN_SAFE_INTEGER,isFinite,isInteger,isNaN,isSafeInteger,parseFloat,parseInt,fromString,range".split(","),E=0;A.length>E;E++)c(x,j=A[E])&&!c($,j)&&m($,j,v(x,j));$.prototype=w,w.constructor=$,s(r,y,$,{constructor:!0})}},ab13:function(t,e,n){var i=n("b622"),r=i("match");t.exports=function(t){var e=/./;try{"/./"[t](e)}catch(n){try{return e[r]=!1,"/./"[t](e)}catch(i){}}return!1}},ab36:function(t,e,n){var i=n("861d"),r=n("9112");t.exports=function(t,e){i(e)&&"cause"in e&&r(t,"cause",e.cause)}},ac1f:function(t,e,n){"use strict";var i=n("23e7"),r=n("9263");i({target:"RegExp",proto:!0,forced:/./.exec!==r},{exec:r})},ac7c:function(t,e,n){"use strict";var 
i=n("15fd"),r=n("5530"),o=(n("d3b7"),n("25f0"),n("6ca7"),n("ec29"),n("9d26")),a=n("c37a"),s=(n("4de4"),n("5607")),c=n("2b0e"),u=c["a"].extend({name:"rippleable",directives:{ripple:s["a"]},props:{ripple:{type:[Boolean,Object],default:!0}},methods:{genRipple:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return this.ripple?(t.staticClass="v-input--selection-controls__ripple",t.directives=t.directives||[],t.directives.push({name:"ripple",value:{center:!0}}),this.$createElement("div",t)):null}}}),l=n("8547"),f=n("58df");function h(t){t.preventDefault()}var d=Object(f["a"])(a["a"],u,l["a"]).extend({name:"selectable",model:{prop:"inputValue",event:"change"},props:{id:String,inputValue:null,falseValue:null,trueValue:null,multiple:{type:Boolean,default:null},label:String},data:function(){return{hasColor:this.inputValue,lazyValue:this.inputValue}},computed:{computedColor:function(){if(this.isActive)return this.color?this.color:this.isDark&&!this.appIsDark?"white":"primary"},isMultiple:function(){return!0===this.multiple||null===this.multiple&&Array.isArray(this.internalValue)},isActive:function(){var t=this,e=this.value,n=this.internalValue;return this.isMultiple?!!Array.isArray(n)&&n.some((function(n){return t.valueComparator(n,e)})):void 0===this.trueValue||void 0===this.falseValue?e?this.valueComparator(e,n):Boolean(n):this.valueComparator(n,this.trueValue)},isDirty:function(){return this.isActive},rippleState:function(){return this.isDisabled||this.validationState?this.validationState:void 0}},watch:{inputValue:function(t){this.lazyValue=t,this.hasColor=t}},methods:{genLabel:function(){var t=a["a"].options.methods.genLabel.call(this);return t?(t.data.on={click:h},t):t},genInput:function(t,e){return this.$createElement("input",{attrs:Object.assign({"aria-checked":this.isActive.toString(),disabled:this.isDisabled,id:this.computedId,role:t,type:t},e),domProps:{value:this.value,checked:this.isActive},on:{blur:this.onBlur,change:this.onChange,focus:this.onFocus,keydown:this.onKeydown,click:h},ref:"input"})},onClick:function(t){this.onChange(),this.$emit("click",t)},onChange:function(){var t=this;if(this.isInteractive){var e=this.value,n=this.internalValue;if(this.isMultiple){Array.isArray(n)||(n=[]);var i=n.length;n=n.filter((function(n){return!t.valueComparator(n,e)})),n.length===i&&n.push(e)}else n=void 0!==this.trueValue&&void 0!==this.falseValue?this.valueComparator(n,this.trueValue)?this.falseValue:this.trueValue:e?this.valueComparator(n,e)?null:e:!n;this.validate(!0,n),this.internalValue=n,this.hasColor=n}},onFocus:function(t){this.isFocused=!0,this.$emit("focus",t)},onBlur:function(t){this.isFocused=!1,this.$emit("blur",t)},onKeydown:function(t){}}}),p=["title"];e["a"]=d.extend({name:"v-checkbox",props:{indeterminate:Boolean,indeterminateIcon:{type:String,default:"$checkboxIndeterminate"},offIcon:{type:String,default:"$checkboxOff"},onIcon:{type:String,default:"$checkboxOn"}},data:function(){return{inputIndeterminate:this.indeterminate}},computed:{classes:function(){return Object(r["a"])(Object(r["a"])({},a["a"].options.computed.classes.call(this)),{},{"v-input--selection-controls":!0,"v-input--checkbox":!0,"v-input--indeterminate":this.inputIndeterminate})},computedIcon:function(){return this.inputIndeterminate?this.indeterminateIcon:this.isActive?this.onIcon:this.offIcon},validationState:function(){if(!this.isDisabled||this.inputIndeterminate)return this.hasError&&this.shouldValidate?"error":this.hasSuccess?"success":null!==this.hasColor?this.computedColor:void 
0}},watch:{indeterminate:function(t){var e=this;this.$nextTick((function(){return e.inputIndeterminate=t}))},inputIndeterminate:function(t){this.$emit("update:indeterminate",t)},isActive:function(){this.indeterminate&&(this.inputIndeterminate=!1)}},methods:{genCheckbox:function(){var t=this.attrs$,e=(t.title,Object(i["a"])(t,p));return this.$createElement("div",{staticClass:"v-input--selection-controls__input"},[this.$createElement(o["a"],this.setTextColor(this.validationState,{props:{dense:this.dense,dark:this.dark,light:this.light}}),this.computedIcon),this.genInput("checkbox",Object(r["a"])(Object(r["a"])({},e),{},{"aria-checked":this.inputIndeterminate?"mixed":this.isActive.toString()})),this.genRipple(this.setTextColor(this.rippleState))])},genDefaultSlot:function(){return[this.genCheckbox(),this.genLabel()]}}})},ad6d:function(t,e,n){"use strict";var i=n("825a");t.exports=function(){var t=i(this),e="";return t.hasIndices&&(e+="d"),t.global&&(e+="g"),t.ignoreCase&&(e+="i"),t.multiline&&(e+="m"),t.dotAll&&(e+="s"),t.unicode&&(e+="u"),t.sticky&&(e+="y"),e}},addb:function(t,e,n){var i=n("4dae"),r=Math.floor,o=function(t,e){var n=t.length,c=r(n/2);return n<8?a(t,e):s(t,o(i(t,0,c),e),o(i(t,c),e),e)},a=function(t,e){var n,i,r=t.length,o=1;while(o<r){i=o,n=t[o];while(i&&e(t[i-1],n)>0)t[i]=t[--i];i!==o++&&(t[i]=n)}return t},s=function(t,e,n,i){var r=e.length,o=n.length,a=0,s=0;while(a<r||s<o)t[a+s]=a<r&&s<o?i(e[a],n[s])<=0?e[a++]:n[s++]:a<r?e[a++]:n[s++];return t};t.exports=o},ade3:function(t,e,n){"use strict";function i(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}n.d(e,"a",(function(){return i}))},ae93:function(t,e,n){"use strict";var i,r,o,a=n("d039"),s=n("1626"),c=n("7c73"),u=n("e163"),l=n("cb2d"),f=n("b622"),h=n("c430"),d=f("iterator"),p=!1;[].keys&&(o=[].keys(),"next"in o?(r=u(u(o)),r!==Object.prototype&&(i=r)):p=!0);var v=void 0==i||a((function(){var t={};return i[d].call(t)!==t}));v?i={}:h&&(i=c(i)),s(i[d])||l(i,d,(function(){return this})),t.exports={IteratorPrototype:i,BUGGY_SAFARI_ITERATORS:p}},aeb0:function(t,e,n){var i=n("9bf2").f;t.exports=function(t,e,n){n in t||i(t,n,{configurable:!0,get:function(){return e[n]},set:function(t){e[n]=t}})}},aed9:function(t,e,n){var i=n("83ab"),r=n("d039");t.exports=i&&r((function(){return 42!=Object.defineProperty((function(){}),"prototype",{value:42,writable:!1}).prototype}))},af03:function(t,e,n){var i=n("d039");t.exports=function(t){return i((function(){var e=""[t]('"');return e!==e.toLowerCase()||e.split('"').length>3}))}},af2b:function(t,e,n){"use strict";n("c96a");var i=n("2b0e");e["a"]=i["a"].extend({name:"sizeable",props:{large:Boolean,small:Boolean,xLarge:Boolean,xSmall:Boolean},computed:{medium:function(){return Boolean(!this.xSmall&&!this.small&&!this.large&&!this.xLarge)},sizeableClasses:function(){return{"v-size--x-small":this.xSmall,"v-size--small":this.small,"v-size--default":this.medium,"v-size--large":this.large,"v-size--x-large":this.xLarge}}}})},b041:function(t,e,n){"use strict";var i=n("00ee"),r=n("f5df");t.exports=i?{}.toString:function(){return"[object "+r(this)+"]"}},b0c0:function(t,e,n){var i=n("83ab"),r=n("5e77").EXISTS,o=n("e330"),a=n("9bf2").f,s=Function.prototype,c=o(s.toString),u=/function\b(?:\s|\/\*[\S\s]*?\*\/|\/\/[^\n\r]*[\n\r]+)*([^\s(/]*)/,l=o(u.exec),f="name";i&&!r&&a(s,f,{configurable:!0,get:function(){try{return l(u,c(this))[1]}catch(t){return""}}})},b4f8:function(t,e,n){var 
i=n("23e7"),r=n("d066"),o=n("1a2d"),a=n("577e"),s=n("5692"),c=n("3d87"),u=s("string-to-symbol-registry"),l=s("symbol-to-string-registry");i({target:"Symbol",stat:!0,forced:!c},{for:function(t){var e=a(t);if(o(u,e))return u[e];var n=r("Symbol")(e);return u[e]=n,l[n]=e,n}})},b50d:function(t,e,n){"use strict";var i=n("c532"),r=n("467f"),o=n("30b5"),a=n("c345"),s=n("3934"),c=n("2d83");t.exports=function(t){return new Promise((function(e,u){var l=t.data,f=t.headers;i.isFormData(l)&&delete f["Content-Type"];var h=new XMLHttpRequest;if(t.auth){var d=t.auth.username||"",p=t.auth.password||"";f.Authorization="Basic "+btoa(d+":"+p)}if(h.open(t.method.toUpperCase(),o(t.url,t.params,t.paramsSerializer),!0),h.timeout=t.timeout,h.onreadystatechange=function(){if(h&&4===h.readyState&&(0!==h.status||h.responseURL&&0===h.responseURL.indexOf("file:"))){var n="getAllResponseHeaders"in h?a(h.getAllResponseHeaders()):null,i=t.responseType&&"text"!==t.responseType?h.response:h.responseText,o={data:i,status:h.status,statusText:h.statusText,headers:n,config:t,request:h};r(e,u,o),h=null}},h.onerror=function(){u(c("Network Error",t,null,h)),h=null},h.ontimeout=function(){u(c("timeout of "+t.timeout+"ms exceeded",t,"ECONNABORTED",h)),h=null},i.isStandardBrowserEnv()){var v=n("7aac"),m=(t.withCredentials||s(t.url))&&t.xsrfCookieName?v.read(t.xsrfCookieName):void 0;m&&(f[t.xsrfHeaderName]=m)}if("setRequestHeader"in h&&i.forEach(f,(function(t,e){"undefined"===typeof l&&"content-type"===e.toLowerCase()?delete f[e]:h.setRequestHeader(e,t)})),t.withCredentials&&(h.withCredentials=!0),t.responseType)try{h.responseType=t.responseType}catch(g){if("json"!==t.responseType)throw g}"function"===typeof t.onDownloadProgress&&h.addEventListener("progress",t.onDownloadProgress),"function"===typeof t.onUploadProgress&&h.upload&&h.upload.addEventListener("progress",t.onUploadProgress),t.cancelToken&&t.cancelToken.promise.then((function(t){h&&(h.abort(),u(t),h=null)})),void 0===l&&(l=null),h.send(l)}))}},b575:function(t,e,n){var i,r,o,a,s,c,u,l,f=n("da84"),h=n("0366"),d=n("06cf").f,p=n("2cf4").set,v=n("1cdc"),m=n("d4c3"),g=n("a4b4"),b=n("605d"),y=f.MutationObserver||f.WebKitMutationObserver,x=f.document,w=f.process,O=f.Promise,_=d(f,"queueMicrotask"),S=_&&_.value;S||(i=function(){var t,e;b&&(t=w.domain)&&t.exit();while(r){e=r.fn,r=r.next;try{e()}catch(n){throw r?a():o=void 0,n}}o=void 0,t&&t.enter()},v||b||g||!y||!x?!m&&O&&O.resolve?(u=O.resolve(void 0),u.constructor=O,l=h(u.then,u),a=function(){l(i)}):b?a=function(){w.nextTick(i)}:(p=h(p,f),a=function(){p(i)}):(s=!0,c=x.createTextNode(""),new y(i).observe(c,{characterData:!0}),a=function(){c.data=s=!s})),t.exports=S||function(t){var e={fn:t,next:void 0};o&&(o.next=e),r||(r=e,a()),o=e}},b5b6:function(t,e,n){},b622:function(t,e,n){var i=n("da84"),r=n("5692"),o=n("1a2d"),a=n("90e3"),s=n("4930"),c=n("fdbf"),u=r("wks"),l=i.Symbol,f=l&&l["for"],h=c?l:l&&l.withoutSetter||a;t.exports=function(t){if(!o(u,t)||!s&&"string"!=typeof u[t]){var e="Symbol."+t;s&&o(l,t)?u[t]=l[t]:u[t]=c&&f?f(e):h(e)}return u[t]}},b64b:function(t,e,n){var i=n("23e7"),r=n("7b0b"),o=n("df75"),a=n("d039"),s=a((function(){o(1)}));i({target:"Object",stat:!0,forced:s},{keys:function(t){return o(r(t))}})},b680:function(t,e,n){"use strict";var i=n("23e7"),r=n("da84"),o=n("e330"),a=n("5926"),s=n("408a"),c=n("1148"),u=n("d039"),l=r.RangeError,f=r.String,h=Math.floor,d=o(c),p=o("".slice),v=o(1..toFixed),m=function(t,e,n){return 0===e?n:e%2===1?m(t,e-1,n*t):m(t*t,e/2,n)},g=function(t){var 
e=0,n=t;while(n>=4096)e+=12,n/=4096;while(n>=2)e+=1,n/=2;return e},b=function(t,e,n){var i=-1,r=n;while(++i<6)r+=e*t[i],t[i]=r%1e7,r=h(r/1e7)},y=function(t,e){var n=6,i=0;while(--n>=0)i+=t[n],t[n]=h(i/e),i=i%e*1e7},x=function(t){var e=6,n="";while(--e>=0)if(""!==n||0===e||0!==t[e]){var i=f(t[e]);n=""===n?i:n+d("0",7-i.length)+i}return n},w=u((function(){return"0.000"!==v(8e-5,3)||"1"!==v(.9,0)||"1.25"!==v(1.255,2)||"1000000000000000128"!==v(0xde0b6b3a7640080,0)}))||!u((function(){v({})}));i({target:"Number",proto:!0,forced:w},{toFixed:function(t){var e,n,i,r,o=s(this),c=a(t),u=[0,0,0,0,0,0],h="",v="0";if(c<0||c>20)throw l("Incorrect fraction digits");if(o!=o)return"NaN";if(o<=-1e21||o>=1e21)return f(o);if(o<0&&(h="-",o=-o),o>1e-21)if(e=g(o*m(2,69,1))-69,n=e<0?o*m(2,-e,1):o/m(2,e,1),n*=4503599627370496,e=52-e,e>0){b(u,0,n),i=c;while(i>=7)b(u,1e7,0),i-=7;b(u,m(10,i,1),0),i=e-1;while(i>=23)y(u,1<<23),i-=23;y(u,1<<i),b(u,1,1),y(u,2),v=x(u)}else b(u,0,n),b(u,1<<-e,0),v=x(u)+d("0",c);return c>0?(r=v.length,v=h+(r<=c?"0."+d("0",c-r)+v:p(v,0,r-c)+"."+p(v,r-c))):v=h+v,v}})},b727:function(t,e,n){var i=n("0366"),r=n("e330"),o=n("44ad"),a=n("7b0b"),s=n("07fa"),c=n("65f0"),u=r([].push),l=function(t){var e=1==t,n=2==t,r=3==t,l=4==t,f=6==t,h=7==t,d=5==t||f;return function(p,v,m,g){for(var b,y,x=a(p),w=o(x),O=i(v,m),_=s(w),S=0,C=g||c,k=e?C(p,_):n||h?C(p,0):void 0;_>S;S++)if((d||S in w)&&(b=w[S],y=O(b,S,x),t))if(e)k[S]=y;else if(y)switch(t){case 3:return!0;case 5:return b;case 6:return S;case 2:u(k,b)}else switch(t){case 4:return!1;case 7:u(k,b)}return f?-1:r||l?l:k}};t.exports={forEach:l(0),map:l(1),filter:l(2),some:l(3),every:l(4),find:l(5),findIndex:l(6),filterReject:l(7)}},b85c:function(t,e,n){"use strict";n.d(e,"a",(function(){return r}));n("a4d3"),n("e01a"),n("d3b7"),n("d28b"),n("3ca3"),n("ddb0"),n("d9e2");var i=n("06c5");function r(t,e){var n="undefined"!==typeof Symbol&&t[Symbol.iterator]||t["@@iterator"];if(!n){if(Array.isArray(t)||(n=Object(i["a"])(t))||e&&t&&"number"===typeof t.length){n&&(t=n);var r=0,o=function(){};return{s:o,n:function(){return r>=t.length?{done:!0}:{done:!1,value:t[r++]}},e:function(t){throw t},f:o}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var a,s=!0,c=!1;return{s:function(){n=n.call(t)},n:function(){var t=n.next();return s=t.done,t},e:function(t){c=!0,a=t},f:function(){try{s||null==n["return"]||n["return"]()}finally{if(c)throw a}}}}},b974:function(t,e,n){"use strict";var i=n("b85c"),r=n("ade3"),o=n("5530"),a=(n("99af"),n("d3b7"),n("25f0"),n("fb6a"),n("ac1f"),n("1276"),n("498a"),n("4ec9"),n("3ca3"),n("ddb0"),n("a630"),n("c740"),n("e9c4"),n("b0c0"),n("d81d"),n("4de4"),n("2ca0"),n("caad"),n("2532"),n("a434"),n("4ff9"),n("68dd"),n("3835")),s=(n("8adc"),n("58df")),c=n("0789"),u=n("9d26"),l=n("a9ad"),f=n("4e82"),h=n("7560"),d=n("f2e7"),p=n("1c87"),v=n("af2b"),m=n("d9bd"),g=Object(s["a"])(l["a"],v["a"],p["a"],h["a"],Object(f["a"])("chipGroup"),Object(d["b"])("inputValue")).extend({name:"v-chip",props:{active:{type:Boolean,default:!0},activeClass:{type:String,default:function(){return 
this.chipGroup?this.chipGroup.activeClass:""}},close:Boolean,closeIcon:{type:String,default:"$delete"},closeLabel:{type:String,default:"$vuetify.close"},disabled:Boolean,draggable:Boolean,filter:Boolean,filterIcon:{type:String,default:"$complete"},label:Boolean,link:Boolean,outlined:Boolean,pill:Boolean,tag:{type:String,default:"span"},textColor:String,value:null},data:function(){return{proxyClass:"v-chip--active"}},computed:{classes:function(){return Object(o["a"])(Object(o["a"])(Object(o["a"])(Object(o["a"])({"v-chip":!0},p["a"].options.computed.classes.call(this)),{},{"v-chip--clickable":this.isClickable,"v-chip--disabled":this.disabled,"v-chip--draggable":this.draggable,"v-chip--label":this.label,"v-chip--link":this.isLink,"v-chip--no-color":!this.color,"v-chip--outlined":this.outlined,"v-chip--pill":this.pill,"v-chip--removable":this.hasClose},this.themeClasses),this.sizeableClasses),this.groupClasses)},hasClose:function(){return Boolean(this.close)},isClickable:function(){return Boolean(p["a"].options.computed.isClickable.call(this)||this.chipGroup)}},created:function(){var t=this,e=[["outline","outlined"],["selected","input-value"],["value","active"],["@input","@active.sync"]];e.forEach((function(e){var n=Object(a["a"])(e,2),i=n[0],r=n[1];t.$attrs.hasOwnProperty(i)&&Object(m["a"])(i,r,t)}))},methods:{click:function(t){this.$emit("click",t),this.chipGroup&&this.toggle()},genFilter:function(){var t=[];return this.isActive&&t.push(this.$createElement(u["a"],{staticClass:"v-chip__filter",props:{left:!0}},this.filterIcon)),this.$createElement(c["b"],t)},genClose:function(){var t=this;return this.$createElement(u["a"],{staticClass:"v-chip__close",props:{right:!0,size:18},attrs:{"aria-label":this.$vuetify.lang.t(this.closeLabel)},on:{click:function(e){e.stopPropagation(),e.preventDefault(),t.$emit("click:close"),t.$emit("update:active",!1)}}},this.closeIcon)},genContent:function(){return this.$createElement("span",{staticClass:"v-chip__content"},[this.filter&&this.genFilter(),this.$slots.default,this.hasClose&&this.genClose()])}},render:function(t){var e=[this.genContent()],n=this.generateRouteLink(),i=n.tag,r=n.data;r.attrs=Object(o["a"])(Object(o["a"])({},r.attrs),{},{draggable:this.draggable?"true":void 0,tabindex:this.chipGroup&&!this.disabled?0:r.attrs.tabindex}),r.directives.push({name:"show",value:this.active}),r=this.setBackgroundColor(this.color,r);var a=this.textColor||this.outlined&&this.color;return t(i,this.setTextColor(a,r),e)}}),b=g,y=n("2909"),x=(n("a9e3"),n("7db0"),n("ee6f"),h["a"].extend({name:"v-theme-provider",props:{root:Boolean},computed:{isDark:function(){return this.root?this.rootIsDark:h["a"].options.computed.isDark.call(this)}},render:function(){return this.$slots.default&&this.$slots.default.find((function(t){return!t.isComment&&" "!==t.text}))}})),w=n("53ca"),O=(n("b64b"),n("2b0e")),_=O["a"].extend().extend({name:"delayable",props:{openDelay:{type:[Number,String],default:0},closeDelay:{type:[Number,String],default:0}},data:function(){return{openTimeout:void 0,closeTimeout:void 0}},methods:{clearDelay:function(){clearTimeout(this.openTimeout),clearTimeout(this.closeTimeout)},runDelay:function(t,e){var n=this;this.clearDelay();var 
i=parseInt(this["".concat(t,"Delay")],10);this["".concat(t,"Timeout")]=setTimeout(e||function(){n.isActive={open:!0,close:!1}[t]},i)}}}),S=n("80d2"),C=Object(s["a"])(_,d["a"]),k=C.extend({name:"activatable",props:{activator:{default:null,validator:function(t){return["string","object"].includes(Object(w["a"])(t))}},disabled:Boolean,internalActivator:Boolean,openOnClick:{type:Boolean,default:!0},openOnHover:Boolean,openOnFocus:Boolean},data:function(){return{activatorElement:null,activatorNode:[],events:["click","mouseenter","mouseleave","focus"],listeners:{}}},watch:{activator:"resetActivator",openOnFocus:"resetActivator",openOnHover:"resetActivator"},mounted:function(){var t=Object(S["m"])(this,"activator",!0);t&&["v-slot","normal"].includes(t)&&Object(m["b"])('The activator slot must be bound, try \'<template v-slot:activator="{ on }"><v-btn v-on="on">\'',this),this.addActivatorEvents()},beforeDestroy:function(){this.removeActivatorEvents()},methods:{addActivatorEvents:function(){if(this.activator&&!this.disabled&&this.getActivator()){this.listeners=this.genActivatorListeners();for(var t=Object.keys(this.listeners),e=0,n=t;e<n.length;e++){var i=n[e];this.getActivator().addEventListener(i,this.listeners[i])}}},genActivator:function(){var t=Object(S["l"])(this,"activator",Object.assign(this.getValueProxy(),{on:this.genActivatorListeners(),attrs:this.genActivatorAttributes()}))||[];return this.activatorNode=t,t},genActivatorAttributes:function(){return{role:this.openOnClick&&!this.openOnHover?"button":void 0,"aria-haspopup":!0,"aria-expanded":String(this.isActive)}},genActivatorListeners:function(){var t=this;if(this.disabled)return{};var e={};return this.openOnHover?(e.mouseenter=function(e){t.getActivator(e),t.runDelay("open")},e.mouseleave=function(e){t.getActivator(e),t.runDelay("close")}):this.openOnClick&&(e.click=function(e){var n=t.getActivator(e);n&&n.focus(),e.stopPropagation(),t.isActive=!t.isActive}),this.openOnFocus&&(e.focus=function(e){t.getActivator(e),e.stopPropagation(),t.isActive=!t.isActive}),e},getActivator:function(t){var e;if(this.activatorElement)return this.activatorElement;var n=null;if(this.activator){var i=this.internalActivator?this.$el:document;n="string"===typeof this.activator?i.querySelector(this.activator):this.activator.$el?this.activator.$el:this.activator}else if(1===this.activatorNode.length||this.activatorNode.length&&!t){var r=this.activatorNode[0].componentInstance;n=r&&r.$options.mixins&&r.$options.mixins.some((function(t){return t.options&&["activatable","menuable"].includes(t.options.name)}))?r.getActivator():this.activatorNode[0].elm}else t&&(n=t.currentTarget||t.target);return this.activatorElement=(null==(e=n)?void 0:e.nodeType)===Node.ELEMENT_NODE?n:null,this.activatorElement},getContentSlot:function(){return Object(S["l"])(this,"default",this.getValueProxy(),!0)},getValueProxy:function(){var t=this;return{get value(){return t.isActive},set value(e){t.isActive=e}}},removeActivatorEvents:function(){if(this.activator&&this.activatorElement){for(var t=Object.keys(this.listeners),e=0,n=t;e<n.length;e++){var i=n[e];this.activatorElement.removeEventListener(i,this.listeners[i])}this.listeners={}}},resetActivator:function(){this.removeActivatorEvents(),this.activatorElement=null,this.getActivator(),this.addActivatorEvents()}}});function j(t){for(var e=[],n=0;n<t.length;n++){var i=t[n];i.isActive&&i.isDependent?e.push(i):e.push.apply(e,Object(y["a"])(j(i.$children)))}return e}var 
$=Object(s["a"])().extend({name:"dependent",data:function(){return{closeDependents:!0,isActive:!1,isDependent:!0}},watch:{isActive:function(t){if(!t)for(var e=this.getOpenDependents(),n=0;n<e.length;n++)e[n].isActive=!1}},methods:{getOpenDependents:function(){return this.closeDependents?j(this.$children):[]},getOpenDependentElements:function(){for(var t=[],e=this.getOpenDependents(),n=0;n<e.length;n++)t.push.apply(t,Object(y["a"])(e[n].getClickableDependentElements()));return t},getClickableDependentElements:function(){var t=[this.$el];return this.$refs.content&&t.push(this.$refs.content),this.overlay&&t.push(this.overlay.$el),t.push.apply(t,Object(y["a"])(this.getOpenDependentElements())),t}}}),A=O["a"].extend().extend({name:"stackable",data:function(){return{stackElement:null,stackExclude:null,stackMinZIndex:0,isActive:!1}},computed:{activeZIndex:function(){if("undefined"===typeof window)return 0;var t=this.stackElement||this.$refs.content,e=this.isActive?this.getMaxZIndex(this.stackExclude||[t])+2:Object(S["n"])(t);return null==e?e:parseInt(e)}},methods:{getMaxZIndex:function(){for(var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],e=this.$el,n=[this.stackMinZIndex,Object(S["n"])(e)],i=[].concat(Object(y["a"])(document.getElementsByClassName("v-menu__content--active")),Object(y["a"])(document.getElementsByClassName("v-dialog__content--active"))),r=0;r<i.length;r++)t.includes(i[r])||n.push(Object(S["n"])(i[r]));return Math.max.apply(Math,n)}}}),E=n("fe6c"),T=(n("159b"),O["a"].extend().extend({name:"bootable",props:{eager:Boolean},data:function(){return{isBooted:!1}},computed:{hasContent:function(){return this.isBooted||this.eager||this.isActive}},watch:{isActive:function(){this.isBooted=!0}},created:function(){"lazy"in this.$attrs&&Object(m["e"])("lazy",this)},methods:{showLazyContent:function(t){return this.hasContent&&t?t():[this.$createElement()]}}}));function L(t){var e=Object(w["a"])(t);return"boolean"===e||"string"===e||t.nodeType===Node.ELEMENT_NODE}function I(t){t.forEach((function(t){t.elm&&t.elm.parentNode&&t.elm.parentNode.removeChild(t.elm)}))}var B=Object(s["a"])(T).extend({name:"detachable",props:{attach:{default:!1,validator:L},contentClass:{type:String,default:""}},data:function(){return{activatorNode:null,hasDetached:!1}},watch:{attach:function(){this.hasDetached=!1,this.initDetach()},hasContent:function(){this.$nextTick(this.initDetach)}},beforeMount:function(){var t=this;this.$nextTick((function(){if(t.activatorNode){var e=Array.isArray(t.activatorNode)?t.activatorNode:[t.activatorNode];e.forEach((function(e){if(e.elm&&t.$el.parentNode){var n=t.$el===t.$el.parentNode.firstChild?t.$el:t.$el.nextSibling;t.$el.parentNode.insertBefore(e.elm,n)}}))}}))},mounted:function(){this.hasContent&&this.initDetach()},deactivated:function(){this.isActive=!1},beforeDestroy:function(){this.$refs.content&&this.$refs.content.parentNode&&this.$refs.content.parentNode.removeChild(this.$refs.content)},destroyed:function(){var t=this;if(this.activatorNode){var e=Array.isArray(this.activatorNode)?this.activatorNode:[this.activatorNode];if(this.$el.isConnected){var n=new MutationObserver((function(i){i.some((function(e){return Array.from(e.removedNodes).includes(t.$el)}))&&(n.disconnect(),I(e))}));n.observe(this.$el.parentNode,{subtree:!1,childList:!0})}else I(e)}},methods:{getScopeIdAttrs:function(){var t=Object(S["j"])(this.$vnode,"context.$options._scopeId");return t&&Object(r["a"])({},t,"")},initDetach:function(){var 
t;this._isDestroyed||!this.$refs.content||this.hasDetached||""===this.attach||!0===this.attach||"attach"===this.attach||(t=!1===this.attach?document.querySelector("[data-app]"):"string"===typeof this.attach?document.querySelector(this.attach):this.attach,t?(t.appendChild(this.$refs.content),this.hasDetached=!0):Object(m["c"])("Unable to locate target ".concat(this.attach||"[data-app]"),this))}}}),M=Object(s["a"])(A,Object(E["b"])(["top","right","bottom","left","absolute"]),k,B),P=M.extend().extend({name:"menuable",props:{allowOverflow:Boolean,light:Boolean,dark:Boolean,maxWidth:{type:[Number,String],default:"auto"},minWidth:[Number,String],nudgeBottom:{type:[Number,String],default:0},nudgeLeft:{type:[Number,String],default:0},nudgeRight:{type:[Number,String],default:0},nudgeTop:{type:[Number,String],default:0},nudgeWidth:{type:[Number,String],default:0},offsetOverflow:Boolean,positionX:{type:Number,default:null},positionY:{type:Number,default:null},zIndex:{type:[Number,String],default:null}},data:function(){return{activatorNode:[],absoluteX:0,absoluteY:0,activatedBy:null,activatorFixed:!1,dimensions:{activator:{top:0,left:0,bottom:0,right:0,width:0,height:0,offsetTop:0,scrollHeight:0,offsetLeft:0},content:{top:0,left:0,bottom:0,right:0,width:0,height:0,offsetTop:0,scrollHeight:0}},relativeYOffset:0,hasJustFocused:!1,hasWindow:!1,inputActivator:!1,isContentActive:!1,pageWidth:0,pageYOffset:0,stackClass:"v-menu__content--active",stackMinZIndex:6}},computed:{computedLeft:function(){var t=this.dimensions.activator,e=this.dimensions.content,n=(!1!==this.attach?t.offsetLeft:t.left)||0,i=Math.max(t.width,e.width),r=0;if(r+=n,(this.left||this.$vuetify.rtl&&!this.right)&&(r-=i-t.width),this.offsetX){var o=isNaN(Number(this.maxWidth))?t.width:Math.min(t.width,Number(this.maxWidth));r+=this.left?-o:t.width}return this.nudgeLeft&&(r-=parseInt(this.nudgeLeft)),this.nudgeRight&&(r+=parseInt(this.nudgeRight)),r},computedTop:function(){var t=this.dimensions.activator,e=this.dimensions.content,n=0;return this.top&&(n+=t.height-e.height),!1!==this.attach?n+=t.offsetTop:n+=t.top+this.pageYOffset,this.offsetY&&(n+=this.top?-t.height:t.height),this.nudgeTop&&(n-=parseInt(this.nudgeTop)),this.nudgeBottom&&(n+=parseInt(this.nudgeBottom)),n},hasActivator:function(){return!!this.$slots.activator||!!this.$scopedSlots.activator||!!this.activator||!!this.inputActivator},absoluteYOffset:function(){return this.pageYOffset-this.relativeYOffset}},watch:{disabled:function(t){t&&this.callDeactivate()},isActive:function(t){this.disabled||(t?this.callActivate():this.callDeactivate())},positionX:"updateDimensions",positionY:"updateDimensions"},beforeMount:function(){this.hasWindow="undefined"!==typeof window,this.hasWindow&&window.addEventListener("resize",this.updateDimensions,!1)},beforeDestroy:function(){this.hasWindow&&window.removeEventListener("resize",this.updateDimensions,!1)},methods:{absolutePosition:function(){return{offsetTop:this.positionY||this.absoluteY,offsetLeft:this.positionX||this.absoluteX,scrollHeight:0,top:this.positionY||this.absoluteY,bottom:this.positionY||this.absoluteY,left:this.positionX||this.absoluteX,right:this.positionX||this.absoluteX,height:0,width:0}},activate:function(){},calcLeft:function(t){return Object(S["d"])(!1!==this.attach?this.computedLeft:this.calcXOverflow(this.computedLeft,t))},calcTop:function(){return Object(S["d"])(!1!==this.attach?this.computedTop:this.calcYOverflow(this.computedTop))},calcXOverflow:function(t,e){var n=t+e-this.pageWidth+12;return 
t=(!this.left||this.right)&&n>0?Math.max(t-n,0):Math.max(t,12),t+this.getOffsetLeft()},calcYOverflow:function(t){var e=this.getInnerHeight(),n=this.absoluteYOffset+e,i=this.dimensions.activator,r=this.dimensions.content.height,o=t+r,a=n<o;return a&&this.offsetOverflow&&i.top>r?t=this.pageYOffset+(i.top-r):a&&!this.allowOverflow?t=n-r-12:t<this.absoluteYOffset&&!this.allowOverflow&&(t=this.absoluteYOffset+12),t<12?12:t},callActivate:function(){this.hasWindow&&this.activate()},callDeactivate:function(){this.isContentActive=!1,this.deactivate()},checkForPageYOffset:function(){this.hasWindow&&(this.pageYOffset=this.activatorFixed?0:this.getOffsetTop())},checkActivatorFixed:function(){if(!1===this.attach){var t=this.getActivator();while(t){if("fixed"===window.getComputedStyle(t).position)return void(this.activatorFixed=!0);t=t.offsetParent}this.activatorFixed=!1}},deactivate:function(){},genActivatorListeners:function(){var t=this,e=k.options.methods.genActivatorListeners.call(this),n=e.click;return n&&(e.click=function(e){t.openOnClick&&n&&n(e),t.absoluteX=e.clientX,t.absoluteY=e.clientY}),e},getInnerHeight:function(){return this.hasWindow?window.innerHeight||document.documentElement.clientHeight:0},getOffsetLeft:function(){return this.hasWindow?window.pageXOffset||document.documentElement.scrollLeft:0},getOffsetTop:function(){return this.hasWindow?window.pageYOffset||document.documentElement.scrollTop:0},getRoundedBoundedClientRect:function(t){var e=t.getBoundingClientRect();return{top:Math.round(e.top),left:Math.round(e.left),bottom:Math.round(e.bottom),right:Math.round(e.right),width:Math.round(e.width),height:Math.round(e.height)}},measure:function(t){if(!t||!this.hasWindow)return null;var e=this.getRoundedBoundedClientRect(t);if(!1!==this.attach){var n=window.getComputedStyle(t);e.left=parseInt(n.marginLeft),e.top=parseInt(n.marginTop)}return e},sneakPeek:function(t){var e=this;requestAnimationFrame((function(){var n=e.$refs.content;n&&"none"===n.style.display?(n.style.display="inline-block",t(),n.style.display="none"):t()}))},startTransition:function(){var t=this;return new Promise((function(e){return requestAnimationFrame((function(){t.isContentActive=t.hasJustFocused=t.isActive,e()}))}))},updateDimensions:function(){var t=this;this.hasWindow="undefined"!==typeof window,this.checkActivatorFixed(),this.checkForPageYOffset(),this.pageWidth=document.documentElement.clientWidth;var e={activator:Object(o["a"])({},this.dimensions.activator),content:Object(o["a"])({},this.dimensions.content)};if(!this.hasActivator||this.absolute)e.activator=this.absolutePosition();else{var n=this.getActivator();if(!n)return;e.activator=this.measure(n),e.activator.offsetLeft=n.offsetLeft,!1!==this.attach?e.activator.offsetTop=n.offsetTop:e.activator.offsetTop=0}this.sneakPeek((function(){if(t.$refs.content){if(t.$refs.content.offsetParent){var n=t.getRoundedBoundedClientRect(t.$refs.content.offsetParent);t.relativeYOffset=window.pageYOffset+n.top,e.activator.top-=t.relativeYOffset,e.activator.left-=window.pageXOffset+n.left}e.content=t.measure(t.$refs.content)}t.dimensions=e}))}}}),D=O["a"].extend({name:"returnable",props:{returnValue:null},data:function(){return{isActive:!1,originalValue:null}},watch:{isActive:function(t){t?this.originalValue=this.returnValue:this.$emit("update:return-value",this.originalValue)}},methods:{save:function(t){var e=this;this.originalValue=t,setTimeout((function(){e.isActive=!1}))}}}),R=n("a236"),N=n("dd89");function V(){return!0}function F(t,e,n){if(!t||!1===z(t,n))return!1;var 
i=Object(N["a"])(e);if("undefined"!==typeof ShadowRoot&&i instanceof ShadowRoot&&i.host===t.target)return!1;var r=("object"===Object(w["a"])(n.value)&&n.value.include||function(){return[]})();return r.push(e),!r.some((function(e){return e.contains(t.target)}))}function z(t,e){var n="object"===Object(w["a"])(e.value)&&e.value.closeConditional||V;return n(t)}function H(t,e,n,i){var r="function"===typeof n.value?n.value:n.value.handler;e._clickOutside.lastMousedownWasOutside&&F(t,e,n)&&setTimeout((function(){z(t,n)&&r&&r(t)}),0)}function W(t,e){var n=Object(N["a"])(t);e(document),"undefined"!==typeof ShadowRoot&&n instanceof ShadowRoot&&e(n)}var U={inserted:function(t,e,n){var i=function(i){return H(i,t,e,n)},r=function(n){t._clickOutside.lastMousedownWasOutside=F(n,t,e)};W(t,(function(t){t.addEventListener("click",i,!0),t.addEventListener("mousedown",r,!0)})),t._clickOutside||(t._clickOutside={lastMousedownWasOutside:!0}),t._clickOutside[n.context._uid]={onClick:i,onMousedown:r}},unbind:function(t,e,n){t._clickOutside&&(W(t,(function(e){var i;if(e&&null!=(i=t._clickOutside)&&i[n.context._uid]){var r=t._clickOutside[n.context._uid],o=r.onClick,a=r.onMousedown;e.removeEventListener("click",o,!0),e.removeEventListener("mousedown",a,!0)}})),delete t._clickOutside[n.context._uid])}},q=U,G=n("dc22"),Y=n("7d8f"),Z=Object(s["a"])($,_,D,R["a"],h["a"],P),K=Z.extend({name:"v-menu",directives:{ClickOutside:q,Resize:G["a"]},provide:function(){return{isInMenu:!0,theme:this.theme}},props:{auto:Boolean,closeOnClick:{type:Boolean,default:!0},closeOnContentClick:{type:Boolean,default:!0},disabled:Boolean,disableKeys:Boolean,maxHeight:{type:[Number,String],default:"auto"},offsetX:Boolean,offsetY:Boolean,openOnHover:Boolean,origin:{type:String,default:"top left"},transition:{type:[Boolean,String],default:"v-menu-transition"}},data:function(){return{calculatedTopAuto:0,defaultOffset:8,hasJustFocused:!1,listIndex:-1,resizeTimeout:0,selectedIndex:null,tiles:[]}},computed:{activeTile:function(){return this.tiles[this.listIndex]},calculatedLeft:function(){var t=Math.max(this.dimensions.content.width,parseFloat(this.calculatedMinWidth));return this.auto?Object(S["d"])(this.calcXOverflow(this.calcLeftAuto(),t))||"0":this.calcLeft(t)||"0"},calculatedMaxHeight:function(){var t=this.auto?"200px":Object(S["d"])(this.maxHeight);return t||"0"},calculatedMaxWidth:function(){return Object(S["d"])(this.maxWidth)||"0"},calculatedMinWidth:function(){if(this.minWidth)return Object(S["d"])(this.minWidth)||"0";var t=Math.min(this.dimensions.activator.width+Number(this.nudgeWidth)+(this.auto?16:0),Math.max(this.pageWidth-24,0)),e=isNaN(parseInt(this.calculatedMaxWidth))?t:parseInt(this.calculatedMaxWidth);return Object(S["d"])(Math.min(e,t))||"0"},calculatedTop:function(){var t=this.auto?Object(S["d"])(this.calcYOverflow(this.calculatedTopAuto)):this.calcTop();return t||"0"},hasClickableTiles:function(){return Boolean(this.tiles.find((function(t){return t.tabIndex>-1})))},styles:function(){return{maxHeight:this.calculatedMaxHeight,minWidth:this.calculatedMinWidth,maxWidth:this.calculatedMaxWidth,top:this.calculatedTop,left:this.calculatedLeft,transformOrigin:this.origin,zIndex:this.zIndex||this.activeZIndex}}},watch:{isActive:function(t){t||(this.listIndex=-1)},isContentActive:function(t){this.hasJustFocused=t},listIndex:function(t,e){if(t in this.tiles){var n=this.tiles[t];n.classList.add("v-list-item--highlighted");var 
i=this.$refs.content.scrollTop,r=this.$refs.content.clientHeight;i>n.offsetTop-8?Object(Y["b"])(n.offsetTop-n.clientHeight,{appOffset:!1,duration:300,container:this.$refs.content}):i+r<n.offsetTop+n.clientHeight+8&&Object(Y["b"])(n.offsetTop-r+2*n.clientHeight,{appOffset:!1,duration:300,container:this.$refs.content})}e in this.tiles&&this.tiles[e].classList.remove("v-list-item--highlighted")}},created:function(){this.$attrs.hasOwnProperty("full-width")&&Object(m["e"])("full-width",this)},mounted:function(){this.isActive&&this.callActivate()},methods:{activate:function(){var t=this;this.updateDimensions(),requestAnimationFrame((function(){t.startTransition().then((function(){t.$refs.content&&(t.calculatedTopAuto=t.calcTopAuto(),t.auto&&(t.$refs.content.scrollTop=t.calcScrollPosition()))}))}))},calcScrollPosition:function(){var t=this.$refs.content,e=t.querySelector(".v-list-item--active"),n=t.scrollHeight-t.offsetHeight;return e?Math.min(n,Math.max(0,e.offsetTop-t.offsetHeight/2+e.offsetHeight/2)):t.scrollTop},calcLeftAuto:function(){return parseInt(this.dimensions.activator.left-2*this.defaultOffset)},calcTopAuto:function(){var t=this.$refs.content,e=t.querySelector(".v-list-item--active");if(e||(this.selectedIndex=null),this.offsetY||!e)return this.computedTop;this.selectedIndex=Array.from(this.tiles).indexOf(e);var n=e.offsetTop-this.calcScrollPosition(),i=t.querySelector(".v-list-item").offsetTop;return this.computedTop-n-i-1},changeListIndex:function(t){if(this.getTiles(),this.isActive&&this.hasClickableTiles)if(t.keyCode!==S["p"].tab){if(t.keyCode===S["p"].down)this.nextTile();else if(t.keyCode===S["p"].up)this.prevTile();else if(t.keyCode===S["p"].end)this.lastTile();else if(t.keyCode===S["p"].home)this.firstTile();else{if(t.keyCode!==S["p"].enter||-1===this.listIndex)return;this.tiles[this.listIndex].click()}t.preventDefault()}else this.isActive=!1},closeConditional:function(t){var e=t.target;return this.isActive&&!this._isDestroyed&&this.closeOnClick&&!this.$refs.content.contains(e)},genActivatorAttributes:function(){var t=k.options.methods.genActivatorAttributes.call(this);return this.activeTile&&this.activeTile.id?Object(o["a"])(Object(o["a"])({},t),{},{"aria-activedescendant":this.activeTile.id}):t},genActivatorListeners:function(){var t=P.options.methods.genActivatorListeners.call(this);return this.disableKeys||(t.keydown=this.onKeyDown),t},genTransition:function(){var t=this.genContent();return this.transition?this.$createElement("transition",{props:{name:this.transition}},[t]):t},genDirectives:function(){var t=this,e=[{name:"show",value:this.isContentActive}];return!this.openOnHover&&this.closeOnClick&&e.push({name:"click-outside",value:{handler:function(){t.isActive=!1},closeConditional:this.closeConditional,include:function(){return[t.$el].concat(Object(y["a"])(t.getOpenDependentElements()))}}}),e},genContent:function(){var t=this,e={attrs:Object(o["a"])(Object(o["a"])({},this.getScopeIdAttrs()),{},{role:"role"in this.$attrs?this.$attrs.role:"menu"}),staticClass:"v-menu__content",class:Object(o["a"])(Object(o["a"])(Object(o["a"])({},this.rootThemeClasses),this.roundedClasses),{},Object(r["a"])({"v-menu__content--auto":this.auto,"v-menu__content--fixed":this.activatorFixed,menuable__content__active:this.isActive},this.contentClass.trim(),!0)),style:this.styles,directives:this.genDirectives(),ref:"content",on:{click:function(e){var n=e.target;n.getAttribute("disabled")||t.closeOnContentClick&&(t.isActive=!1)},keydown:this.onKeyDown}};return 
this.$listeners.scroll&&(e.on=e.on||{},e.on.scroll=this.$listeners.scroll),!this.disabled&&this.openOnHover&&(e.on=e.on||{},e.on.mouseenter=this.mouseEnterHandler),this.openOnHover&&(e.on=e.on||{},e.on.mouseleave=this.mouseLeaveHandler),this.$createElement("div",e,this.getContentSlot())},getTiles:function(){this.$refs.content&&(this.tiles=Array.from(this.$refs.content.querySelectorAll(".v-list-item, .v-divider, .v-subheader")))},mouseEnterHandler:function(){var t=this;this.runDelay("open",(function(){t.hasJustFocused||(t.hasJustFocused=!0)}))},mouseLeaveHandler:function(t){var e=this;this.runDelay("close",(function(){var n;null!=(n=e.$refs.content)&&n.contains(t.relatedTarget)||requestAnimationFrame((function(){e.isActive=!1,e.callDeactivate()}))}))},nextTile:function(){var t=this.tiles[this.listIndex+1];if(!t){if(!this.tiles.length)return;return this.listIndex=-1,void this.nextTile()}this.listIndex++,-1===t.tabIndex&&this.nextTile()},prevTile:function(){var t=this.tiles[this.listIndex-1];if(!t){if(!this.tiles.length)return;return this.listIndex=this.tiles.length,void this.prevTile()}this.listIndex--,-1===t.tabIndex&&this.prevTile()},lastTile:function(){var t=this.tiles[this.tiles.length-1];t&&(this.listIndex=this.tiles.length-1,-1===t.tabIndex&&this.prevTile())},firstTile:function(){var t=this.tiles[0];t&&(this.listIndex=0,-1===t.tabIndex&&this.nextTile())},onKeyDown:function(t){var e=this;if(t.keyCode===S["p"].esc){setTimeout((function(){e.isActive=!1}));var n=this.getActivator();this.$nextTick((function(){return n&&n.focus()}))}else!this.isActive&&[S["p"].up,S["p"].down].includes(t.keyCode)&&(this.isActive=!0);this.$nextTick((function(){return e.changeListIndex(t)}))},onResize:function(){this.isActive&&(this.$refs.content.offsetWidth,this.updateDimensions(),clearTimeout(this.resizeTimeout),this.resizeTimeout=window.setTimeout(this.updateDimensions,100))}},render:function(t){var e=this,n={staticClass:"v-menu",class:{"v-menu--attached":""===this.attach||!0===this.attach||"attach"===this.attach},directives:[{arg:"500",name:"resize",value:this.onResize}]};return t("div",n,[!this.activator&&this.genActivator(),this.showLazyContent((function(){return[e.$createElement(x,{props:{root:!0,light:e.light,dark:e.dark}},[e.genTransition()])]}))])}}),X=K,J=(n("a15b"),n("cf36"),n("5607")),Q=n("132d"),tt=n("d9f7"),et=O["a"].extend({name:"v-simple-checkbox",functional:!0,directives:{ripple:J["a"]},props:Object(o["a"])(Object(o["a"])(Object(o["a"])({},l["a"].options.props),h["a"].options.props),{},{disabled:Boolean,ripple:{type:Boolean,default:!0},value:Boolean,indeterminate:Boolean,indeterminateIcon:{type:String,default:"$checkboxIndeterminate"},onIcon:{type:String,default:"$checkboxOn"},offIcon:{type:String,default:"$checkboxOff"}}),render:function(t,e){var n=e.props,i=e.data,r=(e.listeners,[]),o=n.offIcon;if(n.indeterminate?o=n.indeterminateIcon:n.value&&(o=n.onIcon),r.push(t(Q["a"],l["a"].options.methods.setTextColor(n.value&&n.color,{props:{disabled:n.disabled,dark:n.dark,light:n.light}}),o)),n.ripple&&!n.disabled){var a=t("div",l["a"].options.methods.setTextColor(n.color,{staticClass:"v-input--selection-controls__ripple",directives:[{name:"ripple",value:{center:!0}}]}));r.push(a)}return t("div",Object(tt["a"])(i,{class:{"v-simple-checkbox":!0,"v-simple-checkbox--disabled":n.disabled},on:{click:function(t){t.stopPropagation(),i.on&&i.on.input&&!n.disabled&&Object(S["v"])(i.on.input).forEach((function(t){return 
t(!n.value)}))}}}),[t("div",{staticClass:"v-input--selection-controls__input"},r)])}}),nt=(n("8ce9"),h["a"].extend({name:"v-divider",props:{inset:Boolean,vertical:Boolean},render:function(t){var e;return this.$attrs.role&&"separator"!==this.$attrs.role||(e=this.vertical?"vertical":"horizontal"),t("hr",{class:Object(o["a"])({"v-divider":!0,"v-divider--inset":this.inset,"v-divider--vertical":this.vertical},this.themeClasses),attrs:Object(o["a"])({role:"separator","aria-orientation":e},this.$attrs),on:this.$listeners})}})),it=nt,rt=(n("0bc6"),Object(s["a"])(h["a"]).extend({name:"v-subheader",props:{inset:Boolean},render:function(t){return t("div",{staticClass:"v-subheader",class:Object(o["a"])({"v-subheader--inset":this.inset},this.themeClasses),attrs:this.$attrs,on:this.$listeners},this.$slots.default)}})),ot=rt,at=(n("61d2"),Object(s["a"])(l["a"],p["a"],h["a"],Object(f["a"])("listItemGroup"),Object(d["b"])("inputValue"))),st=at.extend().extend({name:"v-list-item",directives:{Ripple:J["a"]},inject:{isInGroup:{default:!1},isInList:{default:!1},isInMenu:{default:!1},isInNav:{default:!1}},inheritAttrs:!1,props:{activeClass:{type:String,default:function(){return this.listItemGroup?this.listItemGroup.activeClass:""}},dense:Boolean,inactive:Boolean,link:Boolean,selectable:{type:Boolean},tag:{type:String,default:"div"},threeLine:Boolean,twoLine:Boolean,value:null},data:function(){return{proxyClass:"v-list-item--active"}},computed:{classes:function(){return Object(o["a"])(Object(o["a"])({"v-list-item":!0},p["a"].options.computed.classes.call(this)),{},{"v-list-item--dense":this.dense,"v-list-item--disabled":this.disabled,"v-list-item--link":this.isClickable&&!this.inactive,"v-list-item--selectable":this.selectable,"v-list-item--three-line":this.threeLine,"v-list-item--two-line":this.twoLine},this.themeClasses)},isClickable:function(){return Boolean(p["a"].options.computed.isClickable.call(this)||this.listItemGroup)}},created:function(){this.$attrs.hasOwnProperty("avatar")&&Object(m["e"])("avatar",this)},methods:{click:function(t){t.detail&&this.$el.blur(),this.$emit("click",t),this.to||this.toggle()},genAttrs:function(){var t=Object(o["a"])({"aria-disabled":!!this.disabled||void 0,tabindex:this.isClickable&&!this.disabled?0:-1},this.$attrs);return this.$attrs.hasOwnProperty("role")||this.isInNav||(this.isInGroup?(t.role="option",t["aria-selected"]=String(this.isActive)):this.isInMenu?(t.role=this.isClickable?"menuitem":void 0,t.id=t.id||"list-item-".concat(this._uid)):this.isInList&&(t.role="listitem")),t},toggle:function(){this.to&&void 0===this.inputValue&&(this.isActive=!this.isActive),this.$emit("change")}},render:function(t){var e=this,n=this.generateRouteLink(),i=n.tag,r=n.data;r.attrs=Object(o["a"])(Object(o["a"])({},r.attrs),this.genAttrs()),r[this.to?"nativeOn":"on"]=Object(o["a"])(Object(o["a"])({},r[this.to?"nativeOn":"on"]),{},{keydown:function(t){t.keyCode===S["p"].enter&&e.click(t),e.$emit("keydown",t)}}),this.inactive&&(i="div"),this.inactive&&this.to&&(r.on=r.nativeOn,delete r.nativeOn);var a=this.$scopedSlots.default?this.$scopedSlots.default({active:this.isActive,toggle:this.toggle}):this.$slots.default;return t(i,this.isActive?this.setTextColor(this.color,r):r,a)}}),ct=O["a"].extend({name:"v-list-item-action",functional:!0,render:function(t,e){var n=e.data,i=e.children,r=void 0===i?[]:i;n.staticClass=n.staticClass?"v-list-item__action ".concat(n.staticClass):"v-list-item__action";var o=r.filter((function(t){return!1===t.isComment&&" "!==t.text}));return 
o.length>1&&(n.staticClass+=" v-list-item__action--stack"),t("div",n,r)}}),ut=(n("0481"),n("4069"),n("3ad0"),n("8dd9")),lt=ut["a"].extend().extend({name:"v-list",provide:function(){return{isInList:!0,list:this}},inject:{isInMenu:{default:!1},isInNav:{default:!1}},props:{dense:Boolean,disabled:Boolean,expand:Boolean,flat:Boolean,nav:Boolean,rounded:Boolean,subheader:Boolean,threeLine:Boolean,twoLine:Boolean},data:function(){return{groups:[]}},computed:{classes:function(){return Object(o["a"])(Object(o["a"])({},ut["a"].options.computed.classes.call(this)),{},{"v-list--dense":this.dense,"v-list--disabled":this.disabled,"v-list--flat":this.flat,"v-list--nav":this.nav,"v-list--rounded":this.rounded,"v-list--subheader":this.subheader,"v-list--two-line":this.twoLine,"v-list--three-line":this.threeLine})}},methods:{register:function(t){this.groups.push(t)},unregister:function(t){var e=this.groups.findIndex((function(e){return e._uid===t._uid}));e>-1&&this.groups.splice(e,1)},listClick:function(t){if(!this.expand){var e,n=Object(i["a"])(this.groups);try{for(n.s();!(e=n.n()).done;){var r=e.value;r.toggle(t)}}catch(o){n.e(o)}finally{n.f()}}}},render:function(t){var e={staticClass:"v-list",class:this.classes,style:this.styles,attrs:Object(o["a"])({role:this.isInNav||this.isInMenu?void 0:"list"},this.attrs$)};return t(this.tag,this.setBackgroundColor(this.color,e),[this.$slots.default])}}),ft=(n("4d63"),n("c607"),n("2c3e"),n("466d"),n("db42"),O["a"].extend({name:"v-list-item-icon",functional:!0,render:function(t,e){var n=e.data,i=e.children;return n.staticClass="v-list-item__icon ".concat(n.staticClass||"").trim(),t("div",n,i)}})),ht=n("7e2b"),dt=n("3206"),pt=Object(s["a"])(ht["a"],T,l["a"],Object(dt["a"])("list"),d["a"]),vt=(pt.extend().extend({name:"v-list-group",directives:{ripple:J["a"]},props:{activeClass:{type:String,default:""},appendIcon:{type:String,default:"$expand"},color:{type:String,default:"primary"},disabled:Boolean,group:[String,RegExp],noAction:Boolean,prependIcon:String,ripple:{type:[Boolean,Object],default:!0},subGroup:Boolean},computed:{classes:function(){return{"v-list-group--active":this.isActive,"v-list-group--disabled":this.disabled,"v-list-group--no-action":this.noAction,"v-list-group--sub-group":this.subGroup}}},watch:{isActive:function(t){!this.subGroup&&t&&this.list&&this.list.listClick(this._uid)},$route:"onRouteChange"},created:function(){this.list&&this.list.register(this),this.group&&this.$route&&null==this.value&&(this.isActive=this.matchRoute(this.$route.path))},beforeDestroy:function(){this.list&&this.list.unregister(this)},methods:{click:function(t){var e=this;this.disabled||(this.isBooted=!0,this.$emit("click",t),this.$nextTick((function(){return e.isActive=!e.isActive})))},genIcon:function(t){return this.$createElement(u["a"],t)},genAppendIcon:function(){var t=!this.subGroup&&this.appendIcon;return t||this.$slots.appendIcon?this.$createElement(ft,{staticClass:"v-list-group__header__append-icon"},[this.$slots.appendIcon||this.genIcon(t)]):null},genHeader:function(){return this.$createElement(st,{staticClass:"v-list-group__header",attrs:{"aria-expanded":String(this.isActive),role:"button"},class:Object(r["a"])({},this.activeClass,this.isActive),props:{inputValue:this.isActive},directives:[{name:"ripple",value:this.ripple}],on:Object(o["a"])(Object(o["a"])({},this.listeners$),{},{click:this.click})},[this.genPrependIcon(),this.$slots.activator,this.genAppendIcon()])},genItems:function(){var t=this;return 
this.showLazyContent((function(){return[t.$createElement("div",{staticClass:"v-list-group__items",directives:[{name:"show",value:t.isActive}]},Object(S["l"])(t))]}))},genPrependIcon:function(){var t=this.subGroup&&null==this.prependIcon?"$subgroup":this.prependIcon;return t||this.$slots.prependIcon?this.$createElement(ft,{staticClass:"v-list-group__header__prepend-icon"},[this.$slots.prependIcon||this.genIcon(t)]):null},onRouteChange:function(t){if(this.group){var e=this.matchRoute(t.path);e&&this.isActive!==e&&this.list&&this.list.listClick(this._uid),this.isActive=e}},toggle:function(t){var e=this,n=this._uid===t;n&&(this.isBooted=!0),this.$nextTick((function(){return e.isActive=n}))},matchRoute:function(t){return null!==t.match(this.group)}},render:function(t){return t("div",this.setTextColor(this.isActive&&this.color,{staticClass:"v-list-group",class:this.classes}),[this.genHeader(),t(c["a"],this.genItems())])}}),n("899c"),n("166a"),n("8547")),mt=n("a452"),gt=Object(s["a"])(vt["a"],mt["a"],h["a"]).extend({name:"base-item-group",props:{activeClass:{type:String,default:"v-item--active"},mandatory:Boolean,max:{type:[Number,String],default:null},multiple:Boolean,tag:{type:String,default:"div"}},data:function(){return{internalLazyValue:void 0!==this.value?this.value:this.multiple?[]:void 0,items:[]}},computed:{classes:function(){return Object(o["a"])({"v-item-group":!0},this.themeClasses)},selectedIndex:function(){return this.selectedItem&&this.items.indexOf(this.selectedItem)||-1},selectedItem:function(){if(!this.multiple)return this.selectedItems[0]},selectedItems:function(){var t=this;return this.items.filter((function(e,n){return t.toggleMethod(t.getValue(e,n))}))},selectedValues:function(){return null==this.internalValue?[]:Array.isArray(this.internalValue)?this.internalValue:[this.internalValue]},toggleMethod:function(){var t=this;if(!this.multiple)return function(e){return t.valueComparator(t.internalValue,e)};var e=this.internalValue;return Array.isArray(e)?function(n){return e.some((function(e){return t.valueComparator(e,n)}))}:function(){return!1}}},watch:{internalValue:"updateItemsState",items:"updateItemsState"},created:function(){this.multiple&&!Array.isArray(this.internalValue)&&Object(m["c"])("Model must be bound to an array if the multiple property is true.",this)},methods:{genData:function(){return{class:this.classes}},getValue:function(t,e){return void 0===t.value?e:t.value},onClick:function(t){this.updateInternalValue(this.getValue(t,this.items.indexOf(t)))},register:function(t){var e=this,n=this.items.push(t)-1;t.$on("change",(function(){return e.onClick(t)})),this.mandatory&&!this.selectedValues.length&&this.updateMandatory(),this.updateItem(t,n)},unregister:function(t){if(!this._isDestroyed){var e=this.items.indexOf(t),n=this.getValue(t,e);this.items.splice(e,1);var i=this.selectedValues.indexOf(n);if(!(i<0)){if(!this.mandatory)return this.updateInternalValue(n);this.multiple&&Array.isArray(this.internalValue)?this.internalValue=this.internalValue.filter((function(t){return t!==n})):this.internalValue=void 0,this.selectedItems.length||this.updateMandatory(!0)}}},updateItem:function(t,e){var n=this.getValue(t,e);t.isActive=this.toggleMethod(n)},updateItemsState:function(){var t=this;this.$nextTick((function(){if(t.mandatory&&!t.selectedItems.length)return t.updateMandatory();t.items.forEach(t.updateItem)}))},updateInternalValue:function(t){this.multiple?this.updateMultiple(t):this.updateSingle(t)},updateMandatory:function(t){if(this.items.length){var 
e=this.items.slice();t&&e.reverse();var n=e.find((function(t){return!t.disabled}));if(n){var i=this.items.indexOf(n);this.updateInternalValue(this.getValue(n,i))}}},updateMultiple:function(t){var e=Array.isArray(this.internalValue)?this.internalValue:[],n=e.slice(),i=n.findIndex((function(e){return e===t}));this.mandatory&&i>-1&&n.length-1<1||null!=this.max&&i<0&&n.length+1>this.max||(i>-1?n.splice(i,1):n.push(t),this.internalValue=n)},updateSingle:function(t){var e=t===this.internalValue;this.mandatory&&e||(this.internalValue=e?void 0:t)}},render:function(t){return t(this.tag,this.genData(),this.$slots.default)}}),bt=(gt.extend({name:"v-item-group",provide:function(){return{itemGroup:this}}}),Object(s["a"])(gt,l["a"]).extend({name:"v-list-item-group",provide:function(){return{isInGroup:!0,listItemGroup:this}},computed:{classes:function(){return Object(o["a"])(Object(o["a"])({},gt.options.computed.classes.call(this)),{},{"v-list-item-group":!0})}},methods:{genData:function(){return this.setTextColor(this.color,Object(o["a"])(Object(o["a"])({},gt.options.methods.genData.call(this)),{},{attrs:{role:"listbox"}}))}}}),n("3408"),n("24b2")),yt=Object(s["a"])(l["a"],bt["a"],R["a"]).extend({name:"v-avatar",props:{left:Boolean,right:Boolean,size:{type:[Number,String],default:48}},computed:{classes:function(){return Object(o["a"])({"v-avatar--left":this.left,"v-avatar--right":this.right},this.roundedClasses)},styles:function(){return Object(o["a"])({height:Object(S["d"])(this.size),minWidth:Object(S["d"])(this.size),width:Object(S["d"])(this.size)},this.measurableStyles)}},render:function(t){var e={staticClass:"v-avatar",class:this.classes,style:this.styles,on:this.$listeners};return t("div",this.setBackgroundColor(this.color,e),this.$slots.default)}}),xt=yt,wt=(xt.extend({name:"v-list-item-avatar",props:{horizontal:Boolean,size:{type:[Number,String],default:40}},computed:{classes:function(){return Object(o["a"])(Object(o["a"])({"v-list-item__avatar--horizontal":this.horizontal},xt.options.computed.classes.call(this)),{},{"v-avatar--tile":this.tile||this.horizontal})}},render:function(t){var e=xt.options.render.call(this,t);return e.data=e.data||{},e.data.staticClass+=" v-list-item__avatar",e}}),Object(S["e"])("v-list-item__action-text","span"),Object(S["e"])("v-list-item__content","div")),Ot=Object(S["e"])("v-list-item__title","div"),_t=(Object(S["e"])("v-list-item__subtitle","div"),Object(s["a"])(l["a"],h["a"]).extend({name:"v-select-list",directives:{ripple:J["a"]},props:{action:Boolean,dense:Boolean,hideSelected:Boolean,items:{type:Array,default:function(){return[]}},itemDisabled:{type:[String,Array,Function],default:"disabled"},itemText:{type:[String,Array,Function],default:"text"},itemValue:{type:[String,Array,Function],default:"value"},noDataText:String,noFilter:Boolean,searchInput:null,selectedItems:{type:Array,default:function(){return[]}}},computed:{parsedItems:function(){var t=this;return this.selectedItems.map((function(e){return t.getValue(e)}))},tileActiveClass:function(){return Object.keys(this.setTextColor(this.color).class||{}).join(" ")},staticNoDataTile:function(){var t={attrs:{role:void 0},on:{mousedown:function(t){return t.preventDefault()}}};return this.$createElement(st,t,[this.genTileContent(this.noDataText)])}},methods:{genAction:function(t,e){var n=this;return this.$createElement(ct,[this.$createElement(et,{props:{color:this.color,value:e,ripple:!1},on:{input:function(){return n.$emit("select",t)}}})])},genDivider:function(t){return 
this.$createElement(it,{props:t})},genFilteredText:function(t){if(t=t||"",!this.searchInput||this.noFilter)return Object(S["g"])(t);var e=this.getMaskedCharacters(t),n=e.start,i=e.middle,r=e.end;return"".concat(Object(S["g"])(n)).concat(this.genHighlight(i)).concat(Object(S["g"])(r))},genHeader:function(t){return this.$createElement(ot,{props:t},t.header)},genHighlight:function(t){return'<span class="v-list-item__mask">'.concat(Object(S["g"])(t),"</span>")},getMaskedCharacters:function(t){var e=(this.searchInput||"").toString().toLocaleLowerCase(),n=t.toLocaleLowerCase().indexOf(e);if(n<0)return{start:t,middle:"",end:""};var i=t.slice(0,n),r=t.slice(n,n+e.length),o=t.slice(n+e.length);return{start:i,middle:r,end:o}},genTile:function(t){var e=this,n=t.item,i=t.index,r=t.disabled,a=void 0===r?null:r,s=t.value,c=void 0!==s&&s;c||(c=this.hasItem(n)),n===Object(n)&&(a=null!==a?a:this.getDisabled(n));var u={attrs:{"aria-selected":String(c),id:"list-item-".concat(this._uid,"-").concat(i),role:"option"},on:{mousedown:function(t){t.preventDefault()},click:function(){return a||e.$emit("select",n)}},props:{activeClass:this.tileActiveClass,disabled:a,ripple:!0,inputValue:c}};if(!this.$scopedSlots.item)return this.$createElement(st,u,[this.action&&!this.hideSelected&&this.items.length>0?this.genAction(n,c):null,this.genTileContent(n,i)]);var l=this,f=this.$scopedSlots.item({parent:l,item:n,attrs:Object(o["a"])(Object(o["a"])({},u.attrs),u.props),on:u.on});return this.needsTile(f)?this.$createElement(st,u,f):f},genTileContent:function(t){var e=this.genFilteredText(this.getText(t));return this.$createElement(wt,[this.$createElement(Ot,{domProps:{innerHTML:e}})])},hasItem:function(t){return this.parsedItems.indexOf(this.getValue(t))>-1},needsTile:function(t){return 1!==t.length||null==t[0].componentOptions||"v-list-item"!==t[0].componentOptions.Ctor.options.name},getDisabled:function(t){return Boolean(Object(S["k"])(t,this.itemDisabled,!1))},getText:function(t){return String(Object(S["k"])(t,this.itemText,t))},getValue:function(t){return Object(S["k"])(t,this.itemValue,this.getText(t))}},render:function(){for(var t=[],e=this.items.length,n=0;n<e;n++){var i=this.items[n];this.hideSelected&&this.hasItem(i)||(null==i?t.push(this.genTile({item:i,index:n})):i.header?t.push(this.genHeader(i)):i.divider?t.push(this.genDivider(i)):t.push(this.genTile({item:i,index:n})))}return 
t.length||t.push(this.$slots["no-data"]||this.staticNoDataTile),this.$slots["prepend-item"]&&t.unshift(this.$slots["prepend-item"]),this.$slots["append-item"]&&t.push(this.$slots["append-item"]),this.$createElement(lt,{staticClass:"v-select-list",class:this.themeClasses,attrs:{role:"listbox",tabindex:-1},props:{dense:this.dense}},t)}})),St=n("c37a"),Ct=n("8654"),kt=O["a"].extend({name:"filterable",props:{noDataText:{type:String,default:"$vuetify.noDataText"}}}),jt={closeOnClick:!1,closeOnContentClick:!1,disableKeys:!0,openOnClick:!1,maxHeight:304},$t=Object(s["a"])(Ct["a"],vt["a"],$,kt);e["a"]=$t.extend().extend({name:"v-select",directives:{ClickOutside:q},props:{appendIcon:{type:String,default:"$dropdown"},attach:{type:null,default:!1},cacheItems:Boolean,chips:Boolean,clearable:Boolean,deletableChips:Boolean,disableLookup:Boolean,eager:Boolean,hideSelected:Boolean,items:{type:Array,default:function(){return[]}},itemColor:{type:String,default:"primary"},itemDisabled:{type:[String,Array,Function],default:"disabled"},itemText:{type:[String,Array,Function],default:"text"},itemValue:{type:[String,Array,Function],default:"value"},menuProps:{type:[String,Array,Object],default:function(){return jt}},multiple:Boolean,openOnClear:Boolean,returnObject:Boolean,smallChips:Boolean},data:function(){return{cachedItems:this.cacheItems?this.items:[],menuIsBooted:!1,isMenuActive:!1,lastItem:20,lazyValue:void 0!==this.value?this.value:this.multiple?[]:void 0,selectedIndex:-1,selectedItems:[],keyboardLookupPrefix:"",keyboardLookupLastTime:0}},computed:{allItems:function(){return this.filterDuplicates(this.cachedItems.concat(this.items))},classes:function(){return Object(o["a"])(Object(o["a"])({},Ct["a"].options.computed.classes.call(this)),{},{"v-select":!0,"v-select--chips":this.hasChips,"v-select--chips--small":this.smallChips,"v-select--is-menu-active":this.isMenuActive,"v-select--is-multi":this.multiple})},computedItems:function(){return this.allItems},computedOwns:function(){return"list-".concat(this._uid)},computedCounterValue:function(){var t=this.multiple?this.selectedItems:(this.getText(this.selectedItems[0])||"").toString();return"function"===typeof this.counterValue?this.counterValue(t):t.length},directives:function(){var t=this;return this.isFocused?[{name:"click-outside",value:{handler:this.blur,closeConditional:this.closeConditional,include:function(){return t.getOpenDependentElements()}}}]:void 0},dynamicHeight:function(){return"auto"},hasChips:function(){return this.chips||this.smallChips},hasSlot:function(){return Boolean(this.hasChips||this.$scopedSlots.selection)},isDirty:function(){return this.selectedItems.length>0},listData:function(){var t=this.$vnode&&this.$vnode.context.$options._scopeId,e=t?Object(r["a"])({},t,!0):{};return{attrs:Object(o["a"])(Object(o["a"])({},e),{},{id:this.computedOwns}),props:{action:this.multiple,color:this.itemColor,dense:this.dense,hideSelected:this.hideSelected,items:this.virtualizedItems,itemDisabled:this.itemDisabled,itemText:this.itemText,itemValue:this.itemValue,noDataText:this.$vuetify.lang.t(this.noDataText),selectedItems:this.selectedItems},on:{select:this.selectItem},scopedSlots:{item:this.$scopedSlots.item}}},staticList:function(){return(this.$slots["no-data"]||this.$slots["prepend-item"]||this.$slots["append-item"])&&Object(m["b"])("assert: staticList should not be called if slots are used"),this.$createElement(_t,this.listData)},virtualizedItems:function(){return 
this.$_menuProps.auto?this.computedItems:this.computedItems.slice(0,this.lastItem)},menuCanShow:function(){return!0},$_menuProps:function(){var t="string"===typeof this.menuProps?this.menuProps.split(","):this.menuProps;return Array.isArray(t)&&(t=t.reduce((function(t,e){return t[e.trim()]=!0,t}),{})),Object(o["a"])(Object(o["a"])({},jt),{},{eager:this.eager,value:this.menuCanShow&&this.isMenuActive,nudgeBottom:t.offsetY?1:0},t)}},watch:{internalValue:function(t){var e=this;this.initialValue=t,this.setSelectedItems(),this.multiple&&this.$nextTick((function(){var t;null==(t=e.$refs.menu)||t.updateDimensions()}))},isMenuActive:function(t){var e=this;window.setTimeout((function(){return e.onMenuActiveChange(t)}))},items:{immediate:!0,handler:function(t){var e=this;this.cacheItems&&this.$nextTick((function(){e.cachedItems=e.filterDuplicates(e.cachedItems.concat(t))})),this.setSelectedItems()}}},methods:{blur:function(t){Ct["a"].options.methods.blur.call(this,t),this.isMenuActive=!1,this.isFocused=!1,this.selectedIndex=-1,this.setMenuIndex(-1)},activateMenu:function(){this.isInteractive&&!this.isMenuActive&&(this.isMenuActive=!0)},clearableCallback:function(){var t=this;this.setValue(this.multiple?[]:null),this.setMenuIndex(-1),this.$nextTick((function(){return t.$refs.input&&t.$refs.input.focus()})),this.openOnClear&&(this.isMenuActive=!0)},closeConditional:function(t){return!this.isMenuActive||!this._isDestroyed&&(!this.getContent()||!this.getContent().contains(t.target))&&this.$el&&!this.$el.contains(t.target)&&t.target!==this.$el},filterDuplicates:function(t){for(var e=new Map,n=0;n<t.length;++n){var i=t[n];if(null!=i)if(i.header||i.divider)e.set(i,i);else{var r=this.getValue(i);!e.has(r)&&e.set(r,i)}}return Array.from(e.values())},findExistingIndex:function(t){var e=this,n=this.getValue(t);return(this.internalValue||[]).findIndex((function(t){return e.valueComparator(e.getValue(t),n)}))},getContent:function(){return this.$refs.menu&&this.$refs.menu.$refs.content},genChipSelection:function(t,e){var n=this,i=this.isDisabled||this.getDisabled(t),r=!i&&this.isInteractive;return this.$createElement(b,{staticClass:"v-chip--select",attrs:{tabindex:-1},props:{close:this.deletableChips&&r,disabled:i,inputValue:e===this.selectedIndex,small:this.smallChips},on:{click:function(t){r&&(t.stopPropagation(),n.selectedIndex=e)},"click:close":function(){return n.onChipInput(t)}},key:JSON.stringify(this.getValue(t))},this.getText(t))},genCommaSelection:function(t,e,n){var i=e===this.selectedIndex&&this.computedColor,r=this.isDisabled||this.getDisabled(t);return this.$createElement("div",this.setTextColor(i,{staticClass:"v-select__selection v-select__selection--comma",class:{"v-select__selection--disabled":r},key:JSON.stringify(this.getValue(t))}),"".concat(this.getText(t)).concat(n?"":", "))},genDefaultSlot:function(){var t=this.genSelections(),e=this.genInput();return Array.isArray(t)?t.push(e):(t.children=t.children||[],t.children.push(e)),[this.genFieldset(),this.$createElement("div",{staticClass:"v-select__slot",directives:this.directives},[this.genLabel(),this.prefix?this.genAffix("prefix"):null,t,this.suffix?this.genAffix("suffix"):null,this.genClearIcon(),this.genIconSlot(),this.genHiddenInput()]),this.genMenu(),this.genProgress()]},genIcon:function(t,e,n){var i=St["a"].options.methods.genIcon.call(this,t,e,n);return"append"===t&&(i.children[0].data=Object(tt["a"])(i.children[0].data,{attrs:{tabindex:i.children[0].componentOptions.listeners&&"-1","aria-hidden":"true","aria-label":void 
0}})),i},genInput:function(){var t=Ct["a"].options.methods.genInput.call(this);return delete t.data.attrs.name,t.data=Object(tt["a"])(t.data,{domProps:{value:null},attrs:{readonly:!0,type:"text","aria-readonly":String(this.isReadonly),"aria-activedescendant":Object(S["j"])(this.$refs.menu,"activeTile.id"),autocomplete:Object(S["j"])(t.data,"attrs.autocomplete","off"),placeholder:this.isDirty||!this.persistentPlaceholder&&!this.isFocused&&this.hasLabel?void 0:this.placeholder},on:{keypress:this.onKeyPress}}),t},genHiddenInput:function(){return this.$createElement("input",{domProps:{value:this.lazyValue},attrs:{type:"hidden",name:this.attrs$.name}})},genInputSlot:function(){var t=Ct["a"].options.methods.genInputSlot.call(this);return t.data.attrs=Object(o["a"])(Object(o["a"])({},t.data.attrs),{},{role:"button","aria-haspopup":"listbox","aria-expanded":String(this.isMenuActive),"aria-owns":this.computedOwns}),t},genList:function(){return this.$slots["no-data"]||this.$slots["prepend-item"]||this.$slots["append-item"]?this.genListWithSlot():this.staticList},genListWithSlot:function(){var t=this,e=["prepend-item","no-data","append-item"].filter((function(e){return t.$slots[e]})).map((function(e){return t.$createElement("template",{slot:e},t.$slots[e])}));return this.$createElement(_t,Object(o["a"])({},this.listData),e)},genMenu:function(){var t=this,e=this.$_menuProps;return e.activator=this.$refs["input-slot"],""===this.attach||!0===this.attach||"attach"===this.attach?e.attach=this.$el:e.attach=this.attach,this.$createElement(X,{attrs:{role:void 0},props:e,on:{input:function(e){t.isMenuActive=e,t.isFocused=e},scroll:this.onScroll},ref:"menu"},[this.genList()])},genSelections:function(){var t,e=this.selectedItems.length,n=new Array(e);t=this.$scopedSlots.selection?this.genSlotSelection:this.hasChips?this.genChipSelection:this.genCommaSelection;while(e--)n[e]=t(this.selectedItems[e],e,e===n.length-1);return this.$createElement("div",{staticClass:"v-select__selections"},n)},genSlotSelection:function(t,e){var n=this;return this.$scopedSlots.selection({attrs:{class:"v-chip--select"},parent:this,item:t,index:e,select:function(t){t.stopPropagation(),n.selectedIndex=e},selected:e===this.selectedIndex,disabled:!this.isInteractive})},getMenuIndex:function(){return this.$refs.menu?this.$refs.menu.listIndex:-1},getDisabled:function(t){return Object(S["k"])(t,this.itemDisabled,!1)},getText:function(t){return Object(S["k"])(t,this.itemText,t)},getValue:function(t){return Object(S["k"])(t,this.itemValue,this.getText(t))},onBlur:function(t){t&&this.$emit("blur",t)},onChipInput:function(t){this.multiple?this.selectItem(t):this.setValue(null),0===this.selectedItems.length?this.isMenuActive=!0:this.isMenuActive=!1,this.selectedIndex=-1},onClick:function(t){this.isInteractive&&(this.isAppendInner(t.target)||(this.isMenuActive=!0),this.isFocused||(this.isFocused=!0,this.$emit("focus")),this.$emit("click",t))},onEscDown:function(t){t.preventDefault(),this.isMenuActive&&(t.stopPropagation(),this.isMenuActive=!1)},onKeyPress:function(t){var e=this;if(!this.multiple&&this.isInteractive&&!this.disableLookup){var n=1e3,i=performance.now();i-this.keyboardLookupLastTime>n&&(this.keyboardLookupPrefix=""),this.keyboardLookupPrefix+=t.key.toLowerCase(),this.keyboardLookupLastTime=i;var r=this.allItems.findIndex((function(t){var n=(e.getText(t)||"").toString();return 
n.toLowerCase().startsWith(e.keyboardLookupPrefix)})),o=this.allItems[r];-1!==r&&(this.lastItem=Math.max(this.lastItem,r+5),this.setValue(this.returnObject?o:this.getValue(o)),this.$nextTick((function(){return e.$refs.menu.getTiles()})),setTimeout((function(){return e.setMenuIndex(r)})))}},onKeyDown:function(t){var e=this;if(!this.isReadonly||t.keyCode===S["p"].tab){var n=t.keyCode,i=this.$refs.menu;if(this.$emit("keydown",t),i)return this.isMenuActive&&[S["p"].up,S["p"].down,S["p"].home,S["p"].end,S["p"].enter].includes(n)&&this.$nextTick((function(){i.changeListIndex(t),e.$emit("update:list-index",i.listIndex)})),[S["p"].enter,S["p"].space].includes(n)&&this.activateMenu(),!this.isMenuActive&&[S["p"].up,S["p"].down,S["p"].home,S["p"].end].includes(n)?this.onUpDown(t):n===S["p"].esc?this.onEscDown(t):n===S["p"].tab?this.onTabDown(t):n===S["p"].space?this.onSpaceDown(t):void 0}},onMenuActiveChange:function(t){if(!(this.multiple&&!t||this.getMenuIndex()>-1)){var e=this.$refs.menu;if(e&&this.isDirty){this.$refs.menu.getTiles();for(var n=0;n<e.tiles.length;n++)if("true"===e.tiles[n].getAttribute("aria-selected")){this.setMenuIndex(n);break}}}},onMouseUp:function(t){var e=this;this.hasMouseDown&&3!==t.which&&this.isInteractive&&this.isAppendInner(t.target)&&this.$nextTick((function(){return e.isMenuActive=!e.isMenuActive})),Ct["a"].options.methods.onMouseUp.call(this,t)},onScroll:function(){var t=this;if(this.isMenuActive){if(this.lastItem>this.computedItems.length)return;var e=this.getContent().scrollHeight-(this.getContent().scrollTop+this.getContent().clientHeight)<200;e&&(this.lastItem+=20)}else requestAnimationFrame((function(){return t.getContent().scrollTop=0}))},onSpaceDown:function(t){t.preventDefault()},onTabDown:function(t){var e=this.$refs.menu;if(e){var n=e.activeTile;!this.multiple&&n&&this.isMenuActive?(t.preventDefault(),t.stopPropagation(),n.click()):this.blur(t)}},onUpDown:function(t){var e=this,n=this.$refs.menu;if(n){if(t.preventDefault(),this.multiple)return this.activateMenu();var i=t.keyCode;n.isBooted=!0,window.requestAnimationFrame((function(){if(n.getTiles(),!n.hasClickableTiles)return e.activateMenu();switch(i){case S["p"].up:n.prevTile();break;case S["p"].down:n.nextTile();break;case S["p"].home:n.firstTile();break;case S["p"].end:n.lastTile();break}e.selectItem(e.allItems[e.getMenuIndex()])}))}},selectItem:function(t){var e=this;if(this.multiple){var n=(this.internalValue||[]).slice(),i=this.findExistingIndex(t);if(-1!==i?n.splice(i,1):n.push(t),this.setValue(n.map((function(t){return e.returnObject?t:e.getValue(t)}))),this.hideSelected)this.setMenuIndex(-1);else{var r=this.allItems.indexOf(t);~r&&(this.$nextTick((function(){return e.$refs.menu.getTiles()})),setTimeout((function(){return e.setMenuIndex(r)})))}}else this.setValue(this.returnObject?t:this.getValue(t)),this.isMenuActive=!1},setMenuIndex:function(t){this.$refs.menu&&(this.$refs.menu.listIndex=t)},setSelectedItems:function(){var t,e=this,n=[],r=this.multiple&&Array.isArray(this.internalValue)?this.internalValue:[this.internalValue],o=Object(i["a"])(r);try{var a=function(){var i=t.value,r=e.allItems.findIndex((function(t){return e.valueComparator(e.getValue(t),e.getValue(i))}));r>-1&&n.push(e.allItems[r])};for(o.s();!(t=o.n()).done;)a()}catch(s){o.e(s)}finally{o.f()}this.selectedItems=n},setValue:function(t){this.valueComparator(t,this.internalValue)||(this.internalValue=t,this.$emit("change",t))},isAppendInner:function(t){var e=this.$refs["append-inner"];return 
e&&(e===t||e.contains(t))}}})},b980:function(t,e,n){var i=n("d039"),r=n("5c6c");t.exports=!i((function(){var t=Error("a");return!("stack"in t)||(Object.defineProperty(t,"stack",r(1,7)),7!==t.stack)}))},ba87:function(t,e,n){"use strict";var i=n("5530"),r=(n("a9e3"),n("1b2c"),n("a9ad")),o=n("7560"),a=n("58df"),s=n("80d2"),c=Object(a["a"])(o["a"]).extend({name:"v-label",functional:!0,props:{absolute:Boolean,color:{type:String,default:"primary"},disabled:Boolean,focused:Boolean,for:String,left:{type:[Number,String],default:0},right:{type:[Number,String],default:"auto"},value:Boolean},render:function(t,e){var n=e.children,a=e.listeners,c=e.props,u={staticClass:"v-label",class:Object(i["a"])({"v-label--active":c.value,"v-label--is-disabled":c.disabled},Object(o["b"])(e)),attrs:{for:c.for,"aria-hidden":!c.for},on:a,style:{left:Object(s["d"])(c.left),right:Object(s["d"])(c.right),position:c.absolute?"absolute":"relative"},ref:"label"};return t("label",r["a"].options.methods.setTextColor(c.focused&&c.color,u),n)}});e["a"]=c},bb2f:function(t,e,n){var i=n("d039");t.exports=!i((function(){return Object.isExtensible(Object.preventExtensions({}))}))},bc3a:function(t,e,n){t.exports=n("cee4")},bd0c:function(t,e,n){},bee2:function(t,e,n){"use strict";function i(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}function r(t,e,n){return e&&i(t.prototype,e),n&&i(t,n),Object.defineProperty(t,"prototype",{writable:!1}),t}n.d(e,"a",(function(){return r}))},c04e:function(t,e,n){var i=n("da84"),r=n("c65b"),o=n("861d"),a=n("d9b5"),s=n("dc4a"),c=n("485a"),u=n("b622"),l=i.TypeError,f=u("toPrimitive");t.exports=function(t,e){if(!o(t)||a(t))return t;var n,i=s(t,f);if(i){if(void 0===e&&(e="default"),n=r(i,t,e),!o(n)||a(n))return n;throw l("Can't convert object to primitive value")}return void 0===e&&(e="number"),c(t,e)}},c345:function(t,e,n){"use strict";var i=n("c532"),r=["age","authorization","content-length","content-type","etag","expires","from","host","if-modified-since","if-unmodified-since","last-modified","location","max-forwards","proxy-authorization","referer","retry-after","user-agent"];t.exports=function(t){var e,n,o,a={};return t?(i.forEach(t.split("\n"),(function(t){if(o=t.indexOf(":"),e=i.trim(t.substr(0,o)).toLowerCase(),n=i.trim(t.substr(o+1)),e){if(a[e]&&r.indexOf(e)>=0)return;a[e]="set-cookie"===e?(a[e]?a[e]:[]).concat([n]):a[e]?a[e]+", "+n:n}})),a):a}},c37a:function(t,e,n){"use strict";var i=n("5530"),r=(n("a9e3"),n("4de4"),n("d3b7"),n("d81d"),n("ac1f"),n("1276"),n("99af"),n("d191"),n("9d26")),o=n("ba87"),a=(n("8ff2"),n("a9ad")),s=n("7560"),c=n("58df"),u=n("80d2"),l=Object(c["a"])(a["a"],s["a"]).extend({name:"v-messages",props:{value:{type:Array,default:function(){return[]}}},methods:{genChildren:function(){return this.$createElement("transition-group",{staticClass:"v-messages__wrapper",attrs:{name:"message-transition",tag:"div"}},this.value.map(this.genMessage))},genMessage:function(t,e){return this.$createElement("div",{staticClass:"v-messages__message",key:e},Object(u["l"])(this,"default",{message:t,key:e})||[t])}},render:function(t){return 
t("div",this.setTextColor(this.color,{staticClass:"v-messages",class:this.themeClasses}),[this.genChildren()])}}),f=l,h=n("7e2b"),d=n("38cb"),p=n("d9f7"),v=Object(c["a"])(h["a"],d["a"]),m=v.extend().extend({name:"v-input",inheritAttrs:!1,props:{appendIcon:String,backgroundColor:{type:String,default:""},dense:Boolean,height:[Number,String],hideDetails:[Boolean,String],hideSpinButtons:Boolean,hint:String,id:String,label:String,loading:Boolean,persistentHint:Boolean,prependIcon:String,value:null},data:function(){return{lazyValue:this.value,hasMouseDown:!1}},computed:{classes:function(){return Object(i["a"])({"v-input--has-state":this.hasState,"v-input--hide-details":!this.showDetails,"v-input--is-label-active":this.isLabelActive,"v-input--is-dirty":this.isDirty,"v-input--is-disabled":this.isDisabled,"v-input--is-focused":this.isFocused,"v-input--is-loading":!1!==this.loading&&null!=this.loading,"v-input--is-readonly":this.isReadonly,"v-input--dense":this.dense,"v-input--hide-spin-buttons":this.hideSpinButtons},this.themeClasses)},computedId:function(){return this.id||"input-".concat(this._uid)},hasDetails:function(){return this.messagesToDisplay.length>0},hasHint:function(){return!this.hasMessages&&!!this.hint&&(this.persistentHint||this.isFocused)},hasLabel:function(){return!(!this.$slots.label&&!this.label)},internalValue:{get:function(){return this.lazyValue},set:function(t){this.lazyValue=t,this.$emit(this.$_modelEvent,t)}},isDirty:function(){return!!this.lazyValue},isLabelActive:function(){return this.isDirty},messagesToDisplay:function(){var t=this;return this.hasHint?[this.hint]:this.hasMessages?this.validations.map((function(e){if("string"===typeof e)return e;var n=e(t.internalValue);return"string"===typeof n?n:""})).filter((function(t){return""!==t})):[]},showDetails:function(){return!1===this.hideDetails||"auto"===this.hideDetails&&this.hasDetails}},watch:{value:function(t){this.lazyValue=t}},beforeCreate:function(){this.$_modelEvent=this.$options.model&&this.$options.model.event||"input"},methods:{genContent:function(){return[this.genPrependSlot(),this.genControl(),this.genAppendSlot()]},genControl:function(){return this.$createElement("div",{staticClass:"v-input__control",attrs:{title:this.attrs$.title}},[this.genInputSlot(),this.genMessages()])},genDefaultSlot:function(){return[this.genLabel(),this.$slots.default]},genIcon:function(t,e){var n=this,i=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},o=this["".concat(t,"Icon")],a="click:".concat(Object(u["o"])(t)),s=!(!this.listeners$[a]&&!e),c=Object(p["a"])({attrs:{"aria-label":s?Object(u["o"])(t).split("-")[0]+" icon":void 0,color:this.validationState,dark:this.dark,disabled:this.isDisabled,light:this.light},on:s?{click:function(t){t.preventDefault(),t.stopPropagation(),n.$emit(a,t),e&&e(t)},mouseup:function(t){t.preventDefault(),t.stopPropagation()}}:void 0},i);return this.$createElement("div",{staticClass:"v-input__icon",class:t?"v-input__icon--".concat(Object(u["o"])(t)):void 0},[this.$createElement(r["a"],c,o)])},genInputSlot:function(){return this.$createElement("div",this.setBackgroundColor(this.backgroundColor,{staticClass:"v-input__slot",style:{height:Object(u["d"])(this.height)},on:{click:this.onClick,mousedown:this.onMouseDown,mouseup:this.onMouseUp},ref:"input-slot"}),[this.genDefaultSlot()])},genLabel:function(){return 
this.hasLabel?this.$createElement(o["a"],{props:{color:this.validationState,dark:this.dark,disabled:this.isDisabled,focused:this.hasState,for:this.computedId,light:this.light}},this.$slots.label||this.label):null},genMessages:function(){var t=this;return this.showDetails?this.$createElement(f,{props:{color:this.hasHint?"":this.validationState,dark:this.dark,light:this.light,value:this.messagesToDisplay},attrs:{role:this.hasMessages?"alert":null},scopedSlots:{default:function(e){return Object(u["l"])(t,"message",e)}}}):null},genSlot:function(t,e,n){if(!n.length)return null;var i="".concat(t,"-").concat(e);return this.$createElement("div",{staticClass:"v-input__".concat(i),ref:i},n)},genPrependSlot:function(){var t=[];return this.$slots.prepend?t.push(this.$slots.prepend):this.prependIcon&&t.push(this.genIcon("prepend")),this.genSlot("prepend","outer",t)},genAppendSlot:function(){var t=[];return this.$slots.append?t.push(this.$slots.append):this.appendIcon&&t.push(this.genIcon("append")),this.genSlot("append","outer",t)},onClick:function(t){this.$emit("click",t)},onMouseDown:function(t){this.hasMouseDown=!0,this.$emit("mousedown",t)},onMouseUp:function(t){this.hasMouseDown=!1,this.$emit("mouseup",t)}},render:function(t){return t("div",this.setTextColor(this.validationState,{staticClass:"v-input",class:this.classes}),this.genContent())}});e["a"]=m},c401:function(t,e,n){"use strict";var i=n("c532");t.exports=function(t,e,n){return i.forEach(n,(function(n){t=n(t,e)})),t}},c430:function(t,e){t.exports=!1},c513:function(t,e,n){var i=n("23e7"),r=n("1a2d"),o=n("d9b5"),a=n("0d51"),s=n("5692"),c=n("3d87"),u=s("symbol-to-string-registry");i({target:"Symbol",stat:!0,forced:!c},{keyFor:function(t){if(!o(t))throw TypeError(a(t)+" is not a symbol");if(r(u,t))return u[t]}})},c532:function(t,e,n){"use strict";var i=n("1d2b"),r=n("c7ce"),o=Object.prototype.toString;function a(t){return"[object Array]"===o.call(t)}function s(t){return"[object ArrayBuffer]"===o.call(t)}function c(t){return"undefined"!==typeof FormData&&t instanceof FormData}function u(t){var e;return e="undefined"!==typeof ArrayBuffer&&ArrayBuffer.isView?ArrayBuffer.isView(t):t&&t.buffer&&t.buffer instanceof ArrayBuffer,e}function l(t){return"string"===typeof t}function f(t){return"number"===typeof t}function h(t){return"undefined"===typeof t}function d(t){return null!==t&&"object"===typeof t}function p(t){return"[object Date]"===o.call(t)}function v(t){return"[object File]"===o.call(t)}function m(t){return"[object Blob]"===o.call(t)}function g(t){return"[object Function]"===o.call(t)}function b(t){return d(t)&&g(t.pipe)}function y(t){return"undefined"!==typeof URLSearchParams&&t instanceof URLSearchParams}function x(t){return t.replace(/^\s*/,"").replace(/\s*$/,"")}function w(){return("undefined"===typeof navigator||"ReactNative"!==navigator.product)&&("undefined"!==typeof window&&"undefined"!==typeof document)}function O(t,e){if(null!==t&&"undefined"!==typeof t)if("object"!==typeof t&&(t=[t]),a(t))for(var n=0,i=t.length;n<i;n++)e.call(null,t[n],n,t);else for(var r in t)Object.prototype.hasOwnProperty.call(t,r)&&e.call(null,t[r],r,t)}function _(){var t={};function e(e,n){"object"===typeof t[n]&&"object"===typeof e?t[n]=_(t[n],e):t[n]=e}for(var n=0,i=arguments.length;n<i;n++)O(arguments[n],e);return t}function S(t,e,n){return O(e,(function(e,r){t[r]=n&&"function"===typeof 
e?i(e,n):e})),t}t.exports={isArray:a,isArrayBuffer:s,isBuffer:r,isFormData:c,isArrayBufferView:u,isString:l,isNumber:f,isObject:d,isUndefined:h,isDate:p,isFile:v,isBlob:m,isFunction:g,isStream:b,isURLSearchParams:y,isStandardBrowserEnv:w,forEach:O,merge:_,extend:S,trim:x}},c607:function(t,e,n){var i=n("da84"),r=n("83ab"),o=n("fce3"),a=n("c6b6"),s=n("edd0"),c=n("69f3").get,u=RegExp.prototype,l=i.TypeError;r&&o&&s(u,"dotAll",{configurable:!0,get:function(){if(this!==u){if("RegExp"===a(this))return!!c(this).dotAll;throw l("Incompatible receiver, RegExp required")}}})},c65b:function(t,e,n){var i=n("40d5"),r=Function.prototype.call;t.exports=i?r.bind(r):function(){return r.apply(r,arguments)}},c6b6:function(t,e,n){var i=n("e330"),r=i({}.toString),o=i("".slice);t.exports=function(t){return o(r(t),8,-1)}},c6cd:function(t,e,n){var i=n("da84"),r=n("ce4e"),o="__core-js_shared__",a=i[o]||r(o,{});t.exports=a},c740:function(t,e,n){"use strict";var i=n("23e7"),r=n("b727").findIndex,o=n("44d2"),a="findIndex",s=!0;a in[]&&Array(1)[a]((function(){s=!1})),i({target:"Array",proto:!0,forced:s},{findIndex:function(t){return r(this,t,arguments.length>1?arguments[1]:void 0)}}),o(a)},c770:function(t,e,n){var i=n("e330"),r=Error,o=i("".replace),a=function(t){return String(r(t).stack)}("zxcasd"),s=/\n\s*at [^:]*:[^\n]*/,c=s.test(a);t.exports=function(t,e){if(c&&"string"==typeof t&&!r.prepareStackTrace)while(e--)t=o(t,s,"");return t}},c7cd:function(t,e,n){"use strict";var i=n("23e7"),r=n("857a"),o=n("af03");i({target:"String",proto:!0,forced:o("fixed")},{fixed:function(){return r(this,"tt","","")}})},c7ce:function(t,e){ -/*! - * Determine if an object is a Buffer - * - * @author Feross Aboukhadijeh <https://feross.org> - * @license MIT - */ -t.exports=function(t){return null!=t&&null!=t.constructor&&"function"===typeof t.constructor.isBuffer&&t.constructor.isBuffer(t)}},c8af:function(t,e,n){"use strict";var i=n("c532");t.exports=function(t,e){i.forEach(t,(function(n,i){i!==e&&i.toUpperCase()===e.toUpperCase()&&(t[e]=n,delete t[i])}))}},c8ba:function(t,e){var n;n=function(){return this}();try{n=n||new Function("return this")()}catch(i){"object"===typeof window&&(n=window)}t.exports=n},c8d2:function(t,e,n){var i=n("5e77").PROPER,r=n("d039"),o=n("5899"),a="​…᠎";t.exports=function(t){return r((function(){return!!o[t]()||a[t]()!==a||i&&o[t].name!==t}))}},c96a:function(t,e,n){"use strict";var i=n("23e7"),r=n("857a"),o=n("af03");i({target:"String",proto:!0,forced:o("small")},{small:function(){return r(this,"small","","")}})},c995:function(t,e,n){"use strict";var i=n("ade3"),r=(n("a9e3"),n("2b0e"));e["a"]=r["a"].extend({name:"elevatable",props:{elevation:[Number,String]},computed:{computedElevation:function(){return this.elevation},elevationClasses:function(){var t=this.computedElevation;return null==t||isNaN(parseInt(t))?{}:Object(i["a"])({},"elevation-".concat(this.elevation),!0)}}})},ca71:function(t,e,n){},ca84:function(t,e,n){var i=n("e330"),r=n("1a2d"),o=n("fc6a"),a=n("4d64").indexOf,s=n("d012"),c=i([].push);t.exports=function(t,e){var n,i=o(t),u=0,l=[];for(n in i)!r(s,n)&&r(i,n)&&c(l,n);while(e.length>u)r(i,n=e[u++])&&(~a(l,n)||c(l,n));return l}},caad:function(t,e,n){"use strict";var i=n("23e7"),r=n("4d64").includes,o=n("d039"),a=n("44d2"),s=o((function(){return!Array(1).includes()}));i({target:"Array",proto:!0,forced:s},{includes:function(t){return r(this,t,arguments.length>1?arguments[1]:void 0)}}),a("includes")},cb29:function(t,e,n){var 
i=n("23e7"),r=n("81d5"),o=n("44d2");i({target:"Array",proto:!0},{fill:r}),o("fill")},cb2d:function(t,e,n){var i=n("da84"),r=n("1626"),o=n("9112"),a=n("13d2"),s=n("ce4e");t.exports=function(t,e,n,c){var u=!!c&&!!c.unsafe,l=!!c&&!!c.enumerable,f=!!c&&!!c.noTargetGet,h=c&&void 0!==c.name?c.name:e;return r(n)&&a(n,h,c),t===i?(l?t[e]=n:s(e,n),t):(u?!f&&t[e]&&(l=!0):delete t[e],l?t[e]=n:o(t,e,n),t)}},cc12:function(t,e,n){var i=n("da84"),r=n("861d"),o=i.document,a=r(o)&&r(o.createElement);t.exports=function(t){return a?o.createElement(t):{}}},cc98:function(t,e,n){"use strict";var i=n("23e7"),r=n("c430"),o=n("4738").CONSTRUCTOR,a=n("d256"),s=n("d066"),c=n("1626"),u=n("cb2d"),l=a&&a.prototype;if(i({target:"Promise",proto:!0,forced:o,real:!0},{catch:function(t){return this.then(void 0,t)}}),!r&&c(a)){var f=s("Promise").prototype["catch"];l["catch"]!==f&&u(l,"catch",f,{unsafe:!0})}},cca6:function(t,e,n){var i=n("23e7"),r=n("60da");i({target:"Object",stat:!0,arity:2,forced:Object.assign!==r},{assign:r})},cdf9:function(t,e,n){var i=n("825a"),r=n("861d"),o=n("f069");t.exports=function(t,e){if(i(t),r(e)&&e.constructor===t)return e;var n=o.f(t),a=n.resolve;return a(e),n.promise}},ce4e:function(t,e,n){var i=n("da84"),r=Object.defineProperty;t.exports=function(t,e){try{r(i,t,{value:e,configurable:!0,writable:!0})}catch(n){i[t]=e}return e}},cee4:function(t,e,n){"use strict";var i=n("c532"),r=n("1d2b"),o=n("0a06"),a=n("2444");function s(t){var e=new o(t),n=r(o.prototype.request,e);return i.extend(n,o.prototype,e),i.extend(n,e),n}var c=s(a);c.Axios=o,c.create=function(t){return s(i.merge(a,t))},c.Cancel=n("7a77"),c.CancelToken=n("8df4"),c.isCancel=n("2e67"),c.all=function(t){return Promise.all(t)},c.spread=n("0df6"),t.exports=c,t.exports.default=c},cf36:function(t,e,n){},d012:function(t,e){t.exports={}},d039:function(t,e){t.exports=function(t){try{return!!t()}catch(e){return!0}}},d066:function(t,e,n){var i=n("da84"),r=n("1626"),o=function(t){return r(t)?t:void 0};t.exports=function(t,e){return arguments.length<2?o(i[t]):i[t]&&i[t][e]}},d10f:function(t,e,n){"use strict";var i=n("2b0e");e["a"]=i["a"].extend({name:"ssr-bootable",data:function(){return{isBooted:!1}},mounted:function(){var t=this;window.requestAnimationFrame((function(){t.$el.setAttribute("data-booted","true"),t.isBooted=!0}))}})},d191:function(t,e,n){},d1e7:function(t,e,n){"use strict";var i={}.propertyIsEnumerable,r=Object.getOwnPropertyDescriptor,o=r&&!i.call({1:2},1);e.f=o?function(t){var e=r(this,t);return!!e&&e.enumerable}:i},d256:function(t,e,n){var i=n("da84");t.exports=i.Promise},d28b:function(t,e,n){var i=n("746f");i("iterator")},d2bb:function(t,e,n){var i=n("e330"),r=n("825a"),o=n("3bbe");t.exports=Object.setPrototypeOf||("__proto__"in{}?function(){var t,e=!1,n={};try{t=i(Object.getOwnPropertyDescriptor(Object.prototype,"__proto__").set),t(n,[]),e=n instanceof Array}catch(a){}return function(n,i){return r(n),o(i),e?t(n,i):n.__proto__=i,n}}():void 0)},d3b7:function(t,e,n){var i=n("00ee"),r=n("cb2d"),o=n("b041");i||r(Object.prototype,"toString",o,{unsafe:!0})},d44e:function(t,e,n){var i=n("9bf2").f,r=n("1a2d"),o=n("b622"),a=o("toStringTag");t.exports=function(t,e,n){t&&!n&&(t=t.prototype),t&&!r(t,a)&&i(t,a,{configurable:!0,value:e})}},d4c3:function(t,e,n){var i=n("342f"),r=n("da84");t.exports=/ipad|iphone|ipod/i.test(i)&&void 0!==r.Pebble},d4ec:function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));n("d9e2");function i(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}},d6d6:function(t,e,n){var 
i=n("da84"),r=i.TypeError;t.exports=function(t,e){if(t<e)throw r("Not enough arguments");return t}},d784:function(t,e,n){"use strict";n("ac1f");var i=n("e330"),r=n("cb2d"),o=n("9263"),a=n("d039"),s=n("b622"),c=n("9112"),u=s("species"),l=RegExp.prototype;t.exports=function(t,e,n,f){var h=s(t),d=!a((function(){var e={};return e[h]=function(){return 7},7!=""[t](e)})),p=d&&!a((function(){var e=!1,n=/a/;return"split"===t&&(n={},n.constructor={},n.constructor[u]=function(){return n},n.flags="",n[h]=/./[h]),n.exec=function(){return e=!0,null},n[h](""),!e}));if(!d||!p||n){var v=i(/./[h]),m=e(h,""[t],(function(t,e,n,r,a){var s=i(t),c=e.exec;return c===o||c===l.exec?d&&!a?{done:!0,value:v(e,n,r)}:{done:!0,value:s(n,e,r)}:{done:!1}}));r(String.prototype,t,m[0]),r(l,h,m[1])}f&&c(l[h],"sham",!0)}},d81d:function(t,e,n){"use strict";var i=n("23e7"),r=n("b727").map,o=n("1dde"),a=o("map");i({target:"Array",proto:!0,forced:!a},{map:function(t){return r(this,t,arguments.length>1?arguments[1]:void 0)}})},d86b:function(t,e,n){var i=n("d039");t.exports=i((function(){if("function"==typeof ArrayBuffer){var t=new ArrayBuffer(8);Object.isExtensible(t)&&Object.defineProperty(t,"a",{value:8})}}))},d925:function(t,e,n){"use strict";t.exports=function(t){return/^([a-z][a-z\d\+\-\.]*:)?\/\//i.test(t)}},d998:function(t,e,n){var i=n("342f");t.exports=/MSIE|Trident/.test(i)},d9b5:function(t,e,n){var i=n("da84"),r=n("d066"),o=n("1626"),a=n("3a9b"),s=n("fdbf"),c=i.Object;t.exports=s?function(t){return"symbol"==typeof t}:function(t){var e=r("Symbol");return o(e)&&a(e.prototype,c(t))}},d9bd:function(t,e,n){"use strict";n.d(e,"c",(function(){return o})),n.d(e,"b",(function(){return a})),n.d(e,"d",(function(){return s})),n.d(e,"a",(function(){return c})),n.d(e,"e",(function(){return u}));n("caad"),n("2532"),n("99af"),n("ac1f"),n("5319"),n("b0c0"),n("466d"),n("a15b"),n("d81d"),n("38cf");var i=n("f309");function r(t,e,n){if(!i["a"].config.silent){if(n&&(e={_isVue:!0,$parent:n,$options:e}),e){if(e.$_alreadyWarned=e.$_alreadyWarned||[],e.$_alreadyWarned.includes(t))return;e.$_alreadyWarned.push(t)}return"[Vuetify] ".concat(t)+(e?d(e):"")}}function o(t,e,n){var i=r(t,e,n);null!=i&&console.warn(i)}function a(t,e,n){var i=r(t,e,n);null!=i&&console.error(i)}function s(t,e,n,i){o("[UPGRADE] '".concat(t,"' is deprecated, use '").concat(e,"' instead."),n,i)}function c(t,e,n,i){a("[BREAKING] '".concat(t,"' has been removed, use '").concat(e,"' instead. For more information, see the upgrade guide https://github.com/vuetifyjs/vuetify/releases/tag/v2.0.0#user-content-upgrade-guide"),n,i)}function u(t,e,n){o("[REMOVED] '".concat(t,"' has been removed. You can safely omit it."),e,n)}var l=/(?:^|[-_])(\w)/g,f=function(t){return t.replace(l,(function(t){return t.toUpperCase()})).replace(/[-_]/g,"")};function h(t,e){if(t.$root===t)return"<Root>";var n="function"===typeof t&&null!=t.cid?t.options:t._isVue?t.$options||t.constructor.options:t||{},i=n.name||n._componentTag,r=n.__file;if(!i&&r){var o=r.match(/([^/\\]+)\.vue$/);i=o&&o[1]}return(i?"<".concat(f(i),">"):"<Anonymous>")+(r&&!1!==e?" at ".concat(r):"")}function d(t){if(t._isVue&&t.$parent){var e=[],n=0;while(t){if(e.length>0){var i=e[e.length-1];if(i.constructor===t.constructor){n++,t=t.$parent;continue}n>0&&(e[e.length-1]=[i,n],n=0)}e.push(t),t=t.$parent}return"\n\nfound in\n\n"+e.map((function(t,e){return"".concat(0===e?"---\x3e ":" ".repeat(5+2*e)).concat(Array.isArray(t)?"".concat(h(t[0]),"... 
(").concat(t[1]," recursive calls)"):h(t))})).join("\n")}return"\n\n(found in ".concat(h(t),")")}},d9e2:function(t,e,n){var i=n("23e7"),r=n("da84"),o=n("2ba4"),a=n("e5cb"),s="WebAssembly",c=r[s],u=7!==Error("e",{cause:7}).cause,l=function(t,e){var n={};n[t]=a(t,e,u),i({global:!0,constructor:!0,arity:1,forced:u},n)},f=function(t,e){if(c&&c[t]){var n={};n[t]=a(s+"."+t,e,u),i({target:s,stat:!0,constructor:!0,arity:1,forced:u},n)}};l("Error",(function(t){return function(e){return o(t,this,arguments)}})),l("EvalError",(function(t){return function(e){return o(t,this,arguments)}})),l("RangeError",(function(t){return function(e){return o(t,this,arguments)}})),l("ReferenceError",(function(t){return function(e){return o(t,this,arguments)}})),l("SyntaxError",(function(t){return function(e){return o(t,this,arguments)}})),l("TypeError",(function(t){return function(e){return o(t,this,arguments)}})),l("URIError",(function(t){return function(e){return o(t,this,arguments)}})),f("CompileError",(function(t){return function(e){return o(t,this,arguments)}})),f("LinkError",(function(t){return function(e){return o(t,this,arguments)}})),f("RuntimeError",(function(t){return function(e){return o(t,this,arguments)}}))},d9f5:function(t,e,n){"use strict";var i=n("23e7"),r=n("da84"),o=n("c65b"),a=n("e330"),s=n("c430"),c=n("83ab"),u=n("4930"),l=n("d039"),f=n("1a2d"),h=n("3a9b"),d=n("825a"),p=n("fc6a"),v=n("a04b"),m=n("577e"),g=n("5c6c"),b=n("7c73"),y=n("df75"),x=n("241c"),w=n("057f"),O=n("7418"),_=n("06cf"),S=n("9bf2"),C=n("37e8"),k=n("d1e7"),j=n("cb2d"),$=n("5692"),A=n("f772"),E=n("d012"),T=n("90e3"),L=n("b622"),I=n("e538"),B=n("746f"),M=n("57b9"),P=n("d44e"),D=n("69f3"),R=n("b727").forEach,N=A("hidden"),V="Symbol",F="prototype",z=D.set,H=D.getterFor(V),W=Object[F],U=r.Symbol,q=U&&U[F],G=r.TypeError,Y=r.QObject,Z=_.f,K=S.f,X=w.f,J=k.f,Q=a([].push),tt=$("symbols"),et=$("op-symbols"),nt=$("wks"),it=!Y||!Y[F]||!Y[F].findChild,rt=c&&l((function(){return 7!=b(K({},"a",{get:function(){return K(this,"a",{value:7}).a}})).a}))?function(t,e,n){var i=Z(W,e);i&&delete W[e],K(t,e,n),i&&t!==W&&K(W,e,i)}:K,ot=function(t,e){var n=tt[t]=b(q);return z(n,{type:V,tag:t,description:e}),c||(n.description=e),n},at=function(t,e,n){t===W&&at(et,e,n),d(t);var i=v(e);return d(n),f(tt,i)?(n.enumerable?(f(t,N)&&t[N][i]&&(t[N][i]=!1),n=b(n,{enumerable:g(0,!1)})):(f(t,N)||K(t,N,g(1,{})),t[N][i]=!0),rt(t,i,n)):K(t,i,n)},st=function(t,e){d(t);var n=p(e),i=y(n).concat(ht(n));return R(i,(function(e){c&&!o(ut,n,e)||at(t,e,n[e])})),t},ct=function(t,e){return void 0===e?b(t):st(b(t),e)},ut=function(t){var e=v(t),n=o(J,this,e);return!(this===W&&f(tt,e)&&!f(et,e))&&(!(n||!f(this,e)||!f(tt,e)||f(this,N)&&this[N][e])||n)},lt=function(t,e){var n=p(t),i=v(e);if(n!==W||!f(tt,i)||f(et,i)){var r=Z(n,i);return!r||!f(tt,i)||f(n,N)&&n[N][i]||(r.enumerable=!0),r}},ft=function(t){var e=X(p(t)),n=[];return R(e,(function(t){f(tt,t)||f(E,t)||Q(n,t)})),n},ht=function(t){var e=t===W,n=X(e?et:p(t)),i=[];return R(n,(function(t){!f(tt,t)||e&&!f(W,t)||Q(i,tt[t])})),i};u||(U=function(){if(h(q,this))throw G("Symbol is not a constructor");var t=arguments.length&&void 0!==arguments[0]?m(arguments[0]):void 0,e=T(t),n=function(t){this===W&&o(n,et,t),f(this,N)&&f(this[N],e)&&(this[N][e]=!1),rt(this,e,g(1,t))};return c&&it&&rt(W,e,{configurable:!0,set:n}),ot(e,t)},q=U[F],j(q,"toString",(function(){return H(this).tag})),j(U,"withoutSetter",(function(t){return ot(T(t),t)})),k.f=ut,S.f=at,C.f=st,_.f=lt,x.f=w.f=ft,O.f=ht,I.f=function(t){return 
ot(L(t),t)},c&&(K(q,"description",{configurable:!0,get:function(){return H(this).description}}),s||j(W,"propertyIsEnumerable",ut,{unsafe:!0}))),i({global:!0,constructor:!0,wrap:!0,forced:!u,sham:!u},{Symbol:U}),R(y(nt),(function(t){B(t)})),i({target:V,stat:!0,forced:!u},{useSetter:function(){it=!0},useSimple:function(){it=!1}}),i({target:"Object",stat:!0,forced:!u,sham:!c},{create:ct,defineProperty:at,defineProperties:st,getOwnPropertyDescriptor:lt}),i({target:"Object",stat:!0,forced:!u},{getOwnPropertyNames:ft}),M(),P(U,V),E[N]=!0},d9f7:function(t,e,n){"use strict";n.d(e,"a",(function(){return u}));var i=n("5530"),r=n("3835"),o=n("b85c"),a=(n("ac1f"),n("1276"),n("498a"),n("b64b"),n("99af"),n("80d2")),s={styleList:/;(?![^(]*\))/g,styleProp:/:(.*)/};function c(t){var e,n={},i=Object(o["a"])(t.split(s.styleList));try{for(i.s();!(e=i.n()).done;){var c=e.value,u=c.split(s.styleProp),l=Object(r["a"])(u,2),f=l[0],h=l[1];f=f.trim(),f&&("string"===typeof h&&(h=h.trim()),n[Object(a["a"])(f)]=h)}}catch(d){i.e(d)}finally{i.f()}return n}function u(){var t,e={},n=arguments.length;while(n--)for(var r=0,o=Object.keys(arguments[n]);r<o.length;r++)switch(t=o[r],t){case"class":case"directives":arguments[n][t]&&(e[t]=f(e[t],arguments[n][t]));break;case"style":arguments[n][t]&&(e[t]=l(e[t],arguments[n][t]));break;case"staticClass":if(!arguments[n][t])break;void 0===e[t]&&(e[t]=""),e[t]&&(e[t]+=" "),e[t]+=arguments[n][t].trim();break;case"on":case"nativeOn":arguments[n][t]&&(e[t]=h(e[t],arguments[n][t]));break;case"attrs":case"props":case"domProps":case"scopedSlots":case"staticStyle":case"hook":case"transition":if(!arguments[n][t])break;e[t]||(e[t]={}),e[t]=Object(i["a"])(Object(i["a"])({},arguments[n][t]),e[t]);break;default:e[t]||(e[t]=arguments[n][t])}return e}function l(t,e){return t?e?(t=Object(a["v"])("string"===typeof t?c(t):t),t.concat("string"===typeof e?c(e):e)):t:e}function f(t,e){return e?t&&t?Object(a["v"])(t).concat(e):e:t}function h(){if(!(arguments.length<=0?void 0:arguments[0]))return arguments.length<=1?void 0:arguments[1];if(!(arguments.length<=1?void 0:arguments[1]))return arguments.length<=0?void 0:arguments[0];for(var t={},e=2;e--;){var n=e<0||arguments.length<=e?void 0:arguments[e];for(var i in n)n[i]&&(t[i]?t[i]=[].concat(n[i],t[i]):t[i]=n[i])}return t}},da84:function(t,e,n){(function(e){var n=function(t){return t&&t.Math==Math&&t};t.exports=n("object"==typeof globalThis&&globalThis)||n("object"==typeof window&&window)||n("object"==typeof self&&self)||n("object"==typeof e&&e)||function(){return this}()||Function("return this")()}).call(this,n("c8ba"))},db42:function(t,e,n){},dbb4:function(t,e,n){var i=n("23e7"),r=n("83ab"),o=n("56ef"),a=n("fc6a"),s=n("06cf"),c=n("8418");i({target:"Object",stat:!0,sham:!r},{getOwnPropertyDescriptors:function(t){var e,n,i=a(t),r=s.f,u=o(i),l={},f=0;while(u.length>f)n=r(i,e=u[f++]),void 0!==n&&c(l,e,n);return l}})},dc22:function(t,e,n){"use strict";function i(t,e,n){var i=e.value,r=e.options||{passive:!0};window.addEventListener("resize",i,r),t._onResize=Object(t._onResize),t._onResize[n.context._uid]={callback:i,options:r},e.modifiers&&e.modifiers.quiet||i()}function r(t,e,n){var i;if(null!=(i=t._onResize)&&i[n.context._uid]){var r=t._onResize[n.context._uid],o=r.callback,a=r.options;window.removeEventListener("resize",o,a),delete t._onResize[n.context._uid]}}var o={inserted:i,unbind:r};e["a"]=o},dc4a:function(t,e,n){var i=n("59ed");t.exports=function(t,e){var n=t[e];return null==n?void 0:i(n)}},dca8:function(t,e,n){var 
i=n("23e7"),r=n("bb2f"),o=n("d039"),a=n("861d"),s=n("f183").onFreeze,c=Object.freeze,u=o((function(){c(1)}));i({target:"Object",stat:!0,forced:u,sham:!r},{freeze:function(t){return c&&a(t)?c(s(t)):t}})},dd89:function(t,e,n){"use strict";function i(t){if("function"!==typeof t.getRootNode){while(t.parentNode)t=t.parentNode;return t!==document?null:document}var e=t.getRootNode();return e!==document&&e.getRootNode({composed:!0})!==document?null:e}n.d(e,"a",(function(){return i}))},ddb0:function(t,e,n){var i=n("da84"),r=n("fdbc"),o=n("785a"),a=n("e260"),s=n("9112"),c=n("b622"),u=c("iterator"),l=c("toStringTag"),f=a.values,h=function(t,e){if(t){if(t[u]!==f)try{s(t,u,f)}catch(i){t[u]=f}if(t[l]||s(t,l,e),r[e])for(var n in a)if(t[n]!==a[n])try{s(t,n,a[n])}catch(i){t[n]=a[n]}}};for(var d in r)h(i[d]&&i[d].prototype,d);h(o,"DOMTokenList")},df75:function(t,e,n){var i=n("ca84"),r=n("7839");t.exports=Object.keys||function(t){return i(t,r)}},df7c:function(t,e,n){(function(t){function n(t,e){for(var n=0,i=t.length-1;i>=0;i--){var r=t[i];"."===r?t.splice(i,1):".."===r?(t.splice(i,1),n++):n&&(t.splice(i,1),n--)}if(e)for(;n--;n)t.unshift("..");return t}function i(t){"string"!==typeof t&&(t+="");var e,n=0,i=-1,r=!0;for(e=t.length-1;e>=0;--e)if(47===t.charCodeAt(e)){if(!r){n=e+1;break}}else-1===i&&(r=!1,i=e+1);return-1===i?"":t.slice(n,i)}function r(t,e){if(t.filter)return t.filter(e);for(var n=[],i=0;i<t.length;i++)e(t[i],i,t)&&n.push(t[i]);return n}e.resolve=function(){for(var e="",i=!1,o=arguments.length-1;o>=-1&&!i;o--){var a=o>=0?arguments[o]:t.cwd();if("string"!==typeof a)throw new TypeError("Arguments to path.resolve must be strings");a&&(e=a+"/"+e,i="/"===a.charAt(0))}return e=n(r(e.split("/"),(function(t){return!!t})),!i).join("/"),(i?"/":"")+e||"."},e.normalize=function(t){var i=e.isAbsolute(t),a="/"===o(t,-1);return t=n(r(t.split("/"),(function(t){return!!t})),!i).join("/"),t||i||(t="."),t&&a&&(t+="/"),(i?"/":"")+t},e.isAbsolute=function(t){return"/"===t.charAt(0)},e.join=function(){var t=Array.prototype.slice.call(arguments,0);return e.normalize(r(t,(function(t,e){if("string"!==typeof t)throw new TypeError("Arguments to path.join must be strings");return t})).join("/"))},e.relative=function(t,n){function i(t){for(var e=0;e<t.length;e++)if(""!==t[e])break;for(var n=t.length-1;n>=0;n--)if(""!==t[n])break;return e>n?[]:t.slice(e,n-e+1)}t=e.resolve(t).substr(1),n=e.resolve(n).substr(1);for(var r=i(t.split("/")),o=i(n.split("/")),a=Math.min(r.length,o.length),s=a,c=0;c<a;c++)if(r[c]!==o[c]){s=c;break}var u=[];for(c=s;c<r.length;c++)u.push("..");return u=u.concat(o.slice(s)),u.join("/")},e.sep="/",e.delimiter=":",e.dirname=function(t){if("string"!==typeof t&&(t+=""),0===t.length)return".";for(var e=t.charCodeAt(0),n=47===e,i=-1,r=!0,o=t.length-1;o>=1;--o)if(e=t.charCodeAt(o),47===e){if(!r){i=o;break}}else r=!1;return-1===i?n?"/":".":n&&1===i?"/":t.slice(0,i)},e.basename=function(t,e){var n=i(t);return e&&n.substr(-1*e.length)===e&&(n=n.substr(0,n.length-e.length)),n},e.extname=function(t){"string"!==typeof t&&(t+="");for(var e=-1,n=0,i=-1,r=!0,o=0,a=t.length-1;a>=0;--a){var s=t.charCodeAt(a);if(47!==s)-1===i&&(r=!1,i=a+1),46===s?-1===e?e=a:1!==o&&(o=1):-1!==e&&(o=-1);else if(!r){n=a+1;break}}return-1===e||-1===i||0===o||1===o&&e===i-1&&e===n+1?"":t.slice(e,i)};var o="b"==="ab".substr(-1)?function(t,e,n){return t.substr(e,n)}:function(t,e,n){return e<0&&(e=t.length+e),t.substr(e,n)}}).call(this,n("4362"))},df86:function(t,e,n){},e01a:function(t,e,n){"use strict";var 
i=n("23e7"),r=n("83ab"),o=n("da84"),a=n("e330"),s=n("1a2d"),c=n("1626"),u=n("3a9b"),l=n("577e"),f=n("9bf2").f,h=n("e893"),d=o.Symbol,p=d&&d.prototype;if(r&&c(d)&&(!("description"in p)||void 0!==d().description)){var v={},m=function(){var t=arguments.length<1||void 0===arguments[0]?void 0:l(arguments[0]),e=u(p,this)?new d(t):void 0===t?d():d(t);return""===t&&(v[e]=!0),e};h(m,d),m.prototype=p,p.constructor=m;var g="Symbol(test)"==String(d("test")),b=a(p.toString),y=a(p.valueOf),x=/^Symbol\((.*)\)[^)]+$/,w=a("".replace),O=a("".slice);f(p,"description",{configurable:!0,get:function(){var t=y(this),e=b(t);if(s(v,t))return"";var n=g?O(e,7,-1):w(e,x,"$1");return""===n?void 0:n}}),i({global:!0,constructor:!0,forced:!0},{Symbol:m})}},e163:function(t,e,n){var i=n("da84"),r=n("1a2d"),o=n("1626"),a=n("7b0b"),s=n("f772"),c=n("e177"),u=s("IE_PROTO"),l=i.Object,f=l.prototype;t.exports=c?l.getPrototypeOf:function(t){var e=a(t);if(r(e,u))return e[u];var n=e.constructor;return o(n)&&e instanceof n?n.prototype:e instanceof l?f:null}},e177:function(t,e,n){var i=n("d039");t.exports=!i((function(){function t(){}return t.prototype.constructor=null,Object.getPrototypeOf(new t)!==t.prototype}))},e260:function(t,e,n){"use strict";var i=n("fc6a"),r=n("44d2"),o=n("3f8c"),a=n("69f3"),s=n("9bf2").f,c=n("7dd0"),u=n("c430"),l=n("83ab"),f="Array Iterator",h=a.set,d=a.getterFor(f);t.exports=c(Array,"Array",(function(t,e){h(this,{type:f,target:i(t),index:0,kind:e})}),(function(){var t=d(this),e=t.target,n=t.kind,i=t.index++;return!e||i>=e.length?(t.target=void 0,{value:void 0,done:!0}):"keys"==n?{value:i,done:!1}:"values"==n?{value:e[i],done:!1}:{value:[i,e[i]],done:!1}}),"values");var p=o.Arguments=o.Array;if(r("keys"),r("values"),r("entries"),!u&&l&&"values"!==p.name)try{s(p,"name",{value:"values"})}catch(v){}},e330:function(t,e,n){var i=n("40d5"),r=Function.prototype,o=r.bind,a=r.call,s=i&&o.bind(a,a);t.exports=i?function(t){return t&&s(t)}:function(t){return t&&function(){return a.apply(t,arguments)}}},e391:function(t,e,n){var i=n("577e");t.exports=function(t,e){return void 0===t?arguments.length<2?"":e:i(t)}},e439:function(t,e,n){var i=n("23e7"),r=n("d039"),o=n("fc6a"),a=n("06cf").f,s=n("83ab"),c=r((function(){a(1)})),u=!s||c;i({target:"Object",stat:!0,forced:u,sham:!s},{getOwnPropertyDescriptor:function(t,e){return a(o(t),e)}})},e538:function(t,e,n){var i=n("b622");e.f=i},e5cb:function(t,e,n){"use strict";var i=n("d066"),r=n("1a2d"),o=n("9112"),a=n("3a9b"),s=n("d2bb"),c=n("e893"),u=n("aeb0"),l=n("7156"),f=n("e391"),h=n("ab36"),d=n("c770"),p=n("b980"),v=n("83ab"),m=n("c430");t.exports=function(t,e,n,g){var b="stackTraceLimit",y=g?2:1,x=t.split("."),w=x[x.length-1],O=i.apply(null,x);if(O){var _=O.prototype;if(!m&&r(_,"cause")&&delete _.cause,!n)return O;var S=i("Error"),C=e((function(t,e){var n=f(g?e:t,void 0),i=g?new O(t):new O;return void 0!==n&&o(i,"message",n),p&&o(i,"stack",d(i.stack,2)),this&&a(_,this)&&l(i,this,C),arguments.length>y&&h(i,arguments[y]),i}));if(C.prototype=_,"Error"!==w?s?s(C,S):c(C,S,{name:!0}):v&&b in O&&(u(C,O,b),u(C,O,"prepareStackTrace")),c(C,O),!m)try{_.name!==w&&o(_,"name",w),_.constructor=C}catch(k){}return C}}},e667:function(t,e){t.exports=function(t){try{return{error:!1,value:t()}}catch(e){return{error:!0,value:e}}}},e683:function(t,e,n){"use strict";t.exports=function(t,e){return e?t.replace(/\/+$/,"")+"/"+e.replace(/^\/+/,""):t}},e6cf:function(t,e,n){n("5e7e"),n("14e5"),n("cc98"),n("3529"),n("f22b"),n("7149")},e893:function(t,e,n){var 
i=n("1a2d"),r=n("56ef"),o=n("06cf"),a=n("9bf2");t.exports=function(t,e,n){for(var s=r(e),c=a.f,u=o.f,l=0;l<s.length;l++){var f=s[l];i(t,f)||n&&i(n,f)||c(t,f,u(e,f))}}},e8b5:function(t,e,n){var i=n("c6b6");t.exports=Array.isArray||function(t){return"Array"==i(t)}},e95a:function(t,e,n){var i=n("b622"),r=n("3f8c"),o=i("iterator"),a=Array.prototype;t.exports=function(t){return void 0!==t&&(r.Array===t||a[o]===t)}},e9b1:function(t,e,n){},e9c4:function(t,e,n){var i=n("23e7"),r=n("d066"),o=n("2ba4"),a=n("c65b"),s=n("e330"),c=n("d039"),u=n("e8b5"),l=n("1626"),f=n("861d"),h=n("d9b5"),d=n("f36a"),p=n("4930"),v=r("JSON","stringify"),m=s(/./.exec),g=s("".charAt),b=s("".charCodeAt),y=s("".replace),x=s(1..toString),w=/[\uD800-\uDFFF]/g,O=/^[\uD800-\uDBFF]$/,_=/^[\uDC00-\uDFFF]$/,S=!p||c((function(){var t=r("Symbol")();return"[null]"!=v([t])||"{}"!=v({a:t})||"{}"!=v(Object(t))})),C=c((function(){return'"\\udf06\\ud834"'!==v("\udf06\ud834")||'"\\udead"'!==v("\udead")})),k=function(t,e){var n=d(arguments),i=e;if((f(e)||void 0!==t)&&!h(t))return u(e)||(e=function(t,e){if(l(i)&&(e=a(i,this,t,e)),!h(e))return e}),n[1]=e,o(v,null,n)},j=function(t,e,n){var i=g(n,e-1),r=g(n,e+1);return m(O,t)&&!m(_,r)||m(_,t)&&!m(O,i)?"\\u"+x(b(t,0),16):t};v&&i({target:"JSON",stat:!0,arity:3,forced:S||C},{stringify:function(t,e,n){var i=d(arguments),r=o(S?k:v,null,i);return C&&"string"==typeof r?y(r,w,j):r}})},ec29:function(t,e,n){},edd0:function(t,e,n){var i=n("13d2"),r=n("9bf2");t.exports=function(t,e,n){return n.get&&i(n.get,e,{getter:!0}),n.set&&i(n.set,e,{setter:!0}),r.f(t,e,n)}},ee6f:function(t,e,n){},f069:function(t,e,n){"use strict";var i=n("59ed"),r=function(t){var e,n;this.promise=new t((function(t,i){if(void 0!==e||void 0!==n)throw TypeError("Bad Promise constructor");e=t,n=i})),this.resolve=i(e),this.reject=i(n)};t.exports.f=function(t){return new r(t)}},f183:function(t,e,n){var i=n("23e7"),r=n("e330"),o=n("d012"),a=n("861d"),s=n("1a2d"),c=n("9bf2").f,u=n("241c"),l=n("057f"),f=n("4fad"),h=n("90e3"),d=n("bb2f"),p=!1,v=h("meta"),m=0,g=function(t){c(t,v,{value:{objectID:"O"+m++,weakData:{}}})},b=function(t,e){if(!a(t))return"symbol"==typeof t?t:("string"==typeof t?"S":"P")+t;if(!s(t,v)){if(!f(t))return"F";if(!e)return"E";g(t)}return t[v].objectID},y=function(t,e){if(!s(t,v)){if(!f(t))return!0;if(!e)return!1;g(t)}return t[v].weakData},x=function(t){return d&&p&&f(t)&&!s(t,v)&&g(t),t},w=function(){O.enable=function(){},p=!0;var t=u.f,e=r([].splice),n={};n[v]=1,t(n).length&&(u.f=function(n){for(var i=t(n),r=0,o=i.length;r<o;r++)if(i[r]===v){e(i,r,1);break}return i},i({target:"Object",stat:!0,forced:!0},{getOwnPropertyNames:l.f}))},O=t.exports={enable:w,fastKey:b,getWeakData:y,onFreeze:x};o[v]=!0},f22b:function(t,e,n){"use strict";var i=n("23e7"),r=n("c65b"),o=n("f069"),a=n("4738").CONSTRUCTOR;i({target:"Promise",stat:!0,forced:a},{reject:function(t){var e=o.f(this);return r(e.reject,void 0,t),e.promise}})},f2e7:function(t,e,n){"use strict";n.d(e,"b",(function(){return o}));var i=n("ade3"),r=n("2b0e");function o(){var t,e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"value",n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"input";return r["a"].extend({name:"toggleable",model:{prop:e,event:n},props:Object(i["a"])({},e,{required:!1}),data:function(){return{isActive:!!this[e]}},watch:(t={},Object(i["a"])(t,e,(function(t){this.isActive=!!t})),Object(i["a"])(t,"isActive",(function(t){!!t!==this[e]&&this.$emit(n,t)})),t)})}var a=o();e["a"]=a},f309:function(t,e,n){"use strict";n.d(e,"a",(function(){return 
at}));var i=n("d4ec"),r=n("bee2"),o=(n("d3b7"),n("159b"),n("caad"),n("2532"),n("2b0e")),a=n("d9bd");function s(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(!s.installed){s.installed=!0,o["a"]!==t&&Object(a["b"])("Multiple instances of Vue detected\nSee https://github.com/vuetifyjs/vuetify/issues/4068\n\nIf you're seeing \"$attrs is readonly\", it's caused by this");var n=e.components||{},i=e.directives||{};for(var r in i){var c=i[r];t.directive(r,c)}(function e(n){if(n){for(var i in n){var r=n[i];r&&!e(r.$_vuetify_subcomponents)&&t.component(i,r)}return!0}return!1})(n),t.$_vuetify_installed||(t.$_vuetify_installed=!0,t.mixin({beforeCreate:function(){var e=this.$options;e.vuetify?(e.vuetify.init(this,this.$ssrContext),this.$vuetify=t.observable(e.vuetify.framework)):this.$vuetify=e.parent&&e.parent.$vuetify||this},beforeMount:function(){this.$options.vuetify&&this.$el&&this.$el.hasAttribute("data-server-rendered")&&(this.$vuetify.isHydrating=!0,this.$vuetify.breakpoint.update(!0))},mounted:function(){this.$options.vuetify&&this.$vuetify.isHydrating&&(this.$vuetify.isHydrating=!1,this.$vuetify.breakpoint.update())}}))}}var c=n("15fd"),u=n("262e"),l=n("2caf"),f=(n("95ed"),{badge:"Badge",close:"Close",dataIterator:{noResultsText:"No matching records found",loadingText:"Loading items..."},dataTable:{itemsPerPageText:"Rows per page:",ariaLabel:{sortDescending:"Sorted descending.",sortAscending:"Sorted ascending.",sortNone:"Not sorted.",activateNone:"Activate to remove sorting.",activateDescending:"Activate to sort descending.",activateAscending:"Activate to sort ascending."},sortBy:"Sort by"},dataFooter:{itemsPerPageText:"Items per page:",itemsPerPageAll:"All",nextPage:"Next page",prevPage:"Previous page",firstPage:"First page",lastPage:"Last page",pageText:"{0}-{1} of {2}"},datePicker:{itemsSelected:"{0} selected",nextMonthAriaLabel:"Next month",nextYearAriaLabel:"Next year",prevMonthAriaLabel:"Previous month",prevYearAriaLabel:"Previous year"},noDataText:"No data available",carousel:{prev:"Previous visual",next:"Next visual",ariaLabel:{delimiter:"Carousel slide {0} of {1}"}},calendar:{moreEvents:"{0} more"},fileInput:{counter:"{0} files",counterSize:"{0} files ({1} in total)"},timePicker:{am:"AM",pm:"PM"},pagination:{ariaLabel:{wrapper:"Pagination Navigation",next:"Next page",previous:"Previous page",page:"Goto Page {0}",currentPage:"Current Page, Page {0}"}},rating:{ariaLabel:{icon:"Rating {0} of {1}"}}}),h={breakpoint:{mobileBreakpoint:1264,scrollBarWidth:16,thresholds:{xs:600,sm:960,md:1280,lg:1920}},icons:{iconfont:"mdi",values:{}},lang:{current:"en",locales:{en:f},t:void 0},rtl:!1,theme:{dark:!1,default:"light",disable:!1,options:{cspNonce:void 0,customProperties:void 0,minifyTheme:void 0,themeCache:void 0,variations:!0},themes:{light:{primary:"#1976D2",secondary:"#424242",accent:"#82B1FF",error:"#FF5252",info:"#2196F3",success:"#4CAF50",warning:"#FB8C00"},dark:{primary:"#2196F3",secondary:"#424242",accent:"#FF4081",error:"#FF5252",info:"#2196F3",success:"#4CAF50",warning:"#FB8C00"}}}},d=n("80d2"),p=n("fff9"),v=["preset"],m=function(t){Object(u["a"])(n,t);var e=Object(l["a"])(n);function n(t,r){var o;Object(i["a"])(this,n),o=e.call(this);var s=Object(d["r"])({},h),u=r.userPreset,l=u.preset,f=void 0===l?{}:l,p=Object(c["a"])(u,v);return null!=f.preset&&Object(a["c"])("Global presets do not support the **preset** option, it can be safely omitted"),r.preset=Object(d["r"])(Object(d["r"])(s,f),p),o}return Object(r["a"])(n)}(p["a"]);m.property="presets";n("07ac");var 
g=function(t){Object(u["a"])(n,t);var e=Object(l["a"])(n);function n(){var t;return Object(i["a"])(this,n),t=e.apply(this,arguments),t.bar=0,t.top=0,t.left=0,t.insetFooter=0,t.right=0,t.bottom=0,t.footer=0,t.application={bar:{},top:{},left:{},insetFooter:{},right:{},bottom:{},footer:{}},t}return Object(r["a"])(n,[{key:"register",value:function(t,e,n){this.application[e][t]=n,this.update(e)}},{key:"unregister",value:function(t,e){null!=this.application[e][t]&&(delete this.application[e][t],this.update(e))}},{key:"update",value:function(t){this[t]=Object.values(this.application[t]).reduce((function(t,e){return t+e}),0)}}]),n}(p["a"]);g.property="application";n("b0c0");var b=function(t){Object(u["a"])(n,t);var e=Object(l["a"])(n);function n(t){var r;Object(i["a"])(this,n),r=e.call(this),r.xs=!1,r.sm=!1,r.md=!1,r.lg=!1,r.xl=!1,r.xsOnly=!1,r.smOnly=!1,r.smAndDown=!1,r.smAndUp=!1,r.mdOnly=!1,r.mdAndDown=!1,r.mdAndUp=!1,r.lgOnly=!1,r.lgAndDown=!1,r.lgAndUp=!1,r.xlOnly=!1,r.name="xs",r.height=0,r.width=0,r.mobile=!0,r.resizeTimeout=0;var o=t[n.property],a=o.mobileBreakpoint,s=o.scrollBarWidth,c=o.thresholds;return r.mobileBreakpoint=a,r.scrollBarWidth=s,r.thresholds=c,r}return Object(r["a"])(n,[{key:"init",value:function(){this.update(),"undefined"!==typeof window&&window.addEventListener("resize",this.onResize.bind(this),{passive:!0})}},{key:"update",value:function(){var t=arguments.length>0&&void 0!==arguments[0]&&arguments[0],e=t?0:this.getClientHeight(),n=t?0:this.getClientWidth(),i=n<this.thresholds.xs,r=n<this.thresholds.sm&&!i,o=n<this.thresholds.md-this.scrollBarWidth&&!(r||i),a=n<this.thresholds.lg-this.scrollBarWidth&&!(o||r||i),s=n>=this.thresholds.lg-this.scrollBarWidth;switch(this.height=e,this.width=n,this.xs=i,this.sm=r,this.md=o,this.lg=a,this.xl=s,this.xsOnly=i,this.smOnly=r,this.smAndDown=(i||r)&&!(o||a||s),this.smAndUp=!i&&(r||o||a||s),this.mdOnly=o,this.mdAndDown=(i||r||o)&&!(a||s),this.mdAndUp=!(i||r)&&(o||a||s),this.lgOnly=a,this.lgAndDown=(i||r||o||a)&&!s,this.lgAndUp=!(i||r||o)&&(a||s),this.xlOnly=s,!0){case i:this.name="xs";break;case r:this.name="sm";break;case o:this.name="md";break;case a:this.name="lg";break;default:this.name="xl";break}if("number"!==typeof this.mobileBreakpoint){var c={xs:0,sm:1,md:2,lg:3,xl:4},u=c[this.name],l=c[this.mobileBreakpoint];this.mobile=u<=l}else this.mobile=n<parseInt(this.mobileBreakpoint,10)}},{key:"onResize",value:function(){clearTimeout(this.resizeTimeout),this.resizeTimeout=window.setTimeout(this.update.bind(this),200)}},{key:"getClientWidth",value:function(){return"undefined"===typeof document?0:Math.max(document.documentElement.clientWidth,window.innerWidth||0)}},{key:"getClientHeight",value:function(){return"undefined"===typeof document?0:Math.max(document.documentElement.clientHeight,window.innerHeight||0)}}]),n}(p["a"]);b.property="breakpoint";var y=n("7d8f"),x=(n("ddb0"),n("dca8"),{complete:"M21,7L9,19L3.5,13.5L4.91,12.09L9,16.17L19.59,5.59L21,7Z",cancel:"M12,2C17.53,2 22,6.47 22,12C22,17.53 17.53,22 12,22C6.47,22 2,17.53 2,12C2,6.47 6.47,2 12,2M15.59,7L12,10.59L8.41,7L7,8.41L10.59,12L7,15.59L8.41,17L12,13.41L15.59,17L17,15.59L13.41,12L17,8.41L15.59,7Z",close:"M19,6.41L17.59,5L12,10.59L6.41,5L5,6.41L10.59,12L5,17.59L6.41,19L12,13.41L17.59,19L19,17.59L13.41,12L19,6.41Z",delete:"M12,2C17.53,2 22,6.47 22,12C22,17.53 17.53,22 12,22C6.47,22 2,17.53 2,12C2,6.47 6.47,2 
12,2M15.59,7L12,10.59L8.41,7L7,8.41L10.59,12L7,15.59L8.41,17L12,13.41L15.59,17L17,15.59L13.41,12L17,8.41L15.59,7Z",clear:"M19,6.41L17.59,5L12,10.59L6.41,5L5,6.41L10.59,12L5,17.59L6.41,19L12,13.41L17.59,19L19,17.59L13.41,12L19,6.41Z",success:"M12,2C17.52,2 22,6.48 22,12C22,17.52 17.52,22 12,22C6.48,22 2,17.52 2,12C2,6.48 6.48,2 12,2M11,16.5L18,9.5L16.59,8.09L11,13.67L7.91,10.59L6.5,12L11,16.5Z",info:"M13,9H11V7H13M13,17H11V11H13M12,2C6.48,2 2,6.48 2,12C2,17.52 6.48,22 12,22C17.52,22 22,17.52 22,12C22,6.48 17.52,2 12,2Z",warning:"M11,4.5H13V15.5H11V4.5M13,17.5V19.5H11V17.5H13Z",error:"M13,14H11V10H13M13,18H11V16H13M1,21H23L12,2L1,21Z",prev:"M15.41,16.58L10.83,12L15.41,7.41L14,6L8,12L14,18L15.41,16.58Z",next:"M8.59,16.58L13.17,12L8.59,7.41L10,6L16,12L10,18L8.59,16.58Z",checkboxOn:"M10,17L5,12L6.41,10.58L10,14.17L17.59,6.58L19,8M19,3H5C3.89,3 3,3.89 3,5V19C3,20.1 3.9,21 5,21H19C20.1,21 21,20.1 21,19V5C21,3.89 20.1,3 19,3Z",checkboxOff:"M19,3H5C3.89,3 3,3.89 3,5V19C3,20.1 3.9,21 5,21H19C20.1,21 21,20.1 21,19V5C21,3.89 20.1,3 19,3M19,5V19H5V5H19Z",checkboxIndeterminate:"M17,13H7V11H17M19,3H5C3.89,3 3,3.89 3,5V19C3,20.1 3.9,21 5,21H19C20.1,21 21,20.1 21,19V5C21,3.89 20.1,3 19,3Z",delimiter:"M12,2C6.48,2 2,6.48 2,12C2,17.52 6.48,22 12,22C17.52,22 22,17.52 22,12C22,6.48 17.52,2 12,2Z",sort:"M13,20H11V8L5.5,13.5L4.08,12.08L12,4.16L19.92,12.08L18.5,13.5L13,8V20Z",expand:"M7.41,8.58L12,13.17L16.59,8.58L18,10L12,16L6,10L7.41,8.58Z",menu:"M3,6H21V8H3V6M3,11H21V13H3V11M3,16H21V18H3V16Z",subgroup:"M7,10L12,15L17,10H7Z",dropdown:"M7,10L12,15L17,10H7Z",radioOn:"M12,20C7.58,20 4,16.42 4,12C4,7.58 7.58,4 12,4C16.42,4 20,7.58 20,12C20,16.42 16.42,20 12,20M12,2C6.48,2 2,6.48 2,12C2,17.52 6.48,22 12,22C17.52,22 22,17.52 22,12C22,6.48 17.52,2 12,2M12,7C9.24,7 7,9.24 7,12C7,14.76 9.24,17 12,17C14.76,17 17,14.76 17,12C17,9.24 14.76,7 12,7Z",radioOff:"M12,20C7.58,20 4,16.42 4,12C4,7.58 7.58,4 12,4C16.42,4 20,7.58 20,12C20,16.42 16.42,20 12,20M12,2C6.48,2 2,6.48 2,12C2,17.52 6.48,22 12,22C17.52,22 22,17.52 22,12C22,6.48 17.52,2 12,2Z",edit:"M20.71,7.04C21.1,6.65 21.1,6 20.71,5.63L18.37,3.29C18,2.9 17.35,2.9 16.96,3.29L15.12,5.12L18.87,8.87M3,17.25V21H6.75L17.81,9.93L14.06,6.18L3,17.25Z",ratingEmpty:"M12,15.39L8.24,17.66L9.23,13.38L5.91,10.5L10.29,10.13L12,6.09L13.71,10.13L18.09,10.5L14.77,13.38L15.76,17.66M22,9.24L14.81,8.63L12,2L9.19,8.63L2,9.24L7.45,13.97L5.82,21L12,17.27L18.18,21L16.54,13.97L22,9.24Z",ratingFull:"M12,17.27L18.18,21L16.54,13.97L22,9.24L14.81,8.62L12,2L9.19,8.62L2,9.24L7.45,13.97L5.82,21L12,17.27Z",ratingHalf:"M12,15.4V6.1L13.71,10.13L18.09,10.5L14.77,13.39L15.76,17.67M22,9.24L14.81,8.63L12,2L9.19,8.63L2,9.24L7.45,13.97L5.82,21L12,17.27L18.18,21L16.54,13.97L22,9.24Z",loading:"M19,8L15,12H18C18,15.31 15.31,18 12,18C11,18 10.03,17.75 9.2,17.3L7.74,18.76C8.97,19.54 10.43,20 12,20C16.42,20 20,16.42 20,12H23M6,12C6,8.69 8.69,6 12,6C13,6 13.97,6.25 14.8,6.7L16.26,5.24C15.03,4.46 13.57,4 12,4C7.58,4 4,7.58 4,12H1L5,16L9,12",first:"M18.41,16.59L13.82,12L18.41,7.41L17,6L11,12L17,18L18.41,16.59M6,6H8V18H6V6Z",last:"M5.59,7.41L10.18,12L5.59,16.59L7,18L13,12L7,6L5.59,7.41M16,6H18V18H16V6Z",unfold:"M12,18.17L8.83,15L7.42,16.41L12,21L16.59,16.41L15.17,15M12,5.83L15.17,9L16.58,7.59L12,3L7.41,7.59L8.83,9L12,5.83Z",file:"M16.5,6V17.5C16.5,19.71 14.71,21.5 12.5,21.5C10.29,21.5 8.5,19.71 8.5,17.5V5C8.5,3.62 9.62,2.5 11,2.5C12.38,2.5 13.5,3.62 13.5,5V15.5C13.5,16.05 13.05,16.5 12.5,16.5C11.95,16.5 11.5,16.05 11.5,15.5V6H10V15.5C10,16.88 11.12,18 12.5,18C13.88,18 15,16.88 15,15.5V5C15,2.79 13.21,1 11,1C8.79,1 7,2.79 
7,5V17.5C7,20.54 9.46,23 12.5,23C15.54,23 18,20.54 18,17.5V6H16.5Z",plus:"M19,13H13V19H11V13H5V11H11V5H13V11H19V13Z",minus:"M19,13H5V11H19V13Z"}),w=x,O={complete:"check",cancel:"cancel",close:"close",delete:"cancel",clear:"clear",success:"check_circle",info:"info",warning:"priority_high",error:"warning",prev:"chevron_left",next:"chevron_right",checkboxOn:"check_box",checkboxOff:"check_box_outline_blank",checkboxIndeterminate:"indeterminate_check_box",delimiter:"fiber_manual_record",sort:"arrow_upward",expand:"keyboard_arrow_down",menu:"menu",subgroup:"arrow_drop_down",dropdown:"arrow_drop_down",radioOn:"radio_button_checked",radioOff:"radio_button_unchecked",edit:"edit",ratingEmpty:"star_border",ratingFull:"star",ratingHalf:"star_half",loading:"cached",first:"first_page",last:"last_page",unfold:"unfold_more",file:"attach_file",plus:"add",minus:"remove"},_=O,S={complete:"mdi-check",cancel:"mdi-close-circle",close:"mdi-close",delete:"mdi-close-circle",clear:"mdi-close",success:"mdi-check-circle",info:"mdi-information",warning:"mdi-exclamation",error:"mdi-alert",prev:"mdi-chevron-left",next:"mdi-chevron-right",checkboxOn:"mdi-checkbox-marked",checkboxOff:"mdi-checkbox-blank-outline",checkboxIndeterminate:"mdi-minus-box",delimiter:"mdi-circle",sort:"mdi-arrow-up",expand:"mdi-chevron-down",menu:"mdi-menu",subgroup:"mdi-menu-down",dropdown:"mdi-menu-down",radioOn:"mdi-radiobox-marked",radioOff:"mdi-radiobox-blank",edit:"mdi-pencil",ratingEmpty:"mdi-star-outline",ratingFull:"mdi-star",ratingHalf:"mdi-star-half-full",loading:"mdi-cached",first:"mdi-page-first",last:"mdi-page-last",unfold:"mdi-unfold-more-horizontal",file:"mdi-paperclip",plus:"mdi-plus",minus:"mdi-minus"},C=S,k={complete:"fas fa-check",cancel:"fas fa-times-circle",close:"fas fa-times",delete:"fas fa-times-circle",clear:"fas fa-times-circle",success:"fas fa-check-circle",info:"fas fa-info-circle",warning:"fas fa-exclamation",error:"fas fa-exclamation-triangle",prev:"fas fa-chevron-left",next:"fas fa-chevron-right",checkboxOn:"fas fa-check-square",checkboxOff:"far fa-square",checkboxIndeterminate:"fas fa-minus-square",delimiter:"fas fa-circle",sort:"fas fa-sort-up",expand:"fas fa-chevron-down",menu:"fas fa-bars",subgroup:"fas fa-caret-down",dropdown:"fas fa-caret-down",radioOn:"far fa-dot-circle",radioOff:"far fa-circle",edit:"fas fa-edit",ratingEmpty:"far fa-star",ratingFull:"fas fa-star",ratingHalf:"fas fa-star-half",loading:"fas fa-sync",first:"fas fa-step-backward",last:"fas fa-step-forward",unfold:"fas fa-arrows-alt-v",file:"fas fa-paperclip",plus:"fas fa-plus",minus:"fas fa-minus"},j=k,$={complete:"fa fa-check",cancel:"fa fa-times-circle",close:"fa fa-times",delete:"fa fa-times-circle",clear:"fa fa-times-circle",success:"fa fa-check-circle",info:"fa fa-info-circle",warning:"fa fa-exclamation",error:"fa fa-exclamation-triangle",prev:"fa fa-chevron-left",next:"fa fa-chevron-right",checkboxOn:"fa fa-check-square",checkboxOff:"fa fa-square-o",checkboxIndeterminate:"fa fa-minus-square",delimiter:"fa fa-circle",sort:"fa fa-sort-up",expand:"fa fa-chevron-down",menu:"fa fa-bars",subgroup:"fa fa-caret-down",dropdown:"fa fa-caret-down",radioOn:"fa fa-dot-circle-o",radioOff:"fa fa-circle-o",edit:"fa fa-pencil",ratingEmpty:"fa fa-star-o",ratingFull:"fa fa-star",ratingHalf:"fa fa-star-half-o",loading:"fa fa-refresh",first:"fa fa-step-backward",last:"fa fa-step-forward",unfold:"fa fa-angle-double-down",file:"fa fa-paperclip",plus:"fa fa-plus",minus:"fa fa-minus"},A=$;n("ac1f"),n("1276");function E(t,e){var n={};for(var i in 
e)n[i]={component:t,props:{icon:e[i].split(" fa-")}};return n}var T=E("font-awesome-icon",j),L=Object.freeze({mdiSvg:w,md:_,mdi:C,fa:j,fa4:A,faSvg:T}),I=function(t){Object(u["a"])(n,t);var e=Object(l["a"])(n);function n(t){var r;Object(i["a"])(this,n),r=e.call(this);var o=t[n.property],a=o.iconfont,s=o.values,c=o.component;return r.component=c,r.iconfont=a,r.values=Object(d["r"])(L[a],s),r}return Object(r["a"])(n)}(p["a"]);I.property="icons";n("a4d3"),n("e01a"),n("5319"),n("2ca0"),n("99af");var B="$vuetify.",M=Symbol("Lang fallback");function P(t,e){var n=arguments.length>2&&void 0!==arguments[2]&&arguments[2],i=arguments.length>3?arguments[3]:void 0,r=e.replace(B,""),o=Object(d["j"])(t,r,M);return o===M&&(n?(Object(a["b"])('Translation key "'.concat(r,'" not found in fallback')),o=e):(Object(a["c"])('Translation key "'.concat(r,'" not found, falling back to default')),o=P(i,e,!0,i))),o}var D=function(t){Object(u["a"])(n,t);var e=Object(l["a"])(n);function n(t){var r;Object(i["a"])(this,n),r=e.call(this),r.defaultLocale="en";var o=t[n.property],a=o.current,s=o.locales,c=o.t;return r.current=a,r.locales=s,r.translator=c||r.defaultTranslator,r}return Object(r["a"])(n,[{key:"currentLocale",value:function(t){var e=this.locales[this.current],n=this.locales[this.defaultLocale];return P(e,t,!1,n)}},{key:"t",value:function(t){for(var e=arguments.length,n=new Array(e>1?e-1:0),i=1;i<e;i++)n[i-1]=arguments[i];return t.startsWith(B)?this.translator.apply(this,[t].concat(n)):this.replace(t,n)}},{key:"defaultTranslator",value:function(t){for(var e=arguments.length,n=new Array(e>1?e-1:0),i=1;i<e;i++)n[i-1]=arguments[i];return this.replace(this.currentLocale(t),n)}},{key:"replace",value:function(t,e){return t.replace(/\{(\d+)\}/g,(function(t,n){return String(e[+n])}))}}]),n}(p["a"]);D.property="lang";var R=n("99de"),N=(n("7db0"),n("3835")),V=n("53ca"),F=(n("18a5"),n("b64b"),n("7bc6")),z=n("8da5"),H=(n("3ea3"),.20689655172413793),W=function(t){return t>Math.pow(H,3)?Math.cbrt(t):t/(3*Math.pow(H,2))+4/29},U=function(t){return t>H?Math.pow(t,3):3*Math.pow(H,2)*(t-4/29)};function q(t){var e=W,n=e(t[1]);return[116*n-16,500*(e(t[0]/.95047)-n),200*(n-e(t[2]/1.08883))]}function G(t){var e=U,n=(t[0]+16)/116;return[.95047*e(n+t[1]/500),e(n),1.08883*e(n-t[2]/200)]}var Y=["anchor"],Z=["anchor"];function K(t){for(var e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=!(arguments.length>2&&void 0!==arguments[2])||arguments[2],i=t.anchor,r=Object(c["a"])(t,Y),o=Object.keys(r),a={},s=0;s<o.length;++s){var u=o[s],l=t[u];null!=l&&(n?e?("base"===u||u.startsWith("lighten")||u.startsWith("darken"))&&(a[u]=Object(F["a"])(l)):"object"===Object(V["a"])(l)?a[u]=K(l,!0,n):a[u]=nt(u,Object(F["b"])(l)):a[u]={base:Object(F["c"])(Object(F["b"])(l))})}return e||(a.anchor=i||a.base||a.primary.base),a}var X=function(t,e){return"\n.v-application .".concat(t," {\n background-color: ").concat(e," !important;\n border-color: ").concat(e," !important;\n}\n.v-application .").concat(t,"--text {\n color: ").concat(e," !important;\n caret-color: ").concat(e," !important;\n}")},J=function(t,e,n){var i=e.split(/(\d)/,2),r=Object(N["a"])(i,2),o=r[0],a=r[1];return"\n.v-application .".concat(t,".").concat(o,"-").concat(a," {\n background-color: ").concat(n," !important;\n border-color: ").concat(n," !important;\n}\n.v-application .").concat(t,"--text.text--").concat(o,"-").concat(a," {\n color: ").concat(n," !important;\n caret-color: ").concat(n," !important;\n}")},Q=function(t){var e=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:"base";return"--v-".concat(t,"-").concat(e)},tt=function(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"base";return"var(".concat(Q(t,e),")")};function et(t){var e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=t.anchor,i=Object(c["a"])(t,Z),r=Object.keys(i);if(!r.length)return"";var o="",a="",s=e?tt("anchor"):n;a+=".v-application a { color: ".concat(s,"; }"),e&&(o+=" ".concat(Q("anchor"),": ").concat(n,";\n"));for(var u=0;u<r.length;++u){var l=r[u],f=t[l];a+=X(l,e?tt(l):f.base),e&&(o+=" ".concat(Q(l),": ").concat(f.base,";\n"));for(var h=Object(d["q"])(f),p=0;p<h.length;++p){var v=h[p],m=f[v];"base"!==v&&(a+=J(l,v,e?tt(l,v):m),e&&(o+=" ".concat(Q(l,v),": ").concat(m,";\n")))}}return e&&(o=":root {\n".concat(o,"}\n\n")),o+a}function nt(t,e){for(var n={base:Object(F["c"])(e)},i=5;i>0;--i)n["lighten".concat(i)]=Object(F["c"])(it(e,i));for(var r=1;r<=4;++r)n["darken".concat(r)]=Object(F["c"])(rt(e,r));return n}function it(t,e){var n=q(z["b"](t));return n[0]=n[0]+10*e,z["a"](G(n))}function rt(t,e){var n=q(z["b"](t));return n[0]=n[0]-10*e,z["a"](G(n))}var ot=function(t){Object(u["a"])(n,t);var e=Object(l["a"])(n);function n(t){var r;Object(i["a"])(this,n),r=e.call(this),r.disabled=!1,r.isDark=null,r.unwatch=null,r.vueMeta=null;var o=t[n.property],a=o.dark,s=o.disable,c=o.options,u=o.themes;return r.dark=Boolean(a),r.defaults=r.themes=u,r.options=c,s?(r.disabled=!0,Object(R["a"])(r)):(r.themes={dark:r.fillVariant(u.dark,!0),light:r.fillVariant(u.light,!1)},r)}return Object(r["a"])(n,[{key:"css",set:function(t){this.vueMeta?this.isVueMeta23&&this.applyVueMeta23():this.checkOrCreateStyleElement()&&(this.styleEl.innerHTML=t)}},{key:"dark",get:function(){return Boolean(this.isDark)},set:function(t){var e=this.isDark;this.isDark=t,null!=e&&this.applyTheme()}},{key:"applyTheme",value:function(){if(this.disabled)return this.clearCss();this.css=this.generatedStyles}},{key:"clearCss",value:function(){this.css=""}},{key:"init",value:function(t,e){this.disabled||(t.$meta?this.initVueMeta(t):e&&this.initSSR(e),this.initTheme(t))}},{key:"setTheme",value:function(t,e){this.themes[t]=Object.assign(this.themes[t],e),this.applyTheme()}},{key:"resetThemes",value:function(){this.themes.light=Object.assign({},this.defaults.light),this.themes.dark=Object.assign({},this.defaults.dark),this.applyTheme()}},{key:"checkOrCreateStyleElement",value:function(){return this.styleEl=document.getElementById("vuetify-theme-stylesheet"),!!this.styleEl||(this.genStyleElement(),Boolean(this.styleEl))}},{key:"fillVariant",value:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},e=arguments.length>1?arguments[1]:void 0,n=this.themes[e?"dark":"light"];return Object.assign({},n,t)}},{key:"genStyleElement",value:function(){"undefined"!==typeof document&&(this.styleEl=document.createElement("style"),this.styleEl.type="text/css",this.styleEl.id="vuetify-theme-stylesheet",this.options.cspNonce&&this.styleEl.setAttribute("nonce",this.options.cspNonce),document.head.appendChild(this.styleEl))}},{key:"initVueMeta",value:function(t){var e=this;if(this.vueMeta=t.$meta(),this.isVueMeta23)t.$nextTick((function(){e.applyVueMeta23()}));else{var n="function"===typeof this.vueMeta.getOptions?this.vueMeta.getOptions().keyName:"metaInfo",i=t.$options[n]||{};t.$options[n]=function(){i.style=i.style||[];var t=i.style.find((function(t){return"vuetify-theme-stylesheet"===t.id}));return 
t?t.cssText=e.generatedStyles:i.style.push({cssText:e.generatedStyles,type:"text/css",id:"vuetify-theme-stylesheet",nonce:(e.options||{}).cspNonce}),i}}}},{key:"applyVueMeta23",value:function(){var t=this.vueMeta.addApp("vuetify"),e=t.set;e({style:[{cssText:this.generatedStyles,type:"text/css",id:"vuetify-theme-stylesheet",nonce:this.options.cspNonce}]})}},{key:"initSSR",value:function(t){var e=this.options.cspNonce?' nonce="'.concat(this.options.cspNonce,'"'):"";t.head=t.head||"",t.head+='<style type="text/css" id="vuetify-theme-stylesheet"'.concat(e,">").concat(this.generatedStyles,"</style>")}},{key:"initTheme",value:function(t){var e=this;"undefined"!==typeof document&&(this.unwatch&&(this.unwatch(),this.unwatch=null),t.$once("hook:created",(function(){var n=o["a"].observable({themes:e.themes});e.unwatch=t.$watch((function(){return n.themes}),(function(){return e.applyTheme()}),{deep:!0})})),this.applyTheme())}},{key:"currentTheme",get:function(){var t=this.dark?"dark":"light";return this.themes[t]}},{key:"generatedStyles",get:function(){var t,e=this.parsedTheme,n=this.options||{};return null!=n.themeCache&&(t=n.themeCache.get(e),null!=t)||(t=et(e,n.customProperties),null!=n.minifyTheme&&(t=n.minifyTheme(t)),null!=n.themeCache&&n.themeCache.set(e,t)),t}},{key:"parsedTheme",get:function(){return K(this.currentTheme||{},void 0,Object(d["i"])(this.options,["variations"],!0))}},{key:"isVueMeta23",get:function(){return"function"===typeof this.vueMeta.addApp}}]),n}(p["a"]);ot.property="theme";var at=function(){function t(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};Object(i["a"])(this,t),this.framework={isHydrating:!1},this.installed=[],this.preset={},this.userPreset={},this.userPreset=e,this.use(m),this.use(g),this.use(b),this.use(y["a"]),this.use(I),this.use(D),this.use(ot)}return Object(r["a"])(t,[{key:"init",value:function(t,e){var n=this;this.installed.forEach((function(i){var r=n.framework[i];r.framework=n.framework,r.init(t,e)})),this.framework.rtl=Boolean(this.preset.rtl)}},{key:"use",value:function(t){var e=t.property;this.installed.includes(e)||(this.framework[e]=new t(this.preset,this),this.installed.push(e))}}]),t}();at.install=s,at.installed=!1,at.version="2.6.5",at.config={silent:!1}},f36a:function(t,e,n){var i=n("e330");t.exports=i([].slice)},f5df:function(t,e,n){var i=n("da84"),r=n("00ee"),o=n("1626"),a=n("c6b6"),s=n("b622"),c=s("toStringTag"),u=i.Object,l="Arguments"==a(function(){return arguments}()),f=function(t,e){try{return t[e]}catch(n){}};t.exports=r?a:function(t){var e,n,i;return void 0===t?"Undefined":null===t?"Null":"string"==typeof(n=f(e=u(t),c))?n:l?a(e):"Object"==(i=a(e))&&o(e.callee)?"Arguments":i}},f6b4:function(t,e,n){"use strict";var i=n("c532");function r(){this.handlers=[]}r.prototype.use=function(t,e){return this.handlers.push({fulfilled:t,rejected:e}),this.handlers.length-1},r.prototype.eject=function(t){this.handlers[t]&&(this.handlers[t]=null)},r.prototype.forEach=function(t){i.forEach(this.handlers,(function(e){null!==e&&t(e)}))},t.exports=r},f6c4:function(t,e,n){"use strict";n("bd0c");var i=n("d10f");e["a"]=i["a"].extend({name:"v-main",props:{tag:{type:String,default:"main"}},computed:{styles:function(){var t=this.$vuetify.application,e=t.bar,n=t.top,i=t.right,r=t.footer,o=t.insetFooter,a=t.bottom,s=t.left;return{paddingTop:"".concat(n+e,"px"),paddingRight:"".concat(i,"px"),paddingBottom:"".concat(r+o+a,"px"),paddingLeft:"".concat(s,"px")}}},render:function(t){var e={staticClass:"v-main",style:this.styles,ref:"main"};return 
t(this.tag,e,[t("div",{staticClass:"v-main__wrap"},this.$slots.default)])}})},f748:function(t,e){t.exports=Math.sign||function(t){return 0==(t=+t)||t!=t?t:t<0?-1:1}},f772:function(t,e,n){var i=n("5692"),r=n("90e3"),o=i("keys");t.exports=function(t){return o[t]||(o[t]=r(t))}},f8c9:function(t,e,n){var i=n("23e7"),r=n("da84"),o=n("d44e");i({global:!0},{Reflect:{}}),o(r.Reflect,"Reflect",!0)},fb6a:function(t,e,n){"use strict";var i=n("23e7"),r=n("da84"),o=n("e8b5"),a=n("68ee"),s=n("861d"),c=n("23cb"),u=n("07fa"),l=n("fc6a"),f=n("8418"),h=n("b622"),d=n("1dde"),p=n("f36a"),v=d("slice"),m=h("species"),g=r.Array,b=Math.max;i({target:"Array",proto:!0,forced:!v},{slice:function(t,e){var n,i,r,h=l(this),d=u(h),v=c(t,d),y=c(void 0===e?d:e,d);if(o(h)&&(n=h.constructor,a(n)&&(n===g||o(n.prototype))?n=void 0:s(n)&&(n=n[m],null===n&&(n=void 0)),n===g||void 0===n))return p(h,v,y);for(i=new(void 0===n?g:n)(b(y-v,0)),r=0;v<y;v++,r++)v in h&&f(i,r,h[v]);return i.length=r,i}})},fc6a:function(t,e,n){var i=n("44ad"),r=n("1d80");t.exports=function(t){return i(r(t))}},fce3:function(t,e,n){var i=n("d039"),r=n("da84"),o=r.RegExp;t.exports=i((function(){var t=o(".","s");return!(t.dotAll&&t.exec("\n")&&"s"===t.flags)}))},fdbc:function(t,e){t.exports={CSSRuleList:0,CSSStyleDeclaration:0,CSSValueList:0,ClientRectList:0,DOMRectList:0,DOMStringList:0,DOMTokenList:1,DataTransferItemList:0,FileList:0,HTMLAllCollection:0,HTMLCollection:0,HTMLFormElement:0,HTMLSelectElement:0,MediaList:0,MimeTypeArray:0,NamedNodeMap:0,NodeList:1,PaintRequestList:0,Plugin:0,PluginArray:0,SVGLengthList:0,SVGNumberList:0,SVGPathSegList:0,SVGPointList:0,SVGStringList:0,SVGTransformList:0,SourceBufferList:0,StyleSheetList:0,TextTrackCueList:0,TextTrackList:0,TouchList:0}},fdbf:function(t,e,n){var i=n("4930");t.exports=i&&!Symbol.sham&&"symbol"==typeof Symbol.iterator},fe6c:function(t,e,n){"use strict";n.d(e,"b",(function(){return a}));var i=n("2b0e"),r=n("80d2"),o={absolute:Boolean,bottom:Boolean,fixed:Boolean,left:Boolean,right:Boolean,top:Boolean};function a(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];return i["a"].extend({name:"positionable",props:t.length?Object(r["h"])(o,t):o})}e["a"]=a()},fff9:function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var i=n("d4ec"),r=n("bee2"),o=function(){function t(){Object(i["a"])(this,t),this.framework={}}return Object(r["a"])(t,[{key:"init",value:function(t,e){}}]),t}()}}]); \ No newline at end of file diff --git a/spaces/smartinezbragado/reddit-topic-modelling/src/__init__.py b/spaces/smartinezbragado/reddit-topic-modelling/src/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/sneedium/captcha_pixelplanet/losses.py b/spaces/sneedium/captcha_pixelplanet/losses.py deleted file mode 100644 index eea99b5dc280b2e4719afe0b3bda0b3faf316327..0000000000000000000000000000000000000000 --- a/spaces/sneedium/captcha_pixelplanet/losses.py +++ /dev/null @@ -1,72 +0,0 @@ -from fastai.vision import * - -from modules.model import Model - - -class MultiLosses(nn.Module): - def __init__(self, one_hot=True): - super().__init__() - self.ce = SoftCrossEntropyLoss() if one_hot else torch.nn.CrossEntropyLoss() - self.bce = torch.nn.BCELoss() - - @property - def last_losses(self): - return self.losses - - def _flatten(self, sources, lengths): - return torch.cat([t[:l] for t, l in zip(sources, lengths)]) - - def _merge_list(self, all_res): - if not isinstance(all_res, (list, tuple)): - return all_res - def 
merge(items): - if isinstance(items[0], torch.Tensor): return torch.cat(items, dim=0) - else: return items[0] - res = dict() - for key in all_res[0].keys(): - items = [r[key] for r in all_res] - res[key] = merge(items) - return res - - def _ce_loss(self, output, gt_labels, gt_lengths, idx=None, record=True): - loss_name = output.get('name') - pt_logits, weight = output['logits'], output['loss_weight'] - - assert pt_logits.shape[0] % gt_labels.shape[0] == 0 - iter_size = pt_logits.shape[0] // gt_labels.shape[0] - if iter_size > 1: - gt_labels = gt_labels.repeat(iter_size, 1, 1) - gt_lengths = gt_lengths.repeat(iter_size) - flat_gt_labels = self._flatten(gt_labels, gt_lengths) - flat_pt_logits = self._flatten(pt_logits, gt_lengths) - - nll = output.get('nll') - if nll is not None: - loss = self.ce(flat_pt_logits, flat_gt_labels, softmax=False) * weight - else: - loss = self.ce(flat_pt_logits, flat_gt_labels) * weight - if record and loss_name is not None: self.losses[f'{loss_name}_loss'] = loss - - return loss - - def forward(self, outputs, *args): - self.losses = {} - if isinstance(outputs, (tuple, list)): - outputs = [self._merge_list(o) for o in outputs] - return sum([self._ce_loss(o, *args) for o in outputs if o['loss_weight'] > 0.]) - else: - return self._ce_loss(outputs, *args, record=False) - - -class SoftCrossEntropyLoss(nn.Module): - def __init__(self, reduction="mean"): - super().__init__() - self.reduction = reduction - - def forward(self, input, target, softmax=True): - if softmax: log_prob = F.log_softmax(input, dim=-1) - else: log_prob = torch.log(input) - loss = -(target * log_prob).sum(dim=-1) - if self.reduction == "mean": return loss.mean() - elif self.reduction == "sum": return loss.sum() - else: return loss diff --git a/spaces/songdaooi/Swap/face_parsing/swap.py b/spaces/songdaooi/Swap/face_parsing/swap.py deleted file mode 100644 index 644a49029d21a68514ce23e5ab6c341b55f96351..0000000000000000000000000000000000000000 --- a/spaces/songdaooi/Swap/face_parsing/swap.py +++ /dev/null @@ -1,134 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision.transforms as transforms -import cv2 -import numpy as np - -from .model import BiSeNet - -mask_regions = { - "Background":0, - "Skin":1, - "L-Eyebrow":2, - "R-Eyebrow":3, - "L-Eye":4, - "R-Eye":5, - "Eye-G":6, - "L-Ear":7, - "R-Ear":8, - "Ear-R":9, - "Nose":10, - "Mouth":11, - "U-Lip":12, - "L-Lip":13, - "Neck":14, - "Neck-L":15, - "Cloth":16, - "Hair":17, - "Hat":18 -} - -# Borrowed from simswap -# https://github.com/neuralchen/SimSwap/blob/26c84d2901bd56eda4d5e3c5ca6da16e65dc82a6/util/reverse2original.py#L30 -class SoftErosion(nn.Module): - def __init__(self, kernel_size=15, threshold=0.6, iterations=1): - super(SoftErosion, self).__init__() - r = kernel_size // 2 - self.padding = r - self.iterations = iterations - self.threshold = threshold - - # Create kernel - y_indices, x_indices = torch.meshgrid(torch.arange(0., kernel_size), torch.arange(0., kernel_size)) - dist = torch.sqrt((x_indices - r) ** 2 + (y_indices - r) ** 2) - kernel = dist.max() - dist - kernel /= kernel.sum() - kernel = kernel.view(1, 1, *kernel.shape) - self.register_buffer('weight', kernel) - - def forward(self, x): - x = x.float() - for i in range(self.iterations - 1): - x = torch.min(x, F.conv2d(x, weight=self.weight, groups=x.shape[1], padding=self.padding)) - x = F.conv2d(x, weight=self.weight, groups=x.shape[1], padding=self.padding) - - mask = x >= self.threshold - x[mask] = 1.0 - x[~mask] /= x[~mask].max() - - 
return x, mask - -device = "cpu" - -def init_parser(pth_path, mode="cpu"): - global device - device = mode - n_classes = 19 - net = BiSeNet(n_classes=n_classes) - if device == "cuda": - net.cuda() - net.load_state_dict(torch.load(pth_path)) - else: - net.load_state_dict(torch.load(pth_path, map_location=torch.device('cpu'))) - net.eval() - return net - - -def image_to_parsing(img, net): - img = cv2.resize(img, (512, 512)) - img = img[:,:,::-1] - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) - ]) - img = transform(img.copy()) - img = torch.unsqueeze(img, 0) - - with torch.no_grad(): - img = img.to(device) - out = net(img)[0] - parsing = out.squeeze(0).cpu().numpy().argmax(0) - return parsing - - -def get_mask(parsing, classes): - res = parsing == classes[0] - for val in classes[1:]: - res += parsing == val - return res - -def swap_regions(source, target, net, smooth_mask, includes=[1,2,3,4,5,10,11,12,13], blur=10): - parsing = image_to_parsing(source, net) - - if len(includes) == 0: - return source, np.zeros_like(source) - - include_mask = get_mask(parsing, includes) - mask = np.repeat(include_mask[:, :, np.newaxis], 3, axis=2).astype("float32") - - if smooth_mask is not None: - mask_tensor = torch.from_numpy(mask.copy().transpose((2, 0, 1))).float().to(device) - face_mask_tensor = mask_tensor[0] + mask_tensor[1] - soft_face_mask_tensor, _ = smooth_mask(face_mask_tensor.unsqueeze_(0).unsqueeze_(0)) - soft_face_mask_tensor.squeeze_() - mask = np.repeat(soft_face_mask_tensor.cpu().numpy()[:, :, np.newaxis], 3, axis=2) - - if blur > 0: - mask = cv2.GaussianBlur(mask, (0, 0), blur) - - resized_source = cv2.resize((source/255).astype("float32"), (512, 512)) - resized_target = cv2.resize((target/255).astype("float32"), (512, 512)) - - result = mask * resized_source + (1 - mask) * resized_target - normalized_result = (result - np.min(result)) / (np.max(result) - np.min(result)) - result = cv2.resize((result*255).astype("uint8"), (source.shape[1], source.shape[0])) - - return result - -def mask_regions_to_list(values): - out_ids = [] - for value in values: - if value in mask_regions.keys(): - out_ids.append(mask_regions.get(value)) - return out_ids diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/data_utils.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/data_utils.py deleted file mode 100644 index b3de57681e0fb6b026003eff19f7745caf6799d3..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/data_utils.py +++ /dev/null @@ -1,595 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -try: - from collections.abc import Iterable -except ImportError: - from collections import Iterable -import contextlib -import itertools -import logging -import re -import warnings -from typing import Optional, Tuple - -import numpy as np -import torch - -from fairseq.file_io import PathManager -from fairseq import utils -import os - -logger = logging.getLogger(__name__) - - -def infer_language_pair(path): - """Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx""" - src, dst = None, None - for filename in PathManager.ls(path): - parts = filename.split(".") - if len(parts) >= 3 and len(parts[1].split("-")) == 2: - return parts[1].split("-") - return src, dst - - -def collate_tokens( - values, - pad_idx, - eos_idx=None, - left_pad=False, - move_eos_to_beginning=False, - pad_to_length=None, - pad_to_multiple=1, - pad_to_bsz=None, -): - """Convert a list of 1d tensors into a padded 2d tensor.""" - size = max(v.size(0) for v in values) - size = size if pad_to_length is None else max(size, pad_to_length) - if pad_to_multiple != 1 and size % pad_to_multiple != 0: - size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple) - - batch_size = len(values) if pad_to_bsz is None else max(len(values), pad_to_bsz) - res = values[0].new(batch_size, size).fill_(pad_idx) - - def copy_tensor(src, dst): - assert dst.numel() == src.numel() - if move_eos_to_beginning: - if eos_idx is None: - # if no eos_idx is specified, then use the last token in src - dst[0] = src[-1] - else: - dst[0] = eos_idx - dst[1:] = src[:-1] - else: - dst.copy_(src) - - for i, v in enumerate(values): - copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)]) - return res - -def load_indexed_dataset( - path, dictionary=None, dataset_impl=None, combine=False, default="cached" -): - """A helper function for loading indexed datasets. - - Args: - path (str): path to indexed dataset (e.g., 'data-bin/train') - dictionary (~fairseq.data.Dictionary): data dictionary - dataset_impl (str, optional): which dataset implementation to use. If - not provided, it will be inferred automatically. For legacy indexed - data we use the 'cached' implementation by default. - combine (bool, optional): automatically load and combine multiple - datasets. For example, if *path* is 'data-bin/train', then we will - combine 'data-bin/train', 'data-bin/train1', ... and return a - single ConcatDataset instance. 
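As a quick illustration of what `collate_tokens` above produces, here is the right-padding case written out by hand on toy data. The pad index and the sequences are made up; the real helper additionally handles left padding, moving the EOS token, and padding to a size multiple.

```python
import torch

pad_idx = 1
values = [torch.tensor([5, 6, 7]), torch.tensor([8, 9]), torch.tensor([4])]

# Pad every sequence on the right up to the longest one in the batch.
size = max(v.size(0) for v in values)
batch = values[0].new(len(values), size).fill_(pad_idx)
for i, v in enumerate(values):
    batch[i, : len(v)] = v

print(batch)
# tensor([[5, 6, 7],
#         [8, 9, 1],
#         [4, 1, 1]])
```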
- """ - import fairseq.data.indexed_dataset as indexed_dataset - from fairseq.data.concat_dataset import ConcatDataset - - datasets = [] - for k in itertools.count(): - path_k = path + (str(k) if k > 0 else "") - try: - path_k = indexed_dataset.get_indexed_dataset_to_local(path_k) - except Exception as e: - if "StorageException: [404] Path not found" in str(e): - logger.warning(f"path_k: {e} not found") - else: - raise e - - dataset_impl_k = dataset_impl - if dataset_impl_k is None: - dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k) - dataset = indexed_dataset.make_dataset( - path_k, - impl=dataset_impl_k or default, - fix_lua_indexing=True, - dictionary=dictionary, - ) - if dataset is None: - break - logger.info("loaded {:,} examples from: {}".format(len(dataset), path_k)) - datasets.append(dataset) - if not combine: - break - if len(datasets) == 0: - return None - elif len(datasets) == 1: - return datasets[0] - else: - return ConcatDataset(datasets) - - -@contextlib.contextmanager -def numpy_seed(seed, *addl_seeds): - """Context manager which seeds the NumPy PRNG with the specified seed and - restores the state afterward""" - if seed is None: - yield - return - if len(addl_seeds) > 0: - seed = int(hash((seed, *addl_seeds)) % 1e6) - state = np.random.get_state() - np.random.seed(seed) - try: - yield - finally: - np.random.set_state(state) - - -def collect_filtered(function, iterable, filtered): - """ - Similar to :func:`filter` but collects filtered elements in ``filtered``. - - Args: - function (callable): function that returns ``False`` for elements that - should be filtered - iterable (iterable): iterable to filter - filtered (list): list to store filtered elements - """ - for el in iterable: - if function(el): - yield el - else: - filtered.append(el) - - -def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False): - def compare_leq(a, b): - return a <= b if not isinstance(a, tuple) else max(a) <= b - - def check_size(idx): - if isinstance(max_positions, float) or isinstance(max_positions, int): - return size_fn(idx) <= max_positions - elif isinstance(max_positions, dict): - idx_size = size_fn(idx) - assert isinstance(idx_size, dict) - intersect_keys = set(max_positions.keys()) & set(idx_size.keys()) - return all( - all( - a is None or b is None or a <= b - for a, b in zip(idx_size[key], max_positions[key]) - ) - for key in intersect_keys - ) - else: - # For MultiCorpusSampledDataset, will generalize it later - if not isinstance(size_fn(idx), Iterable): - return all(size_fn(idx) <= b for b in max_positions) - return all( - a is None or b is None or a <= b - for a, b in zip(size_fn(idx), max_positions) - ) - - ignored = [] - itr = collect_filtered(check_size, indices, ignored) - indices = np.fromiter(itr, dtype=np.int64, count=-1) - return indices, ignored - - -def filter_by_size(indices, dataset, max_positions, raise_exception=False): - """ - [deprecated] Filter indices based on their size. - Use `FairseqDataset::filter_indices_by_size` instead. - - Args: - indices (List[int]): ordered list of dataset indices - dataset (FairseqDataset): fairseq dataset instance - max_positions (tuple): filter elements larger than this size. - Comparisons are done component-wise. - raise_exception (bool, optional): if ``True``, raise an exception if - any elements are filtered (default: False). - """ - warnings.warn( - "data_utils.filter_by_size is deprecated. 
" - "Use `FairseqDataset::filter_indices_by_size` instead.", - stacklevel=2, - ) - if isinstance(max_positions, float) or isinstance(max_positions, int): - if hasattr(dataset, "sizes") and isinstance(dataset.sizes, np.ndarray): - ignored = indices[dataset.sizes[indices] > max_positions].tolist() - indices = indices[dataset.sizes[indices] <= max_positions] - elif ( - hasattr(dataset, "sizes") - and isinstance(dataset.sizes, list) - and len(dataset.sizes) == 1 - ): - ignored = indices[dataset.sizes[0][indices] > max_positions].tolist() - indices = indices[dataset.sizes[0][indices] <= max_positions] - else: - indices, ignored = _filter_by_size_dynamic( - indices, dataset.size, max_positions - ) - else: - indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions) - - if len(ignored) > 0 and raise_exception: - raise Exception( - ( - "Size of sample #{} is invalid (={}) since max_positions={}, " - "skip this example with --skip-invalid-size-inputs-valid-test" - ).format(ignored[0], dataset.size(ignored[0]), max_positions) - ) - if len(ignored) > 0: - logger.warning( - ( - "{} samples have invalid sizes and will be skipped, " - "max_positions={}, first few sample ids={}" - ).format(len(ignored), max_positions, ignored[:10]) - ) - return indices - - -def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes): - """Filter a list of sample indices. Remove those that are longer - than specified in max_sizes. - - Args: - indices (np.array): original array of sample indices - max_sizes (int or list[int] or tuple[int]): max sample size, - can be defined separately for src and tgt (then list or tuple) - - Returns: - np.array: filtered sample array - list: list of removed indices - """ - if max_sizes is None: - return indices, [] - if type(max_sizes) in (int, float): - max_src_size, max_tgt_size = max_sizes, max_sizes - else: - max_src_size, max_tgt_size = max_sizes - if tgt_sizes is None: - ignored = indices[src_sizes[indices] > max_src_size] - else: - ignored = indices[ - (src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size) - ] - if len(ignored) > 0: - if tgt_sizes is None: - indices = indices[src_sizes[indices] <= max_src_size] - else: - indices = indices[ - (src_sizes[indices] <= max_src_size) - & (tgt_sizes[indices] <= max_tgt_size) - ] - return indices, ignored.tolist() - - -def batch_by_size( - indices, - num_tokens_fn, - num_tokens_vec=None, - max_tokens=None, - max_sentences=None, - required_batch_size_multiple=1, - fixed_shapes=None, -): - """ - Yield mini-batches of indices bucketed by size. Batches may contain - sequences of different lengths. - - Args: - indices (List[int]): ordered list of dataset indices - num_tokens_fn (callable): function that returns the number of tokens at - a given index - num_tokens_vec (List[int], optional): precomputed vector of the number - of tokens for each index in indices (to enable faster batch generation) - max_tokens (int, optional): max number of tokens in each batch - (default: None). - max_sentences (int, optional): max number of sentences in each - batch (default: None). - required_batch_size_multiple (int, optional): require batch size to - be less than N or a multiple of N (default: 1). - fixed_shapes (List[Tuple[int, int]], optional): if given, batches will - only be created with the given shapes. *max_sentences* and - *required_batch_size_multiple* will be ignored (default: None). 
- """ - try: - from fairseq.data.data_utils_fast import ( - batch_by_size_fn, - batch_by_size_vec, - batch_fixed_shapes_fast, - ) - except ImportError: - raise ImportError( - "Please build Cython components with: " - "`python setup.py build_ext --inplace`" - ) - except ValueError: - raise ValueError( - "Please build (or rebuild) Cython components with `python setup.py build_ext --inplace`." - ) - - # added int() to avoid TypeError: an integer is required - max_tokens = ( - int(max_tokens) if max_tokens is not None else -1 - ) - max_sentences = max_sentences if max_sentences is not None else -1 - bsz_mult = required_batch_size_multiple - - if not isinstance(indices, np.ndarray): - indices = np.fromiter(indices, dtype=np.int64, count=-1) - - if num_tokens_vec is not None and not isinstance(num_tokens_vec, np.ndarray): - num_tokens_vec = np.fromiter(num_tokens_vec, dtype=np.int64, count=-1) - - if fixed_shapes is None: - if num_tokens_vec is None: - return batch_by_size_fn( - indices, - num_tokens_fn, - max_tokens, - max_sentences, - bsz_mult, - ) - else: - return batch_by_size_vec( - indices, - num_tokens_vec, - max_tokens, - max_sentences, - bsz_mult, - ) - - else: - fixed_shapes = np.array(fixed_shapes, dtype=np.int64) - sort_order = np.lexsort( - [ - fixed_shapes[:, 1].argsort(), # length - fixed_shapes[:, 0].argsort(), # bsz - ] - ) - fixed_shapes_sorted = fixed_shapes[sort_order] - return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted) - - -def post_process(sentence: str, symbol: str): - if symbol == "sentencepiece": - sentence = sentence.replace(" ", "").replace("\u2581", " ").strip() - elif symbol == "wordpiece": - sentence = sentence.replace(" ", "").replace("_", " ").strip() - elif symbol == "letter": - sentence = sentence.replace(" ", "").replace("|", " ").strip() - elif symbol == "silence": - import re - sentence = sentence.replace("<SIL>", "") - sentence = re.sub(' +', ' ', sentence).strip() - elif symbol == "_EOW": - sentence = sentence.replace(" ", "").replace("_EOW", " ").strip() - elif symbol in {"subword_nmt", "@@ ", "@@"}: - if symbol == "subword_nmt": - symbol = "@@ " - sentence = (sentence + " ").replace(symbol, "").rstrip() - elif symbol == "none": - pass - elif symbol is not None: - raise NotImplementedError(f"Unknown post_process option: {symbol}") - return sentence - - -def compute_mask_indices( - shape: Tuple[int, int], - padding_mask: Optional[torch.Tensor], - mask_prob: float, - mask_length: int, - mask_type: str = "static", - mask_other: float = 0.0, - min_masks: int = 0, - no_overlap: bool = False, - min_space: int = 0, -) -> np.ndarray: - """ - Computes random mask spans for a given shape - - Args: - shape: the the shape for which to compute masks. - should be of size 2 where first element is batch size and 2nd is timesteps - padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements - mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by - number of timesteps divided by length of mask span to mask approximately this percentage of all elements. - however due to overlaps, the actual number will be smaller (unless no_overlap is True) - mask_type: how to compute mask lengths - static = fixed size - uniform = sample from uniform distribution [mask_other, mask_length*2] - normal = sample from normal distribution with mean mask_length and stdev mask_other. 
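The `post_process` helper above simply reverses whichever subword segmentation produced the tokens. Two concrete cases follow, under the assumption that fairseq is installed so the function can be imported from this module; the example strings are made up.

```python
from fairseq.data.data_utils import post_process  # helper defined above

# SentencePiece output: drop the spaces between pieces, then turn the
# "\u2581" word-boundary marker back into ordinary spaces.
print(post_process("\u2581the \u2581quick \u2581brown \u2581fox", "sentencepiece"))
# -> "the quick brown fox"

# subword-nmt BPE uses "@@ " as the continuation marker instead.
print(post_process("th@@ is is a te@@ st", "subword_nmt"))
# -> "this is a test"
```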
mask is min 1 element - poisson = sample from possion distribution with lambda = mask length - min_masks: minimum number of masked spans - no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping - min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans - """ - - bsz, all_sz = shape - mask = np.full((bsz, all_sz), False) - - all_num_mask = int( - # add a random number for probabilistic rounding - mask_prob * all_sz / float(mask_length) - + np.random.rand() - ) - - all_num_mask = max(min_masks, all_num_mask) - - mask_idcs = [] - for i in range(bsz): - if padding_mask is not None: - sz = all_sz - padding_mask[i].long().sum().item() - num_mask = int( - # add a random number for probabilistic rounding - mask_prob * sz / float(mask_length) - + np.random.rand() - ) - num_mask = max(min_masks, num_mask) - else: - sz = all_sz - num_mask = all_num_mask - - if mask_type == "static": - lengths = np.full(num_mask, mask_length) - elif mask_type == "uniform": - lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask) - elif mask_type == "normal": - lengths = np.random.normal(mask_length, mask_other, size=num_mask) - lengths = [max(1, int(round(x))) for x in lengths] - elif mask_type == "poisson": - lengths = np.random.poisson(mask_length, size=num_mask) - lengths = [int(round(x)) for x in lengths] - else: - raise Exception("unknown mask selection " + mask_type) - - if sum(lengths) == 0: - lengths[0] = min(mask_length, sz - 1) - - if no_overlap: - mask_idc = [] - - def arrange(s, e, length, keep_length): - span_start = np.random.randint(s, e - length) - mask_idc.extend(span_start + i for i in range(length)) - - new_parts = [] - if span_start - s - min_space >= keep_length: - new_parts.append((s, span_start - min_space + 1)) - if e - span_start - keep_length - min_space > keep_length: - new_parts.append((span_start + length + min_space, e)) - return new_parts - - parts = [(0, sz)] - min_length = min(lengths) - for length in sorted(lengths, reverse=True): - lens = np.fromiter( - (e - s if e - s >= length + min_space else 0 for s, e in parts), - np.int, - ) - l_sum = np.sum(lens) - if l_sum == 0: - break - probs = lens / np.sum(lens) - c = np.random.choice(len(parts), p=probs) - s, e = parts.pop(c) - parts.extend(arrange(s, e, length, min_length)) - mask_idc = np.asarray(mask_idc) - else: - min_len = min(lengths) - if sz - min_len <= num_mask: - min_len = sz - num_mask - 1 - - mask_idc = np.random.choice(sz - min_len, num_mask, replace=False) - - mask_idc = np.asarray( - [ - mask_idc[j] + offset - for j in range(len(mask_idc)) - for offset in range(lengths[j]) - ] - ) - - mask_idcs.append(np.unique(mask_idc[mask_idc < sz])) - - min_len = min([len(m) for m in mask_idcs]) - for i, mask_idc in enumerate(mask_idcs): - if len(mask_idc) > min_len: - mask_idc = np.random.choice(mask_idc, min_len, replace=False) - mask[i, mask_idc] = True - - return mask - - -def get_mem_usage(): - try: - import psutil - - mb = 1024 * 1024 - return f"used={psutil.virtual_memory().used / mb}Mb; avail={psutil.virtual_memory().available / mb}Mb" - except ImportError: - return "N/A" - - -# lens: torch.LongTensor -# returns: torch.BoolTensor -def lengths_to_padding_mask(lens): - bsz, max_lens = lens.size(0), torch.max(lens).item() - mask = torch.arange(max_lens).to(lens.device).view(1, max_lens) - mask = mask.expand(bsz, -1) >= lens.view(bsz, 1).expand(-1, max_lens) - return mask - - -# lens: torch.LongTensor -# returns: 
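Tying two of the helpers above together: `lengths_to_padding_mask` turns a length vector into a boolean padding mask, and `compute_mask_indices` then samples wav2vec-style mask spans that avoid the padded positions. A small sketch, assuming fairseq is installed with this module layout; the lengths and masking hyper-parameters are arbitrary.

```python
import torch
from fairseq.data.data_utils import compute_mask_indices, lengths_to_padding_mask

# Two sequences of lengths 4 and 6 inside a batch padded to 6 timesteps.
lens = torch.tensor([4, 6])
padding_mask = lengths_to_padding_mask(lens)   # True exactly at padded positions
# tensor([[False, False, False, False,  True,  True],
#         [False, False, False, False, False, False]])

# Sample spans of length 2 covering roughly half of the valid timesteps;
# indices beyond each sequence's true length are never included in a span.
span_mask = compute_mask_indices(
    shape=(2, 6),
    padding_mask=padding_mask,
    mask_prob=0.5,
    mask_length=2,
)
print(span_mask)   # (2, 6) boolean NumPy array
```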
torch.BoolTensor -def lengths_to_mask(lens): - return ~lengths_to_padding_mask(lens) - - -def get_buckets(sizes, num_buckets): - buckets = np.unique( - np.percentile( - sizes, - np.linspace(0, 100, num_buckets + 1), - interpolation='lower', - )[1:] - ) - return buckets - - -def get_bucketed_sizes(orig_sizes, buckets): - sizes = np.copy(orig_sizes) - assert np.min(sizes) >= 0 - start_val = -1 - for end_val in buckets: - mask = (sizes > start_val) & (sizes <= end_val) - sizes[mask] = end_val - start_val = end_val - return sizes - - - -def _find_extra_valid_paths(dataset_path: str) -> set: - paths = utils.split_paths(dataset_path) - all_valid_paths = set() - for sub_dir in paths: - contents = PathManager.ls(sub_dir) - valid_paths = [c for c in contents if re.match("valid*[0-9].*", c) is not None] - all_valid_paths |= {os.path.basename(p) for p in valid_paths} - # Remove .bin, .idx etc - roots = {os.path.splitext(p)[0] for p in all_valid_paths} - return roots - - -def raise_if_valid_subsets_unintentionally_ignored(train_cfg) -> None: - """Raises if there are paths matching 'valid*[0-9].*' which are not combined or ignored.""" - if ( - train_cfg.dataset.ignore_unused_valid_subsets - or train_cfg.dataset.combine_valid_subsets - or train_cfg.dataset.disable_validation - or not hasattr(train_cfg.task, "data") - ): - return - other_paths = _find_extra_valid_paths(train_cfg.task.data) - specified_subsets = train_cfg.dataset.valid_subset.split(",") - ignored_paths = [p for p in other_paths if p not in specified_subsets] - if ignored_paths: - advice = "Set --combine-val to combine them or --ignore-unused-valid-subsets to ignore them." - msg = f"Valid paths {ignored_paths} will be ignored. {advice}" - raise ValueError(msg) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/positional_embedding.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/positional_embedding.py deleted file mode 100644 index 8e94e35edb46bf9dea911fe74577d8ecbe9b5ff1..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/positional_embedding.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch.nn as nn - -from .learned_positional_embedding import LearnedPositionalEmbedding -from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding - - -def PositionalEmbedding( - num_embeddings: int, - embedding_dim: int, - padding_idx: int, - learned: bool = False, -): - if learned: - # if padding_idx is specified then offset the embedding ids by - # this index and adjust num_embeddings appropriately - # TODO: The right place for this offset would be inside - # LearnedPositionalEmbedding. Move this there for a cleaner implementation. 
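The bucketing helpers above round every sample size up to one of a few percentile-based bucket edges, which keeps padded batch shapes stable. A short usage sketch, again assuming fairseq is importable; the sizes and bucket count are arbitrary.

```python
import numpy as np
from fairseq.data.data_utils import get_buckets, get_bucketed_sizes

sizes = np.array([3, 5, 7, 9, 11, 13, 15, 17])
buckets = get_buckets(sizes, num_buckets=2)      # upper edges chosen by percentile
bucketed = get_bucketed_sizes(sizes, buckets)    # each size rounded up to its bucket edge

print(buckets)     # e.g. [ 9 17]
print(bucketed)    # e.g. [ 9  9  9  9 17 17 17 17]
```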
- if padding_idx is not None: - num_embeddings = num_embeddings + padding_idx + 1 - m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) - nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) - if padding_idx is not None: - nn.init.constant_(m.weight[padding_idx], 0) - else: - m = SinusoidalPositionalEmbedding( - embedding_dim, - padding_idx, - init_size=num_embeddings + padding_idx + 1, - ) - return m diff --git a/spaces/srush/minichain-table/app.py b/spaces/srush/minichain-table/app.py deleted file mode 100644 index d4fd01c9b13b54b2a15b67c474890ba8bcfadcb9..0000000000000000000000000000000000000000 --- a/spaces/srush/minichain-table/app.py +++ /dev/null @@ -1,85 +0,0 @@ -# + tags=["hide_inp"] -desc = """ -### Table - -Example of extracting tables from a textual document. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/table.ipynb) - -""" -# - - -# $ -import pandas as pd -from minichain import prompt, Mock, show, OpenAIStream -import minichain -import json -import gradio as gr -import requests - -rotowire = requests.get("https://raw.githubusercontent.com/srush/text2table/main/data.json").json() -names = { - '3-pointer percentage': 'FG3_PCT', - '3-pointers attempted': 'FG3A', - '3-pointers made': 'FG3M', - 'Assists': 'AST', - 'Blocks': 'BLK', - 'Field goal percentage': 'FG_PCT', - 'Field goals attempted': 'FGA', - 'Field goals made': 'FGM', - 'Free throw percentage': 'FT_PCT', - 'Free throws attempted': 'FTA', - 'Free throws made': 'FTM', - 'Minutes played': 'MIN', - 'Personal fouls': 'PF', - 'Points': 'PTS', - 'Rebounds': 'REB', - 'Rebounds (Defensive)': 'DREB', - 'Rebounds (Offensive)': 'OREB', - 'Steals': 'STL', - 'Turnovers': 'TO' -} -# Convert an example to dataframe -def to_df(d): - players = {player for v in d.values() if v is not None for player, _ in v.items()} - lookup = {k: {a: b for a, b in v.items()} for k,v in d.items()} - rows = [dict(**{"player": p}, **{k: "_" if p not in lookup.get(k, []) else lookup[k][p] for k in names.keys()}) - for p in players] - return pd.DataFrame.from_dict(rows).astype("str").sort_values(axis=0, by="player", ignore_index=True).transpose() - - -# Make few shot examples -few_shot_examples = 2 -examples = [] -for i in range(few_shot_examples): - examples.append({"input": rotowire[i][1], - "output": to_df(rotowire[i][0][1]).transpose().set_index("player").to_csv(sep="\t")}) - -@prompt(OpenAIStream(), - template_file="table.pmpt.txt", - block_output=gr.HTML, - stream=True) -def extract(model, passage, typ): - state = [] - out = "" - for token in model.stream(dict(player_keys=names.items(), examples=examples, passage=passage, type=typ)): - out += token - html = "<table><tr><td>" + out.replace("\t", "</td><td>").replace("\n", "</td></tr><tr><td>") + "</td></td></table>" - yield html - yield html - - - -def run(query): - return extract(query, "Player") - -# $ - -import os -gradio = show(run, - examples = [rotowire[i][1] for i in range(50, 55)], - subprompts=[extract], - code=open("table.py" if os.path.exists("table.py") else "app.py", "r").read().split("$")[1].strip().strip("#").strip(), - out_type="markdown" - ) - -if __name__ == "__main__": - gradio.queue().launch() diff --git a/spaces/stomexserde/gpt4-ui/Examples/Ample Bass Torrent LINK.md b/spaces/stomexserde/gpt4-ui/Examples/Ample Bass Torrent LINK.md deleted file mode 100644 index bc29168165466d07c61c22fb5adcaf65f5af4d90..0000000000000000000000000000000000000000 --- 
a/spaces/stomexserde/gpt4-ui/Examples/Ample Bass Torrent LINK.md +++ /dev/null @@ -1,23 +0,0 @@ -<br /> -<h1>How to Download and Install Ample Bass Torrent for Free</h1> -<p>If you are looking for a realistic and versatile virtual bass guitar instrument, you might want to check out Ample Bass Torrent. This is a collection of four bass guitar plugins from Ample Sound, based on samples from the Fender Precision Bass, Fender Jazz Bass, Music Man Stingray 5 Classic, and Acoustic Bass. Each plugin has a library of over 3 GB of samples, recorded at every fret with no destructive editing or dynamics processing. You can also choose from 14 articulations, such as sustain, palm mute, slap, pop, tap, and more.</p> -<p>Ample Bass Torrent is compatible with Windows and Mac OS X, and supports VSTi, VSTi3, AAX, AU, and standalone formats. You can use it in your favorite DAW or as a standalone application. You can also customize the sound with various effects, such as EQ, compressor, distortion, chorus, delay, and reverb. You can also use the built-in riffer, tab reader, and MIDI controller to create your own bass lines.</p> -<h2>Ample Bass Torrent</h2><br /><p><b><b>DOWNLOAD</b> &#127775; <a href="https://urlgoal.com/2uI9ee">https://urlgoal.com/2uI9ee</a></b></p><br /><br /> -<p>But how can you download and install Ample Bass Torrent for free? Well, there are some websites that offer torrent files for these plugins, such as vsttorrentz.net or audiostorrent.com. However, these are not official sources and may contain viruses or malware that can harm your computer or compromise your privacy. Moreover, downloading pirated software is illegal and unethical, and may result in legal consequences or fines.</p> -<p>The best way to get Ample Bass Torrent for free is to use the trial version from the official website of Ample Sound. This way, you can test the plugins for 7 days without any limitations or restrictions. You can also access the latest updates and support from the developers. If you like the plugins and want to use them permanently, you can purchase them from the website or from authorized dealers. The prices range from $89 to $149 per plugin, depending on the model.</p> -<p>So what are you waiting for? Visit <a href="https://www.amplesound.net/en/index.asp">https://www.amplesound.net/en/index.asp</a> and download the trial version of Ample Bass Torrent today. You will be amazed by the quality and realism of these bass guitar plugins.</p> - -<h2>What are the Benefits of Ample Bass Torrent?</h2> -<p>Ample Bass Torrent is not just a simple bass guitar plugin. It is a powerful and flexible instrument that can help you create realistic and expressive bass tracks for any genre of music. Here are some of the benefits of using Ample Bass Torrent:</p> -<p></p> -<ul> -<li>You can get the sound of four different bass guitars in one plugin. Whether you need a warm and smooth tone, a bright and punchy tone, a deep and fat tone, or a natural and acoustic tone, you can find it in Ample Bass Torrent.</li> -<li>You can customize the sound with various parameters, such as volume, pan, tone, pickup, string, fret noise, release noise, and more. You can also adjust the tuning, intonation, and fretboard position of each string.</li> -<li>You can use the effects section to add more character and color to your sound. You can choose from three classic amp models and four cabinet models, each with four mic options. 
You can also apply EQ, compressor, distortion, chorus, delay, and reverb effects.</li> -<li>You can use the riffer to generate realistic and inspiring bass riffs. You can choose from hundreds of presets or create your own patterns. You can also drag and drop the MIDI files to your DAW for further editing.</li> -<li>You can use the tab reader to play any tablature file with Ample Bass Torrent. You can import Guitar Pro files or plain text files. You can also edit the tablature with the built-in editor.</li> -<li>You can use the MIDI controller to control Ample Bass Torrent with your keyboard or MIDI device. You can assign any parameter to any MIDI CC or key switch. You can also use the pitch wheel and modulation wheel to control the vibrato and bend effects.</li> -</ul> -<p>As you can see, Ample Bass Torrent is a versatile and comprehensive bass guitar plugin that can enhance your music production. Whether you are a beginner or a professional, you can find something useful and enjoyable in Ample Bass Torrent.</p> 7b8c122e87<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Breaking Bad S05e10 720p Uploaded Search BEST.md b/spaces/stomexserde/gpt4-ui/Examples/Breaking Bad S05e10 720p Uploaded Search BEST.md deleted file mode 100644 index 97e891234bc72b4d8747e706f4ea8e45146068bc..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Breaking Bad S05e10 720p Uploaded Search BEST.md +++ /dev/null @@ -1,12 +0,0 @@ - -<h1>Breaking Bad S05E10: Buried - A Review</h1> -<p>Breaking Bad is a critically acclaimed American crime drama series created by Vince Gilligan. The show follows Walter White (Bryan Cranston), a high school chemistry teacher who turns to producing and selling methamphetamine after being diagnosed with terminal lung cancer. Along with his former student and partner Jesse Pinkman (Aaron Paul), Walter faces various dangers and moral dilemmas in his illicit business.</p> -<h2>breaking bad s05e10 720p uploaded search</h2><br /><p><b><b>Download</b> &#10027;&#10027;&#10027; <a href="https://urlgoal.com/2uI7q6">https://urlgoal.com/2uI7q6</a></b></p><br /><br /> -<p>In the tenth episode of the fifth and final season, titled "Buried", Walter tries to hide his money from his DEA brother-in-law Hank (Dean Norris), who is determined to bring him down. Meanwhile, Jesse is arrested by the police after throwing away millions of dollars in the streets. Skyler (Anna Gunn), Walter's wife, is conflicted about whether to cooperate with Hank or protect her family. Lydia (Laura Fraser), a business associate of Walter, orchestrates a massacre of a rival meth crew in order to secure a new supply of methylamine.</p> -<p>The episode was written by Thomas Schnauz and directed by Michelle MacLaren. It aired on August 18, 2013 on AMC and received positive reviews from critics and viewers. The episode was nominated for three Primetime Emmy Awards, including Outstanding Writing for a Drama Series, Outstanding Directing for a Drama Series, and Outstanding Supporting Actress in a Drama Series for Anna Gunn.</p> -<p>If you want to watch or download Breaking Bad S05E10 in 720p quality, you can find various links on the internet. One of them is <a href="https://solidtorrents.to/torrents/breaking-bad-season-5-complete-720p-brrip-sujaidr-5044a/5bcf57fe537c2d4ccc7675a0/">Breaking Bad Season 5 Complete 720p.BRrip.Sujaidr</a> [^2^], which has subtitles in different languages. 
You can also find subtitles for this episode on <a href="https://subscene.com/subtitles/breaking-bad-fifth-season/english">Subscene</a> [^1^] or <a href="https://subdl.com/subtitle/sd1300044/breaking-bad/fifth-season/english">SUBDL</a> [^4^]. Alternatively, you can stream the episode online on <a href="https://archive.org/details/breaking-bad-s-01-e-01-pilot_202208">Internet Archive</a> [^3^], which has all the episodes of Breaking Bad available for free.</p><p>"Buried" is a tense and thrilling episode that showcases the brilliant performances of the cast and the skillful direction of MacLaren. The episode explores the themes of loyalty, betrayal, guilt, and greed that have been central to Breaking Bad. The episode also sets up the stage for the final showdown between Walter and Hank, as well as the consequences of Walter's actions for his family and Jesse.</p> -<p>One of the most memorable scenes in the episode is when Walter buries his money in the desert, using a GPS device to mark the location. The scene is a visual metaphor for Walter's descent into darkness and isolation, as he literally digs his own grave. The scene also foreshadows the events of the series finale, when Walter returns to the same spot to retrieve his money and face his enemies.</p> -<p></p> -<p>Another highlight of the episode is the confrontation between Skyler and Hank in a diner, where Hank tries to persuade Skyler to testify against Walter. Skyler, however, refuses to talk and asks for a lawyer, realizing that Hank has no solid evidence against Walter. The scene is a turning point for Skyler's character, as she chooses to stand by her husband despite his crimes. The scene also demonstrates Gunn's superb acting skills, as she conveys Skyler's emotions with subtle facial expressions and body language.</p> cec2833e83<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Free Download Samurai Warriors 3 Pc Fix Full Version.md b/spaces/stomexserde/gpt4-ui/Examples/Free Download Samurai Warriors 3 Pc Fix Full Version.md deleted file mode 100644 index 23affde212e4395cf5767cf8db0cf33d553fe856..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Free Download Samurai Warriors 3 Pc Fix Full Version.md +++ /dev/null @@ -1,29 +0,0 @@ -<br /> -Here is what I created: - -<h1>How to Download Samurai Warriors 3 for PC for Free</h1> -<p>Samurai Warriors 3 is a hack and slash video game that was released in 2009 for the Nintendo Wii and PlayStation 3. It is the third installment in the Samurai Warriors series, which is a spin-off of the Dynasty Warriors franchise. The game features over 35 playable characters, each with their own unique weapons and skills, as well as historical scenarios based on the Sengoku period of Japan.</p> -<h2>free download samurai warriors 3 pc full version</h2><br /><p><b><b>DOWNLOAD</b> &harr; <a href="https://urlgoal.com/2uIalH">https://urlgoal.com/2uIalH</a></b></p><br /><br /> -<p>If you are a fan of Samurai Warriors 3 and want to play it on your PC, you might be wondering how to download it for free. Well, you are in luck, because in this article, we will show you how to do just that. All you need is a Wii emulator, a Samurai Warriors 3 ISO file, and a controller. Follow these steps to get started:</p> -<ol> -<li>Download and install a Wii emulator on your PC. We recommend Dolphin, which is one of the most popular and reliable Wii emulators out there. 
You can download it from <a href="https://dolphin-emu.org/">https://dolphin-emu.org/</a>.</li> -<li>Download a Samurai Warriors 3 ISO file from a reputable source. You can find many websites that offer Wii ISO files for free, but be careful of viruses and malware. We suggest using <a href="https://www.emuparadise.me/">https://www.emuparadise.me/</a>, which has a large collection of Wii games and is safe to use.</li> -<li>Extract the Samurai Warriors 3 ISO file using a program like WinRAR or 7-Zip. You should get a folder with the game files inside.</li> -<li>Launch Dolphin and click on the "Open" button. Navigate to the folder where you extracted the Samurai Warriors 3 ISO file and select it.</li> -<li>Configure your controller settings in Dolphin. You can use a keyboard, a mouse, or a gamepad to play Samurai Warriors 3 on your PC. To set up your controller, go to "Options" > "Controller Settings" and choose the input device you want to use. You can also customize the buttons and axes according to your preference.</li> -<li>Enjoy playing Samurai Warriors 3 on your PC for free!</li> -</ol> -<p>That's it! You have successfully downloaded and installed Samurai Warriors 3 for PC for free. Now you can experience the epic battles and historical drama of the Sengoku period on your computer screen. Have fun!</p> -Here is what I created: - -<p>If you want to learn more about Samurai Warriors 3 and its features, here are some tips and tricks that might help you:</p> -<ul> -<li>Samurai Warriors 3 has a character creation mode, where you can create your own custom warrior and use them in the game. You can choose their gender, appearance, voice, weapon, and skills. You can also edit their biography and personality traits.</li> -<li>Samurai Warriors 3 has a story mode, where you can follow the historical events of the Sengoku period from different perspectives. You can choose from four main factions: the Oda, the Takeda, the Uesugi, and the Tokugawa. Each faction has its own storyline and characters to play as.</li> -<li>Samurai Warriors 3 has a free mode, where you can replay any stage you have cleared in the story mode with any character you want. You can also change the difficulty level and the objectives of each stage.</li> -<li>Samurai Warriors 3 has a challenge mode, where you can test your skills in various mini-games. You can compete for high scores and rankings in speed runs, time attacks, survival battles, and more.</li> -<li>Samurai Warriors 3 has a multiplayer mode, where you can team up with another player online or locally. You can cooperate or compete with each other in various modes, such as story mode, free mode, challenge mode, and versus mode.</li> -</ul> -<p>Samurai Warriors 3 is a fun and exciting game that will keep you entertained for hours. If you are a fan of hack and slash games or Japanese history, you should definitely give it a try. 
You won't regret it!</p> 81aa517590<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/GloboFleetccPlusv261keygen.md b/spaces/stomexserde/gpt4-ui/Examples/GloboFleetccPlusv261keygen.md deleted file mode 100644 index 2f9f5208eb67a3960a6c3b9e2a3890e2a2755edc..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/GloboFleetccPlusv261keygen.md +++ /dev/null @@ -1,25 +0,0 @@ -<br /> -<h1>How to Use GloboFleet CC Plus to Manage Driver Data</h1> -<p>If you are a driver, dispatcher or transport company owner who needs to create and manage detailed reports from data generated by driver cards or storage devices, you may want to try GloboFleet CC Plus. This software solution works in the legal boundaries of driver monitoring and violation tracking while providing an intuitive workflow. In this article, we will show you how to use GloboFleet CC Plus to analyze and archive driver data.</p> -<h2>What is GloboFleet CC Plus?</h2> -<p>GloboFleet CC Plus is a powerful piece of software developed especially for drivers, dispatchers or transport company owners who need to create and manage detailed reports from data generated by driver cards or storage devices[^1^]. It supports all types of digital tachographs and driver cards that comply with the European regulation (EC) No 561/2006. With GloboFleet CC Plus, you can:</p> -<h2>GloboFleetccPlusv261keygen</h2><br /><p><b><b>DOWNLOAD</b> &#127379; <a href="https://urlgoal.com/2uI7dv">https://urlgoal.com/2uI7dv</a></b></p><br /><br /> -<ul> -<li>Read out driver cards and mass storage data from digital tachographs</li> -<li>Analyze driver activities, work shifts, events, faults, controls and violations</li> -<li>Create forecasts and reminders for driver card expiry dates and calibration intervals</li> -<li>Archive driver data locally on your computer or on a network drive</li> -<li>Export driver data in various formats such as PDF, CSV or XML</li> -<li>Print driver data reports or send them by email</li> -<li>Comply with the legal requirements for data archiving and reporting</li> -</ul> -<h2>How to Install GloboFleet CC Plus?</h2> -<p>To install GloboFleet CC Plus on your Windows computer, you need to download the setup file from the official website: https://www.globofleet.co.uk/download.html. You can choose between a demo version that allows you to test the software for 30 days or a full version that requires a license key. After downloading the setup file, run it and follow the instructions on the screen. You will need to accept the terms and conditions, choose a destination folder and create a shortcut on your desktop. The installation process should take only a few minutes.</p> -<h2>How to Use GloboFleet CC Plus?</h2> -<p>To use GloboFleet CC Plus, you need to connect a card reader or a download key to your computer. Then, you can insert your driver card or your mass storage device into the card reader or the download key. The software will automatically detect the device and start reading out the data. You can see the progress of the data transfer on the status bar at the bottom of the main window. Once the data transfer is complete, you can view and analyze the data in various tabs such as Overview, Activities, Violations, Events or Countries. You can also switch between different views such as Calendar, Graph or Table. You can zoom in or out of the graph view by using the mouse wheel or the buttons on the toolbar. 
You can also filter the data by date range, driver name or vehicle number.</p> -<p>To archive the data, you need to click on the Archive button on the toolbar. You can choose to archive the data locally on your computer or on a network drive. You can also specify a folder name and a file name for each archive. The software will create a subfolder for each driver card or mass storage device and store the data in encrypted files with .gfc extension. You can open these files later with GloboFleet CC Plus or with GloboFleet File Checker.</p> -<p>To export or print the data, you need to click on the Export button on the toolbar. You can choose to export or print the data in various formats such as PDF, CSV or XML. You can also select which tabs and which columns you want to include in the export or print. You can also customize the layout and appearance of the export or print by changing fonts, colors, margins and headers.</p> -<p></p> -<p>To send an email with the data attached, you need to click on the Email button on the toolbar. You can choose to send an email with one or more attachments in various formats such as PDF, CSV or XML. You can also select which tabs and which columns you want to include in each attachment. You can also customize the subject and body of the email by using placeholders such as drivername, {vehiclen</p> 81aa517590<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Golden Shiner Streamers.md b/spaces/stomexserde/gpt4-ui/Examples/Golden Shiner Streamers.md deleted file mode 100644 index 8d4f3d9dd3b97da8ab9968f28160e96788be0e8d..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Golden Shiner Streamers.md +++ /dev/null @@ -1,22 +0,0 @@ - -<h1>How to Tie and Fish Golden Shiner Streamers</h1> -<p>Golden shiners are a common baitfish species that can be found in many lakes, ponds, and rivers across North America. They are a favorite prey of many game fish, such as bass, pike, walleye, and trout. Golden shiners have a distinctive body shape, coloration, and behavior that make them an attractive target for streamer flies.</p> -<p>In this article, we will show you how to tie and fish golden shiner streamers, using some of the best patterns and techniques available. We will also give you some tips on how to find and catch fish that feed on golden shiners.</p> -<h2>golden shiner streamers</h2><br /><p><b><b>DOWNLOAD</b> &gt;&gt;&gt; <a href="https://urlgoal.com/2uIaWM">https://urlgoal.com/2uIaWM</a></b></p><br /><br /> -<h2>What are Golden Shiner Streamers?</h2> -<p>Golden shiner streamers are streamer flies that imitate the appearance and movement of golden shiners. Streamer flies are flies that are designed to look like small fish, leeches, crayfish, or other aquatic creatures that swim in the water column. They are usually tied with materials that have a lot of movement and flash, such as marabou, rabbit fur, synthetic fibers, or flashabou.</p> -<p>Golden shiner streamers typically have a long and slender body, with a dark olive-green back, a golden-silver side, and a yellowish or orange belly. They also have a long sickle-shaped anal fin and a thin scaleless keel on the midline of the belly behind the pelvic fins. 
Some patterns also include a dark lateral band or a red spot near the gill cover to mimic the natural markings of golden shiners.</p> -<p></p> -<h2>How to Tie Golden Shiner Streamers?</h2> -<p>There are many ways to tie golden shiner streamers, depending on your preference and skill level. Here are some of the most popular and effective patterns that you can try:</p> -<ul> -<li><strong>Zwirz and Evans' Golden Shiner</strong>: This is a classic pattern that was created by two famous streamer tyers, Mike Zwirz and Bill Evans. It uses bucktail, peacock herl, silver tinsel, red floss, and jungle cock eyes to create a realistic and durable fly. You can find the detailed tying instructions <a href="https://globalflyfisher.com/forage-fish-streamers/golden-shiner/zwirz-and-evans-golden-shiner">here</a>.</li> -<li><strong>Dave Whitlock's Golden Shiner</strong>: This is another classic pattern that was created by one of the most influential fly fishing authors and artists, Dave Whitlock. It uses marabou, chenille, hackle, flashabou, and bead chain eyes to create a simple and effective fly. You can find the detailed tying instructions <a href="https://globalflyfisher.com/forage-fish-streamers/golden-shiner/dave-whitlocks-golden-shiner">here</a>.</li> -<li><strong>Simple Golden Shiner</strong>: This is a modern pattern that was created by Bob Skehan, a member of the Streamer List. It uses rabbit zonker strips, crystal flash, cactus chenille, and dumbbell eyes to create a quick and easy fly. You can find the detailed tying instructions <a href="https://globalflyfisher.com/forage-fish-streamers/golden-shiner/simple-golden-shiner">here</a>.</li> -<li><strong>CATCH's Wiggle Flashabugger</strong>: This is a variation of the popular Wooly Bugger pattern that was created by CATCH Fly Fishing. It uses marabou, flashabou, chenille, hackle, and wiggle tail to create a fly that has a lot of movement and flash. You can find the detailed tying instructions <a href="https://catchflyfish.com/product/wiggle-flashabugger/">here</a>.</li> -<li><strong>Austin Adduci's Shake Dat</strong>: This is a modern pattern that was created by Austin Adduci, a professional fly fishing guide and tyer. It uses craft fur, ice dubbing, flashabou, wire ribbing, and articulation beads to create a fly that has a lot of action and versatility. 
You can find the detailed tying instructions <a href="https://catchflyfish.com/product/shake-dat/">here</a>.</li> -</ul> -<h2>How to Fish Golden Shiner Streamers?</h2> -<p>Golden shiner streamers can be fished in various</p> cec2833e83<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/sugo/v6yu7bgn/index.html b/spaces/sugo/v6yu7bgn/index.html deleted file mode 100644 index 62282ecf37e7b956cba7559423bd73183d3568c2..0000000000000000000000000000000000000000 --- a/spaces/sugo/v6yu7bgn/index.html +++ /dev/null @@ -1,55 +0,0 @@ - -<!DOCTYPE html> -<html lang="en"> - <head> - <meta charset="utf-8" /> - <meta - name="viewport" - content="width=device-width, initial-scale=1, shrink-to-fit=no, maximum-scale=1" - /> - - - <script> - window.__gradio_mode__ = "app"; - window.gradio_config = {"version": "3.0.13", "mode": "blocks", "dev_mode": false, "components": [{"id": 1, "type": "markdown", "props": {"value": "<h1><center>DALL\u00b7E mini</center></h1>", "name": "markdown", "visible": true, "style": {}}}, {"id": 2, "type": "markdown", "props": {"value": "<p>DALL\u00b7E mini is an AI model that generates images from any prompt you give!</p>\n", "name": "markdown", "visible": true, "style": {}}}, {"id": 3, "type": "group", "props": {"type": "group", "visible": true, "style": {}}}, {"id": 4, "type": "box", "props": {"type": "box", "visible": true, "style": {}}}, {"id": 5, "type": "row", "props": {"type": "row", "visible": true, "style": {"equal_height": true, "mobile_collapse": false}}}, {"id": 6, "type": "textbox", "props": {"lines": 1, "max_lines": 1, "value": "", "label": "Enter your prompt", "show_label": false, "name": "textbox", "visible": true, "style": {"container": false}}}, {"id": 7, "type": "button", "props": {"value": "Run", "variant": "primary", "name": "button", "visible": true, "style": {}}}, {"id": 8, "type": "gallery", "props": {"label": "Generated images", "show_label": false, "name": "gallery", "visible": true, "style": {"grid": [3], "height": "auto"}}}, {"id": 9, "type": "markdown", "props": {"value": "<details>\n<summary>Bias and Limitations</summary>\n<p style='line-height: normal; font-size: small'>\nWhile the capabilities of image generation models are impressive, they may also reinforce or exacerbate societal biases. While the extent and nature of the biases of the DALL\u00b7E mini model have yet to be fully documented, given the fact that the model was trained on unfiltered data from the Internet, it may generate images that contain stereotypes against minority groups. Work to analyze the nature and extent of these limitations is ongoing, and will be documented in more detail in the <a href=\"https://huggingface.co/dalle-mini/dalle-mini\" target=\"_blank\">DALL\u00b7E mini model card</a>.\n</p>\n</details>", "name": "markdown", "visible": true, "style": {}}}, {"id": 10, "type": "markdown", "props": {"value": "<hr />\n<p style='text-align: center'>\nCreated by <a href=\"https://twitter.com/borisdayma\" target=\"_blank\">Boris Dayma</a> et al. 
2021-2022\n<br/>\n<a href=\"https://github.com/borisdayma/dalle-mini\" target=\"_blank\">GitHub</a> | <a href=\"https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini-Generate-images-from-any-text-prompt--VmlldzoyMDE4NDAy\" target=\"_blank\">Project Report</a>\n<p style='text-align: center'>Powered by Google <a href=\"https://sites.research.google/trc/\" target=\"_blank\">TPU Research Cloud</a>\n</p>", "name": "markdown", "visible": true, "style": {}}}], "theme": "default", "css": ".container { max-width: 800px; margin: auto; }", "enable_queue": false, "layout": {"id": 0, "children": [{"id": 1}, {"id": 2}, {"id": 3, "children": [{"id": 4, "children": [{"id": 5, "children": [{"id": 6}, {"id": 7}]}]}, {"id": 8}]}, {"id": 9}, {"id": 10}]}, "dependencies": [{"targets": [7], "trigger": "click", "inputs": [6], "outputs": [8], "backend_fn": false, "js": "\n async (text) => {\n try {\n response = await fetch('https://bf.dallemini.ai/generate', {\n method: 'POST',\n headers: {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n },\n body: JSON.stringify({\n prompt: text\n })\n });\n response = await response.json()\n let imgs = response.images.map(r => \"data:image/png;base64,\" + r)\n return imgs\n } catch (e) {\n alert(\"Too much traffic, please try again.\")\n IMG = \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAMAAACahl6sAAAAOVBMVEXg4OB1dXXX19fd3d2EhIR9fX14eHjJycm2trbb29uurq6goKCZmZmIiIiBgYHNzc2np6e8vLySkpKXK8HrAAABuUlEQVR4nO3Z0bKCIBCAYQNFVCzr/R/2nHU6k8KpJi6wZf7vLu1id9gFhKYBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAb249h7pzr5jD29uhospnlfNo4L+boiLKYyZ0iblKYiu/iNER3PTquD9npPgbB98Za0/twH59JVasMtzXo1m+iHny7PrwpysSuebgxCtmOTlkma121l/TFZR2UqXxEebxEO/87QZlZ3inpeCPzVftkojUyJp2OWVgKy23qSsbg8evitBSXkUjHzYN9Is0oeWoYkkUKazsxRYlYKa6ldFSfs7K/8tsnUSLrXHAuG1SOXpp5t1LEiQxSe33ZqDJIC4TdkziRJkRN9J1CXFlpIj7J9RvNSd0kiUj1zSVjyiKr4X5yTRIx0kYlY8oinbzfFSaJWFlJSsaUpZpEqimttNkTOpo9nX4TOqbfdEFM6FgQpW7c8OofSrYo1Wwaq9nG1/NhVc2nbj2HD821kuOgeg7o3hyZBj1Hpo9D7M3K+HeIrSmPeq4Vfl3ruOhpnly9vdyEfa1KLkPF7nr66GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPjcD13rCcC3ILx/AAAAAElFTkSuQmCC\"\n return Array(9).fill(IMG)\n }\n }\n ", "status_tracker": null, "queue": null, "api_name": null}]}; - </script> - - <link rel="preconnect" href="https://fonts.googleapis.com" /> - <link - rel="preconnect" - href="https://fonts.gstatic.com" - crossorigin="anonymous" - /> - <link - href="https://fonts.googleapis.com/css?family=Source Sans Pro" - rel="stylesheet" - /> - <link - href="https://fonts.googleapis.com/css?family=IBM Plex Mono" - rel="stylesheet" - /> - <script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script> - <script type="module" crossorigin src="https://gradio.s3-us-west-2.amazonaws.com/3.0.9b12/assets/index.8eca4ae7.js"></script> - <link rel="stylesheet" href="https://gradio.s3-us-west-2.amazonaws.com/3.0.9b12/assets/index.cbea297d.css"> - <style> - footer img { - display: none !important; - } - </style> - </head> - - <body - style=" - margin: 0; - padding: 0; - display: flex; - flex-direction: column; - flex-grow: 1; - " - > - <div - id="root" - style="display: flex; flex-direction: column; flex-grow: 1" - ></div> - </body> -</html> \ No newline at end of file diff --git a/spaces/sunwaee/Face-Mask-Detection/README.md b/spaces/sunwaee/Face-Mask-Detection/README.md deleted file mode 100644 index 0d93f5551eb346c94f19dedfebcc8c06dcf6769a..0000000000000000000000000000000000000000 --- a/spaces/sunwaee/Face-Mask-Detection/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: 
Face Mask Detection [ResNets] -emoji: 👁 -colorFrom: indigo -colorTo: pink -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HWID Changer V1.2 [PC]l.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HWID Changer V1.2 [PC]l.md deleted file mode 100644 index 64ccbd03a1bdba25a9c1f91a0b1fa1b12b0eb9f0..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HWID Changer V1.2 [PC]l.md +++ /dev/null @@ -1,33 +0,0 @@ -<br /> -<h1>HWID Changer V1.2 [PC]l: How to Change Your Hardware ID Easily and Safely</h1> -<p>If you are looking for a way to change your hardware ID (HWID) on your PC, you may have come across HWID Changer V1.2 [PC]l, a software tool that claims to do just that. But what is HWID Changer V1.2 [PC]l, how does it work, and is it safe to use? In this article, we will answer these questions and more.</p> -<h2>HWID Changer V1.2 [PC]l</h2><br /><p><b><b>Download</b> &#9193; <a href="https://cinurl.com/2uEZ5h">https://cinurl.com/2uEZ5h</a></b></p><br /><br /> -<h2>What is HWID Changer V1.2 [PC]l?</h2> -<p>HWID Changer V1.2 [PC]l is a software tool that allows you to change your HWID on your PC with a few clicks. HWID stands for hardware identification, which is a unique identifier assigned to each device by the manufacturer. Your HWID can be used by software applications, online services, or game servers to identify your device and enforce certain restrictions or bans.</p> -<p>By changing your HWID, you can bypass these restrictions or bans and access the software or service you want. For example, if you are banned from a game server for cheating or violating the rules, you can use HWID Changer V1.2 [PC]l to change your HWID and create a new account with a different HWID.</p> -<h2>How does HWID Changer V1.2 [PC]l work?</h2> -<p>HWID Changer V1.2 [PC]l works by modifying the registry entries of your device that store your HWID. The registry is a database that stores various settings and information for your operating system and applications. By changing the values of these registry entries, you can change your HWID to any value you want.</p> -<p>To use HWID Changer V1.2 [PC]l, you need to download and run the software as an administrator. Then, you can choose the device components that you want to change the HWID of, such as CPU, motherboard, hard disk, network adapter, etc. You can also generate a random HWID or enter a custom one. 
After that, you just need to click on "Change" and restart your PC for the changes to take effect.</p> -<p></p> -<h2>Is HWID Changer V1.2 [PC]l safe to use?</h2> -<p>HWID Changer V1.2 [PC]l is not an official or authorized software tool and it may have some risks associated with it. Changing your HWID may violate the terms of service or license agreement of some software applications or online services and result in legal consequences or further bans. Changing your HWID may also cause compatibility issues or errors with some software applications or drivers that rely on your original HWID.</p> -<p>Therefore, before using HWID Changer V1.2 [PC]l, you should make sure that you have a backup of your registry and system files in case something goes wrong. You should also use it at your own risk and discretion and understand the possible consequences of changing your HWID.</p> - -<h2>How to use HWID Changer V1.2 [PC]l safely and effectively?</h2> -<p>If you decide to use HWID Changer V1.2 [PC]l, you should follow some tips and precautions to use it safely and effectively. Here are some of them:</p> -<ul> -<li>Before changing your HWID, make sure that you have a backup of your registry and system files in case something goes wrong. You can use a system restore point or a backup software to do this.</li> -<li>After changing your HWID, make sure that you delete any traces of your original HWID from your device. You can use a disk cleaner or a registry cleaner to do this.</li> -<li>After changing your HWID, make sure that you update your drivers and software applications to avoid compatibility issues or errors. You can use a driver updater or a software updater to do this.</li> -<li>After changing your HWID, make sure that you do not log in to any accounts or services that are linked to your original HWID. You may need to create new accounts or use different devices to access them.</li> -<li>After changing your HWID, make sure that you do not change it again frequently or randomly. This may raise suspicion or trigger security measures from some software applications or online services.</li> -</ul> -<h2>What are some alternatives to HWID Changer V1.2 [PC]l?</h2> -<p>If you are looking for some alternatives to HWID Changer V1.2 [PC]l, you may want to consider some other options that can help you change your HWID or bypass restrictions or bans. Here are some of them:</p> -<ul> -<li>Use a virtual machine: A virtual machine is a software that allows you to run another operating system on your device. You can use a virtual machine to create a different HWID for each virtual machine and access the software or service you want.</li> -<li>Use a VPN: A VPN is a service that allows you to connect to the internet through a different server and location. You can use a VPN to change your IP address and hide your identity and location from the software or service you want.</li> -<li>Use a proxy: A proxy is a server that acts as an intermediary between your device and the internet. 
You can use a proxy to change your IP address and hide your identity and location from the software or service you want.</li> -</ul></p> d5da3c52bf<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Modern Warfare 2 Zone Files 196.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Modern Warfare 2 Zone Files 196.md deleted file mode 100644 index 84aafd8642430218276ff17ea82bebcdefca9985..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Modern Warfare 2 Zone Files 196.md +++ /dev/null @@ -1,10 +0,0 @@ -<h2>modern warfare 2 zone files 196</h2><br /><p><b><b>DOWNLOAD</b> > <a href="https://cinurl.com/2uEXEU">https://cinurl.com/2uEXEU</a></b></p><br /><br /> - -Call of Duty Modern Warfare Zone Files download, Call of Duty Modern Warfare 3 Zone Files download, Call of Duty Modern Warfare 2 Zone Files download, ... download Call of Duty: Modern Warfare -The Call of Duty: Modern Warfare series is one of the most successful series in television gaming history. -It has been a series that has seen a prolific and fansite, and it has been a series that has seen a ... -Call of Duty 4 Modern Warfare free download -Call of Duty 4: Modern Warfare free download, Call of Duty 4: Modern Warfare free download, Call of Duty 4: Modern Warfare download english, Call of Duty 4 Modern Warfare ... 8a78ff9644<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Vivid Workshopdata Ati V11 2 No EXCLUSIVE Crack.TXT.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Vivid Workshopdata Ati V11 2 No EXCLUSIVE Crack.TXT.md deleted file mode 100644 index 2bfaf58998695e3c29dc14563a96eb38faf5ffab..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Vivid Workshopdata Ati V11 2 No EXCLUSIVE Crack.TXT.md +++ /dev/null @@ -1,10 +0,0 @@ -<h2>Vivid Workshopdata Ati V11 2 No Crack.TXT</h2><br /><p><b><b>Download File</b> &#9733;&#9733;&#9733; <a href="https://cinurl.com/2uEX1g">https://cinurl.com/2uEX1g</a></b></p><br /><br /> -<br /> -Vivid Workshop Data Ati V.11.2 Q3 Multilingual 2012 Torrent . Microsoft Office 2013 Japanese torrent. ati workshop vivid data v.11.2 multilingual. Vivid Workshop Data Ati V.11.2 Q3 Multilingual 2012 Torrent Rating: 7.4/10 3564reviews -Vivid Workshop Data Ati V.11.2 Q3 Multilingual 2012 Torrent Summary. -The vivid workshop data are a collection of video workspots to improve your productivity, focus, and productivity-based skills.The vivid workshop data is a collection of workspots to improve your productivity, focus, and productivity-based skills. -This is a nonprofit. -Vivid Workshop Data Ati V.11.2 Pc Free Download. 
8a78ff9644<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Zikina Dinastija 1 Ceo Film Download.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Zikina Dinastija 1 Ceo Film Download.md deleted file mode 100644 index 7e61413e17b6b31f361fb83fbaa481c7eef949d0..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Zikina Dinastija 1 Ceo Film Download.md +++ /dev/null @@ -1,14 +0,0 @@ -<h2>zikina dinastija 1 ceo film download</h2><br /><p><b><b>Download Zip</b> ===== <a href="https://cinurl.com/2uEXIX">https://cinurl.com/2uEXIX</a></b></p><br /><br /> - -He is sent to work in the country as a domestic helper for a wealthy woman. It is time for Misa to work, earn money and be independent, while constantly being watched by her former best friend Zoran, who always seems to know everything about her. - -Lucky ubičemćeIvan to Bić I nisi nikadJoš jer ona ne znaStvarno si puno bolji od svega! u kakvoj ste u školi, ali ej u igrišti za šta si ostao?! Ej, ljudi ih smo svi upali iz ciljeta, kako bi pomogli onima koji nisu izašli na selo! - -Good morning?Sinoć pise po krivu uređaju, ja?Kolega od Nacionala je posao prenio nekome s druge krajine. Sad tužiteljstvo radi o tome koliko dugo ima kamata na tim mjesecima.Na početku su postavili bar 20 kazni, pa sad rade na njih 34. Uglavnom život ima još koliko dugo, pa ne uspije proći do sati. Zbog nepotizma ga na teret izdužuje. - -You're beautiful!I trebaš te da ne pitaju. Iz svoje kuće je nepoznata osoba iz bolje dobre kuće svoje ljubavi slikala. Taj je "tatović", on će sve na sukob. Onaj sličan, malo veći. "Kad bih se malo više okrenula da se ne pojavi, da se malo povukao, malo snašlo, malo tržio, dolazio bih u njega. - -I want to help you!Sad mi je to moguće, ali... Ostaj pri dvom vašim korisnicima ili ti jedan korisnik oda ili oni oda. To 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/alexnet.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/alexnet.py deleted file mode 100644 index 89e36b8c7851f895d9ae7f07149f0e707456aab0..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/alexnet.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import logging - -import torch.nn as nn - - -class AlexNet(nn.Module): - """AlexNet backbone. - - Args: - num_classes (int): number of classes for classification. 
- """ - - def __init__(self, num_classes=-1): - super(AlexNet, self).__init__() - self.num_classes = num_classes - self.features = nn.Sequential( - nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), - nn.ReLU(inplace=True), - nn.MaxPool2d(kernel_size=3, stride=2), - nn.Conv2d(64, 192, kernel_size=5, padding=2), - nn.ReLU(inplace=True), - nn.MaxPool2d(kernel_size=3, stride=2), - nn.Conv2d(192, 384, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.Conv2d(384, 256, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.Conv2d(256, 256, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.MaxPool2d(kernel_size=3, stride=2), - ) - if self.num_classes > 0: - self.classifier = nn.Sequential( - nn.Dropout(), - nn.Linear(256 * 6 * 6, 4096), - nn.ReLU(inplace=True), - nn.Dropout(), - nn.Linear(4096, 4096), - nn.ReLU(inplace=True), - nn.Linear(4096, num_classes), - ) - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = logging.getLogger() - from ..runner import load_checkpoint - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - # use default initializer - pass - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - - x = self.features(x) - if self.num_classes > 0: - x = x.view(x.size(0), 256 * 6 * 6) - x = self.classifier(x) - - return x diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/bricks/activation.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/bricks/activation.py deleted file mode 100644 index cab2712287d5ef7be2f079dcb54a94b96394eab5..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/bricks/activation.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from annotator.uniformer.mmcv.utils import TORCH_VERSION, build_from_cfg, digit_version -from .registry import ACTIVATION_LAYERS - -for module in [ - nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU, - nn.Sigmoid, nn.Tanh -]: - ACTIVATION_LAYERS.register_module(module=module) - - -@ACTIVATION_LAYERS.register_module(name='Clip') -@ACTIVATION_LAYERS.register_module() -class Clamp(nn.Module): - """Clamp activation layer. - - This activation function is to clamp the feature map value within - :math:`[min, max]`. More details can be found in ``torch.clamp()``. - - Args: - min (Number | optional): Lower-bound of the range to be clamped to. - Default to -1. - max (Number | optional): Upper-bound of the range to be clamped to. - Default to 1. - """ - - def __init__(self, min=-1., max=1.): - super(Clamp, self).__init__() - self.min = min - self.max = max - - def forward(self, x): - """Forward function. - - Args: - x (torch.Tensor): The input tensor. - - Returns: - torch.Tensor: Clamped tensor. - """ - return torch.clamp(x, min=self.min, max=self.max) - - -class GELU(nn.Module): - r"""Applies the Gaussian Error Linear Units function: - - .. math:: - \text{GELU}(x) = x * \Phi(x) - where :math:`\Phi(x)` is the Cumulative Distribution Function for - Gaussian Distribution. - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional - dimensions - - Output: :math:`(N, *)`, same shape as the input - - .. 
image:: scripts/activation_images/GELU.png - - Examples:: - - >>> m = nn.GELU() - >>> input = torch.randn(2) - >>> output = m(input) - """ - - def forward(self, input): - return F.gelu(input) - - -if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.4')): - ACTIVATION_LAYERS.register_module(module=GELU) -else: - ACTIVATION_LAYERS.register_module(module=nn.GELU) - - -def build_activation_layer(cfg): - """Build activation layer. - - Args: - cfg (dict): The activation layer config, which should contain: - - type (str): Layer type. - - layer args: Args needed to instantiate an activation layer. - - Returns: - nn.Module: Created activation layer. - """ - return build_from_cfg(cfg, ACTIVATION_LAYERS) diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/bricks/plugin.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/bricks/plugin.py deleted file mode 100644 index 07c010d4053174dd41107aa654ea67e82b46a25c..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/bricks/plugin.py +++ /dev/null @@ -1,88 +0,0 @@ -import inspect -import platform - -from .registry import PLUGIN_LAYERS - -if platform.system() == 'Windows': - import regex as re -else: - import re - - -def infer_abbr(class_type): - """Infer abbreviation from the class name. - - This method will infer the abbreviation to map class types to - abbreviations. - - Rule 1: If the class has the property "abbr", return the property. - Rule 2: Otherwise, the abbreviation falls back to snake case of class - name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``. - - Args: - class_type (type): The norm layer type. - - Returns: - str: The inferred abbreviation. - """ - - def camel2snack(word): - """Convert camel case word into snack case. - - Modified from `inflection lib - <https://inflection.readthedocs.io/en/latest/#inflection.underscore>`_. - - Example:: - - >>> camel2snack("FancyBlock") - 'fancy_block' - """ - - word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word) - word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word) - word = word.replace('-', '_') - return word.lower() - - if not inspect.isclass(class_type): - raise TypeError( - f'class_type must be a type, but got {type(class_type)}') - if hasattr(class_type, '_abbr_'): - return class_type._abbr_ - else: - return camel2snack(class_type.__name__) - - -def build_plugin_layer(cfg, postfix='', **kwargs): - """Build plugin layer. - - Args: - cfg (None or dict): cfg should contain: - type (str): identify plugin layer type. - layer args: args needed to instantiate a plugin layer. - postfix (int, str): appended into norm abbreviation to - create named layer. Default: ''. 
- - Returns: - tuple[str, nn.Module]: - name (str): abbreviation + postfix - layer (nn.Module): created plugin layer - """ - if not isinstance(cfg, dict): - raise TypeError('cfg must be a dict') - if 'type' not in cfg: - raise KeyError('the cfg dict must contain the key "type"') - cfg_ = cfg.copy() - - layer_type = cfg_.pop('type') - if layer_type not in PLUGIN_LAYERS: - raise KeyError(f'Unrecognized plugin type {layer_type}') - - plugin_layer = PLUGIN_LAYERS.get(layer_type) - abbr = infer_abbr(plugin_layer) - - assert isinstance(postfix, (int, str)) - name = abbr + str(postfix) - - layer = plugin_layer(**kwargs, **cfg_) - - return name, layer diff --git a/spaces/teelinsan/aclpubcheck/README.md b/spaces/teelinsan/aclpubcheck/README.md deleted file mode 100644 index 816c08437eee4fd13af5b8d84f181e668f3c07ad..0000000000000000000000000000000000000000 --- a/spaces/teelinsan/aclpubcheck/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: ACL Pubcheck -emoji: 📝 -colorFrom: red -colorTo: red -sdk: docker -pinned: false -license: mit ---- - -# 📝 ACL Pubcheck Graphical User Interface - -ACL Pubcheck GUI is a minimal graphical user interface for the [ACL Pubcheck tool](https://github.com/acl-org/aclpubcheck) made with [gradio](https://github.com/gradio-app/gradio). -The tool allows you to check the compliance of your paper with the [ACL conferences](https://www.aclweb.org/) guidelines by simply drag and drop your pdf file on the interface. -You can find more info on the [Github repository](https://github.com/teelinsan/aclpubcheck-gui) \ No newline at end of file diff --git a/spaces/temp-late/manga-anime/app.py b/spaces/temp-late/manga-anime/app.py deleted file mode 100644 index 66e43d310303a86d99e7d32b8f5637cd25690ce9..0000000000000000000000000000000000000000 --- a/spaces/temp-late/manga-anime/app.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding: utf-8 -import os -#os.system("pip install gradio==2.8.0b3") -import torch -import gradio as gr -from PIL import ImageFont, ImageDraw - -device = "cuda" if torch.cuda.is_available() else "cpu" -model1 = torch.hub.load("bryandlee/animegan2-pytorch:main", "generator", device=device, pretrained="face_paint_512_v1") -model2 = torch.hub.load("bryandlee/animegan2-pytorch:main", "generator", device=device, pretrained="face_paint_512_v2") -face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint", device=device) - - -def inference(img, ver): - title_font = ImageFont.truetype("LEMONMILK-Regular.otf", 20) - title_text = "@_temp.late_" - padding = 10 - x, y = 15, 15 # 180, 412 - w, h = title_font.getsize(title_text) - if ver == "Version 2 (Réaliste)": - out = face2paint(model2, img) - else: - out = face2paint(model1, img) - image_editable = ImageDraw.Draw(out) - image_editable.rectangle((x, y, x + w + padding, y + h + padding), fill="white") - image_editable.text((x + padding / 2, y + padding / 2), title_text, (0, 0, 0), font=title_font) - return out - - -title = "Temp Late" -gr.Interface( - inference, - [ - gr.inputs.Image(type="pil", source="upload"), - gr.inputs.Radio( - ["Version 1 (Stylisé)", "Version 2 (Réaliste)"], - type="value", - default="Version 2 (Réaliste)", - label="Version", - ), - ], - gr.outputs.Image(type="pil"), - allow_flagging="auto", - allow_screenshot=False, - flagging_dir="flagged", -).launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/templates/http-server/README.md b/spaces/templates/http-server/README.md deleted file mode 100644 index 
fafaa017c7836f519e145f2f5e843e84748af5b7..0000000000000000000000000000000000000000 --- a/spaces/templates/http-server/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Python + HTTP Server -emoji: 🐍 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 2.9.1 -python_version: 3.10.4 -app_file: app.py -models: [osanseviero/BigGAN-deep-128, t5-small] -datasets: [emotion] -license: mit -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/terfces0erbo/CollegeProjectV2/Adobe Cc 2019 Crack _VERIFIED_.md b/spaces/terfces0erbo/CollegeProjectV2/Adobe Cc 2019 Crack _VERIFIED_.md deleted file mode 100644 index 79ed510462dee0dcb4302f777d555ed8941c4215..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Adobe Cc 2019 Crack _VERIFIED_.md +++ /dev/null @@ -1,8 +0,0 @@ -<br /> -<p>the issue here is that the latest version of adobe flash player is 32.0.161, and you do not have the last update of the plug-in installed. to fix this, you will need to install the patch using the windows update. to do so, go to settings, and then to update and security. under windows update, click check for updates, and then, under optional updates, click view updates.</p> -<p>in a recent version of the program, adobe introduced a new mode of activation. in order to activate the program, you must enter your serial number and press the <strong>activate</strong> button. if you do not activate it before that, it will be deactivated and you will not be able to use the program.</p> -<h2>adobe cc 2019 crack</h2><br /><p><b><b>Download</b> &harr; <a href="https://bytlly.com/2uGl9u">https://bytlly.com/2uGl9u</a></b></p><br /><br /> -<p>in my case, they updated the software and the rest of the program didn't work. i had to reinstall the program. i went to the adobe website and downloaded the last version available at that time, flash player and installed it. it worked.</p> -<p>the full version of adobe color cc 2019 is available to those who already own the previous color version or the free version of creative cloud. the new version features a new feature, named "hd preview," which previews the outcome of the tool's different styles on the preview board. once you pick a style, the preview board will switch to the style youve selected, and you can see the result in real-time. the preview board can be moved to any part of the board, and it also supports transparent and watermarked previews. there is also the option to disable the preview board. alternatively, you can always continue working on the same preview board, and it can be set to any size.</p> 899543212b<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Caterpillarcommunicationadapter3driver19.md b/spaces/terfces0erbo/CollegeProjectV2/Caterpillarcommunicationadapter3driver19.md deleted file mode 100644 index f115f536cb0592c38b0b6ea0430f080bd90f738b..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Caterpillarcommunicationadapter3driver19.md +++ /dev/null @@ -1,11 +0,0 @@ - -<p>caterpillarcommunicationadapter3driver19 atlantis: caterpillarcommunicationadapter3driver19 the one who to become king of the world, to become a hero, hero. caterpillarcommunicationadapter3driver19 stopmotion pro 7 full crack stop motion pro 7 full crack caterpillarcommunicationadapter3driver19 uyless black optical networks ebook download. 
</p> -<h2>caterpillarcommunicationadapter3driver19</h2><br /><p><b><b>DOWNLOAD</b> &#127379; <a href="https://bytlly.com/2uGlIo">https://bytlly.com/2uGlIo</a></b></p><br /><br /> -<p>caterpillarcommunicationadapter3driver19 elephants in the sky | full hd 1080p | yify | link: caterpillarcommunicationadapter3driver19 cats&dogs by catboy3: caterpillarcommunicationadapter3driver19 vlads are not good at internet! </p> -<p>caterpillarcommunicationadapter3driver19 adb0fc82310 50 <br /> re: this thread has been locked by admin. <br /> <br /> leslie t. bennett john g. <br /> re: this thread has been locked by admin. <br /> <br /> lisa henderson <br /> re: this thread has been locked by admin. <br /> <br /> </p> -<p>caterpillarcommunicationadapter3driver19 bd86983c93 anthai. responder. meadean dice: a las 5:05 am kanzen master series pdf download caterpillarcommunicationadapter3driver19 trainz jr up sd70ace.rar demo uyless black optical networks ebook download. </p> -<p></p> -<p>caterpillarcommunicationadapter3driver19 <br /> trainzjrupsd7ace <br /> uyless black optical networks ebook download <br /> rika nishimura zip43 <br /> alpine pxe-h660 pxe-h650 imprint audyssey multeq v2.2.9 download <br /> foto memek ibu melahirkan <br /> sonivox vocalizer pro download <br /> stop motion pro 7 full crack <br /> vladmodels ksenya-y146 (34 sets) 14 <br /> clash of the titans 1080p bluray x264 <br /> </p> -<p>caterpillarcommunicationadapter3driver19 <br /> trainz jr up sd70ace.rar demo <br /> uyless black optical networks ebook download <br /> rika nishimura zip43 <br /> alpine pxe-h660 pxe-h650 imprint audyssey multeq v2.2.9 download <br /> foto memek ibu melahirkan <br /> sonivox vocalizer pro download <br /> stop motion pro 7 full crack <br /> vladmodels ksenya-y146 (34 sets) 14 <br /> clash of the titans 1080p bluray x264 <br /> </p> 899543212b<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Colonial Cousins ? Colonial Cousins [1996 ? FLAC].md b/spaces/terfces0erbo/CollegeProjectV2/Colonial Cousins ? Colonial Cousins [1996 ? FLAC].md deleted file mode 100644 index 0372b006b44445c96008818d72bf93675e119954..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Colonial Cousins ? Colonial Cousins [1996 ? FLAC].md +++ /dev/null @@ -1,22 +0,0 @@ -<h2>Colonial Cousins – Colonial Cousins [1996 – FLAC]</h2><br /><p><b><b>DOWNLOAD</b> &#10026; <a href="https://bytlly.com/2uGlOd">https://bytlly.com/2uGlOd</a></b></p><br /><br /> -<br /> -It has all of the sound on the video, too. - -Shazam is the newest iOS feature for music identification, or what you are playing on your iPhone. Shazam uses your camera and microphone to try to identify what song you are listening to. It is like Siri, but without a microphone. - -Pandora is an iPhone app that allows you to create your own radio station. Just type in music you like, click a button and Pandora searches your music library and recommends the songs for you to enjoy. When you like one, they will download it on your device. - -The Roku Channel is the best way to access streaming TV shows and movies. Roku has over 100 channels for people to watch, and they are all free. There are also channels for music, news, sports and more. - -For those looking for more of a reason to watch TV, Hulu lets you watch free movies, TV shows, and even live events. It also has a feature for on-the-go movies and TV shows. - -Remember when we first started hearing about "net neutrality"? 
The idea is that people should be able to access the Internet, and it should be free and accessible to all. The content you want to view should be able to get to you the same way. - -The TV Streaming Bundle is a more comprehensive approach to watching TV online. It allows you to watch free TV content from a number of free sites. - -The problem with TV streaming bundles like the TV Streaming Bundle is that it costs too much to subscribe. You are paying for three different subscriptions in addition to having to pay for an Internet connection. We think Hulu is the best one to start with.Glaucoma as a complication of pseudoexfoliation. - -The purpose of this study was to determine if any relationship exists between glaucoma and pseudoexfoliation. We identified all patients who were diagnosed as having pseudoexfoliation by our ophthalmologists and then examined each patient's medical records for the development of glaucoma and cataract. We then analyzed the data to determine if a statistically significant difference existed between those patients with glaucoma and those without glaucoma. The mean age of patients diagnosed with pseudoexfoliation was 74 years. The majority (89%) had glaucoma as a coexisting condition, and 22% had pseudophakia. The risk of glaucoma developing in those with pseudoexfoliation was not statistically significant. 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/terfces0erbo/CollegeProjectV2/Estimates By Max Fajardo Pdf Free BEST.md b/spaces/terfces0erbo/CollegeProjectV2/Estimates By Max Fajardo Pdf Free BEST.md deleted file mode 100644 index 1a41a4309bcad47cb98484aaed284d487b6a8a6e..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Estimates By Max Fajardo Pdf Free BEST.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Estimates By Max Fajardo Pdf Free</h2><br /><p><b><b>DOWNLOAD</b> &#9658;&#9658;&#9658; <a href="https://bytlly.com/2uGlDK">https://bytlly.com/2uGlDK</a></b></p><br /><br /> - -Read PDF Simplified construction estimate by Max Fajardo. Drainage project. Extensive mathematical theories relating to the most important area of ​​the soil. This work was published in the journal Science in 1959. Read PDF, Taylors, P., and R. T. Schultz. (1991). "The influence of the raw material on the mechanical properties of wood and plastic materials." Physical Review B, 43(11), 5038–5043. Read PDF, John W. McGinnis. (1959). "The dynamics of the drainage system." Science, 174(318), 18–23. Read PDF, John W. McGinnis. (1960). "The effect of the drainage system on the drainage system." Science, 174(32), 21–26. About 8a78ff9644<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (MixMeister Fusion Video V7.3.2 [RH] ).md b/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (MixMeister Fusion Video V7.3.2 [RH] ).md deleted file mode 100644 index 8125186e3475df5b8034d05d46f7bfcde9294bf3..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (MixMeister Fusion Video V7.3.2 [RH] ).md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>HD Online Player (MixMeister Fusion Video v7.3.2 [RH] )</h2><br /><p><b><b>Download</b> &#187;&#187;&#187; <a href="https://bytlly.com/2uGk17">https://bytlly.com/2uGk17</a></b></p><br /><br /> -<br /> -On social welfare department hong kong adoption real player free ... All fotball fusion c2 c3 dairy cow big udder nba preseason clippers vs warriors ... All full dizi hd izle tek parca pltw 3.2 unit conversion answer key ... 
wt19a android 4 man hinh nokia 6030 full hd 4k video songs modern ... With rh sin author. 1fdad05405<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/thegenerativegeneration/FNeVR_demo/augmentation.py b/spaces/thegenerativegeneration/FNeVR_demo/augmentation.py deleted file mode 100644 index 50d03203aaec2a59fb2671bdeccfae1d214f607c..0000000000000000000000000000000000000000 --- a/spaces/thegenerativegeneration/FNeVR_demo/augmentation.py +++ /dev/null @@ -1,345 +0,0 @@ -""" -Code from https://github.com/hassony2/torch_videovision -""" - -import numbers - -import random -import numpy as np -import PIL - -from skimage.transform import resize, rotate -from skimage.util import pad -import torchvision - -import warnings - -from skimage import img_as_ubyte, img_as_float - - -def crop_clip(clip, min_h, min_w, h, w): - if isinstance(clip[0], np.ndarray): - cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip] - - elif isinstance(clip[0], PIL.Image.Image): - cropped = [ - img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip - ] - else: - raise TypeError('Expected numpy.ndarray or PIL.Image' + - 'but got list of {0}'.format(type(clip[0]))) - return cropped - - -def pad_clip(clip, h, w): - im_h, im_w = clip[0].shape[:2] - pad_h = (0, 0) if h < im_h else ((h - im_h) // 2, (h - im_h + 1) // 2) - pad_w = (0, 0) if w < im_w else ((w - im_w) // 2, (w - im_w + 1) // 2) - - return pad(clip, ((0, 0), pad_h, pad_w, (0, 0)), mode='edge') - - -def resize_clip(clip, size, interpolation='bilinear'): - if isinstance(clip[0], np.ndarray): - if isinstance(size, numbers.Number): - im_h, im_w, im_c = clip[0].shape - # Min spatial dim already matches minimal size - if (im_w <= im_h and im_w == size) or (im_h <= im_w - and im_h == size): - return clip - new_h, new_w = get_resize_sizes(im_h, im_w, size) - size = (new_w, new_h) - else: - size = size[1], size[0] - - scaled = [ - resize(img, size, order=1 if interpolation == 'bilinear' else 0, preserve_range=True, - mode='constant', anti_aliasing=True) for img in clip - ] - elif isinstance(clip[0], PIL.Image.Image): - if isinstance(size, numbers.Number): - im_w, im_h = clip[0].size - # Min spatial dim already matches minimal size - if (im_w <= im_h and im_w == size) or (im_h <= im_w - and im_h == size): - return clip - new_h, new_w = get_resize_sizes(im_h, im_w, size) - size = (new_w, new_h) - else: - size = size[1], size[0] - if interpolation == 'bilinear': - pil_inter = PIL.Image.NEAREST - else: - pil_inter = PIL.Image.BILINEAR - scaled = [img.resize(size, pil_inter) for img in clip] - else: - raise TypeError('Expected numpy.ndarray or PIL.Image' + - 'but got list of {0}'.format(type(clip[0]))) - return scaled - - -def get_resize_sizes(im_h, im_w, size): - if im_w < im_h: - ow = size - oh = int(size * im_h / im_w) - else: - oh = size - ow = int(size * im_w / im_h) - return oh, ow - - -class RandomFlip(object): - def __init__(self, time_flip=False, horizontal_flip=False): - self.time_flip = time_flip - self.horizontal_flip = horizontal_flip - - def __call__(self, clip): - if random.random() < 0.5 and self.time_flip: - return clip[::-1] - if random.random() < 0.5 and self.horizontal_flip: - return [np.fliplr(img) for img in clip] - - return clip - - -class RandomResize(object): - """Resizes a list of (H x W x C) numpy.ndarray to the final size - The larger the original image is, the more times it takes to - interpolate - Args: - interpolation (str): Can be one of 'nearest', 'bilinear' - defaults to nearest - size (tuple): (widht, height) - """ - - 
def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'): - self.ratio = ratio - self.interpolation = interpolation - - def __call__(self, clip): - scaling_factor = random.uniform(self.ratio[0], self.ratio[1]) - - if isinstance(clip[0], np.ndarray): - im_h, im_w, im_c = clip[0].shape - elif isinstance(clip[0], PIL.Image.Image): - im_w, im_h = clip[0].size - - new_w = int(im_w * scaling_factor) - new_h = int(im_h * scaling_factor) - new_size = (new_w, new_h) - resized = resize_clip( - clip, new_size, interpolation=self.interpolation) - - return resized - - -class RandomCrop(object): - """Extract random crop at the same location for a list of videos - Args: - size (sequence or int): Desired output size for the - crop in format (h, w) - """ - - def __init__(self, size): - if isinstance(size, numbers.Number): - size = (size, size) - - self.size = size - - def __call__(self, clip): - """ - Args: - img (PIL.Image or numpy.ndarray): List of videos to be cropped - in format (h, w, c) in numpy.ndarray - Returns: - PIL.Image or numpy.ndarray: Cropped list of videos - """ - h, w = self.size - if isinstance(clip[0], np.ndarray): - im_h, im_w, im_c = clip[0].shape - elif isinstance(clip[0], PIL.Image.Image): - im_w, im_h = clip[0].size - else: - raise TypeError('Expected numpy.ndarray or PIL.Image' + - 'but got list of {0}'.format(type(clip[0]))) - - clip = pad_clip(clip, h, w) - im_h, im_w = clip.shape[1:3] - x1 = 0 if h == im_h else random.randint(0, im_w - w) - y1 = 0 if w == im_w else random.randint(0, im_h - h) - cropped = crop_clip(clip, y1, x1, h, w) - - return cropped - - -class RandomRotation(object): - """Rotate entire clip randomly by a random angle within - given bounds - Args: - degrees (sequence or int): Range of degrees to select from - If degrees is a number instead of sequence like (min, max), - the range of degrees, will be (-degrees, +degrees). - """ - - def __init__(self, degrees): - if isinstance(degrees, numbers.Number): - if degrees < 0: - raise ValueError('If degrees is a single number,' - 'must be positive') - degrees = (-degrees, degrees) - else: - if len(degrees) != 2: - raise ValueError('If degrees is a sequence,' - 'it must be of len 2.') - - self.degrees = degrees - - def __call__(self, clip): - """ - Args: - img (PIL.Image or numpy.ndarray): List of videos to be cropped - in format (h, w, c) in numpy.ndarray - Returns: - PIL.Image or numpy.ndarray: Cropped list of videos - """ - angle = random.uniform(self.degrees[0], self.degrees[1]) - if isinstance(clip[0], np.ndarray): - rotated = [rotate(image=img, angle=angle, preserve_range=True) for img in clip] - elif isinstance(clip[0], PIL.Image.Image): - rotated = [img.rotate(angle) for img in clip] - else: - raise TypeError('Expected numpy.ndarray or PIL.Image' + - 'but got list of {0}'.format(type(clip[0]))) - - return rotated - - -class ColorJitter(object): - """Randomly change the brightness, contrast and saturation and hue of the clip - Args: - brightness (float): How much to jitter brightness. brightness_factor - is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. - contrast (float): How much to jitter contrast. contrast_factor - is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. - saturation (float): How much to jitter saturation. saturation_factor - is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. - hue(float): How much to jitter hue. hue_factor is chosen uniformly from - [-hue, hue]. Should be >=0 and <= 0.5. 
- """ - - def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): - self.brightness = brightness - self.contrast = contrast - self.saturation = saturation - self.hue = hue - - def get_params(self, brightness, contrast, saturation, hue): - if brightness > 0: - brightness_factor = random.uniform( - max(0, 1 - brightness), 1 + brightness) - else: - brightness_factor = None - - if contrast > 0: - contrast_factor = random.uniform( - max(0, 1 - contrast), 1 + contrast) - else: - contrast_factor = None - - if saturation > 0: - saturation_factor = random.uniform( - max(0, 1 - saturation), 1 + saturation) - else: - saturation_factor = None - - if hue > 0: - hue_factor = random.uniform(-hue, hue) - else: - hue_factor = None - return brightness_factor, contrast_factor, saturation_factor, hue_factor - - def __call__(self, clip): - """ - Args: - clip (list): list of PIL.Image - Returns: - list PIL.Image : list of transformed PIL.Image - """ - if isinstance(clip[0], np.ndarray): - brightness, contrast, saturation, hue = self.get_params( - self.brightness, self.contrast, self.saturation, self.hue) - - # Create img transform function sequence - img_transforms = [] - if brightness is not None: - img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) - if saturation is not None: - img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) - if hue is not None: - img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) - if contrast is not None: - img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) - random.shuffle(img_transforms) - img_transforms = [img_as_ubyte, torchvision.transforms.ToPILImage()] + img_transforms + [np.array, - img_as_float] - - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - jittered_clip = [] - for img in clip: - jittered_img = img - for func in img_transforms: - jittered_img = func(jittered_img) - jittered_clip.append(jittered_img.astype('float32')) - elif isinstance(clip[0], PIL.Image.Image): - brightness, contrast, saturation, hue = self.get_params( - self.brightness, self.contrast, self.saturation, self.hue) - - # Create img transform function sequence - img_transforms = [] - if brightness is not None: - img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) - if saturation is not None: - img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) - if hue is not None: - img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) - if contrast is not None: - img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) - random.shuffle(img_transforms) - - # Apply to all videos - jittered_clip = [] - for img in clip: - for func in img_transforms: - jittered_img = func(img) - jittered_clip.append(jittered_img) - - else: - raise TypeError('Expected numpy.ndarray or PIL.Image' + - 'but got list of {0}'.format(type(clip[0]))) - return jittered_clip - - -class AllAugmentationTransform: - def __init__(self, resize_param=None, rotation_param=None, flip_param=None, crop_param=None, jitter_param=None): - self.transforms = [] - - if flip_param is not None: - self.transforms.append(RandomFlip(**flip_param)) - - if rotation_param is not None: - self.transforms.append(RandomRotation(**rotation_param)) - - if resize_param is not None: - 
self.transforms.append(RandomResize(**resize_param)) - - if crop_param is not None: - self.transforms.append(RandomCrop(**crop_param)) - - if jitter_param is not None: - self.transforms.append(ColorJitter(**jitter_param)) - - def __call__(self, clip): - for t in self.transforms: - clip = t(clip) - return clip diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Educacion Y Psicomotricidad Durivage Pdf Download.md b/spaces/tialenAdioni/chat-gpt-api/logs/Educacion Y Psicomotricidad Durivage Pdf Download.md deleted file mode 100644 index 905728be2d44a209281bf55aaa9db34a70bc99b1..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Educacion Y Psicomotricidad Durivage Pdf Download.md +++ /dev/null @@ -1,15 +0,0 @@ -<br /> -<h1>Educación y psicomotricidad: un manual para el nivel preescolar de Johanne Durivage</h1> -<p>La psicomotricidad es una disciplina que estudia la relación entre el movimiento, el pensamiento y la emoción. La psicomotricidad tiene aplicaciones educativas, terapéuticas y preventivas, y se basa en el principio de que el desarrollo cognitivo y afectivo del niño depende de su actividad motriz.</p> -<h2>Educacion Y Psicomotricidad Durivage Pdf Download</h2><br /><p><b><b>Download</b> &#127775; <a href="https://urlcod.com/2uKaSv">https://urlcod.com/2uKaSv</a></b></p><br /><br /> -<p>En el nivel preescolar, la psicomotricidad es fundamental para favorecer el aprendizaje, la creatividad, la socialización y la autoestima de los niños. El manual <strong>Educación y psicomotricidad: manual para el nivel preescolar</strong> de Johanne Durivage[^1^] ofrece una serie de actividades lúdicas y variadas para estimular las habilidades psicomotoras de los niños de 3 a 6 años.</p> -<p>El manual se divide en cuatro partes: la primera parte presenta los fundamentos teóricos de la psicomotricidad y su importancia en el nivel preescolar; la segunda parte propone una metodología para planificar, organizar y evaluar las sesiones de psicomotricidad; la tercera parte describe 50 actividades prácticas clasificadas según los objetivos psicomotores que se quieren trabajar (equilibrio, coordinación, lateralidad, esquema corporal, etc.); y la cuarta parte ofrece algunos recursos complementarios como fichas de observación, canciones y juegos.</p> -<p>El manual está dirigido a educadores, maestros, padres y profesionales que quieran incorporar la psicomotricidad en sus prácticas educativas con los niños preescolares. El manual se puede descargar en formato PDF desde el siguiente enlace:</p> -<p><a href="https://books.google.com/books/about/Educaci%C3%B3n_y_psicomotricidad.html?id=LPrpOwAACAAJ">Educación y psicomotricidad: manual para el nivel preescolar</a></p> - -<p>La psicomotricidad se puede trabajar desde diferentes enfoques y perspectivas. Algunos de los más conocidos son el enfoque relacional, el enfoque funcional y el enfoque global. El enfoque relacional se centra en la relación afectiva entre el niño y el adulto que le acompaña, y busca favorecer la expresión emocional y la comunicación a través del movimiento. El enfoque funcional se basa en el desarrollo de las capacidades motrices del niño, y busca mejorar su adaptación al medio físico y social. El enfoque global integra los aspectos cognitivos, afectivos y motrices del niño, y busca estimular su creatividad y su autonomía.</p> -<p>La psicomotricidad tiene múltiples beneficios para el desarrollo integral de los niños preescolares. 
Algunos de estos beneficios son: mejorar la percepción y el conocimiento del propio cuerpo y del espacio; desarrollar la coordinación, el equilibrio, la lateralidad y la orientación espacial; favorecer el aprendizaje de conceptos básicos como los colores, las formas, los tamaños, las cantidades, etc.; potenciar la memoria, la atención, la concentración y el razonamiento lógico; fomentar la imaginación, la fantasía y el juego simbólico; fortalecer la autoestima, la confianza y el respeto por uno mismo y por los demás; facilitar la expresión y la regulación de las emociones; promover la socialización, la cooperación y el trabajo en equipo.</p> -<p>La psicomotricidad es una disciplina que requiere de una formación específica y de una actitud lúdica y sensible por parte de los profesionales que la aplican. El manual <strong>Educación y psicomotricidad: manual para el nivel preescolar</strong> de Johanne Durivage es una herramienta útil y práctica para introducirse en el mundo de la psicomotricidad y para ofrecer a los niños preescolares una educación integral y divertida.</p> -<p></p> e93f5a0c3f<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Ism 6.0 Keyboard Driver Software [BETTER] Free Download.md b/spaces/tialenAdioni/chat-gpt-api/logs/Ism 6.0 Keyboard Driver Software [BETTER] Free Download.md deleted file mode 100644 index 58db8f41b156e37a01994ece60de2d64d11f0692..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Ism 6.0 Keyboard Driver Software [BETTER] Free Download.md +++ /dev/null @@ -1,54 +0,0 @@ - -<h1>Ism 6.0 Keyboard Driver Software Free Download: How to Install and Use It on Your PC</h1> - -<p>If you are looking for a way to type in Indian languages on your computer, you might have heard of Ism 6.0 Keyboard Driver Software. This is a free software that allows you to use various keyboard layouts for different Indian scripts, such as Devanagari, Bengali, Gujarati, Malayalam, Tamil, Telugu, and more.</p> -<h2>Ism 6.0 Keyboard Driver Software Free Download</h2><br /><p><b><b>Download File</b> &#10004; <a href="https://urlcod.com/2uK3kC">https://urlcod.com/2uK3kC</a></b></p><br /><br /> - -<p>In this article, we will show you how to download, install, and use Ism 6.0 Keyboard Driver Software on your PC. We will also explain some of the features and benefits of this software, as well as some of the common issues and solutions.</p> - -<h2>What is Ism 6.0 Keyboard Driver Software?</h2> - -<p>Ism 6.0 Keyboard Driver Software is a product of the Centre for Development of Advanced Computing (C-DAC), an organization under the Ministry of Electronics and Information Technology, Government of India. It is part of the Indian Language Technology Proliferation and Deployment Programme (ILTP), which aims to develop and promote the use of Indian languages in ICT applications and services.</p> - -<p>Ism 6.0 Keyboard Driver Software is a software that enables you to type in various Indian languages using different keyboard layouts. It supports both phonetic and typewriter modes of input, as well as transliteration and on-screen keyboard options. It also provides spell check and dictionary features for some languages.</p> -<p></p> - -<p>Ism 6.0 Keyboard Driver Software is compatible with Windows XP, Vista, 7, 8, and 10 operating systems. 
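To give a rough sense of what Unicode input means in practice, here is a minimal sketch in plain Python (no Ism-specific API is implied): Indic text produced by such a keyboard driver is simply ordinary Unicode code points that any Unicode-aware application can store and display.

```python
# Illustrative only: Devanagari text handled as ordinary Unicode strings,
# the form in which a Unicode-aware application receives typed Indic text.
ka = "\u0915"       # क (DEVANAGARI LETTER KA)
namaste = "नमस्ते"
print(ka, hex(ord(ka)))
print(namaste, [hex(ord(ch)) for ch in namaste])
```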
It can be used with any application that supports Unicode input, such as Microsoft Word, Excel, PowerPoint, Notepad, etc.</p> - -<h2>How to Download Ism 6.0 Keyboard Driver Software?</h2> - -<p>You can download Ism 6.0 Keyboard Driver Software for free from the official website of C-DAC. Here are the steps to follow:</p> - -<ol> -<li>Go to <a href="https://cdac.in/index.aspx?id=dl_ism_v6">https://cdac.in/index.aspx?id=dl_ism_v6</a> and click on the "Download" button.</li> -<li>Select your preferred language from the drop-down menu and click on "Submit".</li> -<li>Enter your name, email address, phone number, and captcha code in the form and click on "Download".</li> -<li>A download link will be sent to your email address. Click on it to start downloading the software.</li> -<li>Save the file to your desired location on your PC.</li> -</ol> - -<h2>How to Install Ism 6.0 Keyboard Driver Software?</h2> - -<p>Once you have downloaded Ism 6.0 Keyboard Driver Software, you can install it on your PC by following these steps:</p> - -<ol> -<li>Double-click on the downloaded file to launch the setup wizard.</li> -<li>Click on "Next" to proceed with the installation.</li> -<li>Read and accept the license agreement and click on "Next".</li> -<li>Select the destination folder where you want to install the software and click on "Next".</li> -<li>Select the components you want to install and click on "Next".</li> -<li>Click on "Install" to begin the installation process.</li> -<li>Wait for the installation to complete and click on "Finish".</li> -</ol> - -<h2>How to Use Ism 6.0 Keyboard Driver Software?</h2> - -<p>After installing Ism 6.0 Keyboard Driver Software, you can use it to type in various Indian languages on your PC. Here are some tips on how to use it:</p> - -<ul> -<li>To activate Ism 6.0 Keyboard Driver Software, press Ctrl+Alt+Shift+I keys together. A green icon will appear in the system tray indicating that Ism is active.</li> -<li>To select a language and a keyboard layout, right-click on the green icon and choose from the menu. You can also use shortcut keys to switch between languages and layouts.</li> -<li>To type in phonetic mode, type as you would normally do in English. The software will automatically convert your keystrokes into the corresponding Indian script characters.</li> -<li>To type in typewriter mode, use the keys that correspond to the Indian</p> e93f5a0c3f<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Descargar Discografia De Liquits Mediafire PATCHED.md b/spaces/tioseFevbu/cartoon-converter/scripts/Descargar Discografia De Liquits Mediafire PATCHED.md deleted file mode 100644 index 44bd389d5d394a87bfa6ea934d6fda813392d898..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Descargar Discografia De Liquits Mediafire PATCHED.md +++ /dev/null @@ -1,26 +0,0 @@ - -<h1>Descargar Discografia De Liquits Mediafire: Todo lo que necesitas saber</h1> -<p>Liquits es una de las bandas más originales y divertidas del rock mexicano. Con más de 20 años de trayectoria, han creado canciones que mezclan el pop, el funk, el ska y el rock alternativo con letras ingeniosas y humorísticas. 
Si quieres descargar su discografía completa por Mediafire, aquí te contamos cómo hacerlo y qué álbumes podrás disfrutar.</p> -<h2>Descargar Discografia De Liquits Mediafire</h2><br /><p><b><b>Download File</b> &#9658;&#9658;&#9658; <a href="https://urlcod.com/2uHyye">https://urlcod.com/2uHyye</a></b></p><br /><br /> -<h2>¿Quiénes son Liquits?</h2> -<p>Liquits se formó en 1993 en la zona sur de la Ciudad de México, en la colonia Tepito, donde estudiaban Facundo, Teo, Ro y Edi. El nombre de la banda surgió de una broma entre ellos, al combinar las palabras "liquid" y "hits". Desde sus inicios, tuvieron una gran aceptación en la escena underground por su estilo fresco y desenfadado.</p> -<p>En el año 2000, viajaron a Nueva York para grabar su primer álbum de estudio, Karaoke, bajo la producción de Andrew Weiss, quien había trabajado con bandas como Ween y Rollins Band. El disco fue un éxito y los llevó a participar en festivales como Vive Latino y Rock al Parque. Su segundo álbum, Jardín, salió en 2004 y consolidó su fama con temas como "Chido", "Desde que" y "Kurasaibo".</p> -<p>En 2007, lanzaron Perfume Pantera, un disco más maduro y experimental que contó con la colaboración de artistas como Natalia Lafourcade, Ely Guerra y Café Tacvba. En 2009, publicaron Kimono en llamas y el regreso de los insectos, un álbum conceptual inspirado en la cultura japonesa. En 2011, editaron Ven Ven, un disco acústico grabado en vivo. Y en 2015, presentaron Safari de noche, su último trabajo hasta la fecha.</p> -<p></p> -<h2>¿Cómo descargar su discografía por Mediafire?</h2> -<p>Si quieres descargar la discografía completa de Liquits por Mediafire, solo tienes que seguir estos pasos:</p> -<ol> -<li>Entra al sitio web <a href="https://www.discografiasmega.com/descargar-discografia-liquits-mega-completa/">https://www.discografiasmega.com/descargar-discografia-liquits-mega-completa/</a>, donde encontrarás los enlaces de descarga de todos sus álbumes.</li> -<li>Haz clic en el botón "ClickAqui" que corresponde al álbum que quieras descargar. Te llevará a una página de publicidad donde tendrás que esperar unos segundos.</li> -<li>Pulsa el botón "Saltar Publicidad" que aparecerá en la esquina superior derecha. Te redirigirá a otra página donde tendrás que ingresar la contraseña www.discografiasmega.com para acceder al enlace de descarga.</li> -<li>Haz clic en el botón "Descargar" que te llevará a Mediafire. Allí podrás descargar el archivo comprimido en formato rar que contiene el álbum.</li> -<li>Repite los pasos anteriores para descargar los demás álbumes que quieras.</li> -<li>Una vez que hayas descargado todos los archivos rar, descomprímelos con un programa como WinRAR o 7-Zip. Obtendrás las carpetas con los archivos mp3 de cada álbum.</li> -<li>Disfruta de la música de Liquits en tu reproductor favorito.</li> -</ol> -<h2>¿Por qué descargar su discografía por Mediafire?</h2> -<p>Descargar la discografía de Liquits por Mediafire tiene varias ventajas:</p> -<ul> -<li>Es gratis y seguro. 
No tendrás que pagar nada ni registrarte en ningún sitio para acc</p> cec2833e83<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Garmin Mobile Xt 5.00.30 Wp For.md b/spaces/tioseFevbu/cartoon-converter/scripts/Garmin Mobile Xt 5.00.30 Wp For.md deleted file mode 100644 index 1b848e5778452a6cc61c83e954544875b81ba7c9..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Garmin Mobile Xt 5.00.30 Wp For.md +++ /dev/null @@ -1,52 +0,0 @@ -<br /> -<h1>Garmin Mobile Xt 5.00.30 Wp For: A Guide to Install and Use This GPS Navigation Software</h1> - -<p>Garmin Mobile Xt 5.00.30 Wp For is a GPS navigation software that can be installed on Windows Mobile phones. It offers features such as voice guidance, turn-by-turn directions, points of interest, traffic alerts, weather forecasts, and more. It can also use maps from various sources, such as Garmin's own maps or free maps from MalFreeMaps.</p> - -<p>In this article, we will show you how to install and use Garmin Mobile Xt 5.00.30 Wp For on your Windows Mobile phone.</p> -<h2>Garmin Mobile Xt 5.00.30 Wp For</h2><br /><p><b><b>DOWNLOAD</b> ->>> <a href="https://urlcod.com/2uHvU4">https://urlcod.com/2uHvU4</a></b></p><br /><br /> - -<h2>How to Install Garmin Mobile Xt 5.00.30 Wp For</h2> - -<p>To install Garmin Mobile Xt 5.00.30 Wp For, you will need to download three files from Garmin's website[^1^]:</p> - -<ul> -<li>Garmin Mobile XT Navigation for Smartphones</li> -<li>Garmin Mobile XT Support Files software</li> -<li>Garmin Mobile XT Free Basemap software</li> -</ul> - -<p>After downloading these files, connect your Windows Mobile phone to your PC with Microsoft ActiveSync. Run the files and install them to your phone's storage card or device memory.</p> - -<p>After installing all three files, disconnect your phone from your PC and let it run the new application installation process.</p> - -<h2>How to Install Maps to Garmin Mobile Xt 5.00.30 Wp For</h2> - -<p>A GPS navigation software without maps is useless. Therefore, you will need to install maps to Garmin Mobile Xt 5.00.30 Wp For.</p> - -<p>You can use Garmin's own maps or free maps from MalFreeMaps[^2^]. To use Garmin's maps, you will need to purchase them from their website or use a compatible Garmin device to transfer them to your phone.</p> - -<p>To use MalFreeMaps, you will need to download their weekly updated Malaysia map from their website[^2^]. After downloading the map file (gmapsupp.img), connect your phone to your PC again and copy the file to the Storage Card\\Garmin folder on your phone.</p> - -<h2>How to Use Garmin Mobile Xt 5.00.30 Wp For</h2> - -<p>To use Garmin Mobile Xt 5.00.30 Wp For, you will need to launch the application from your phone's menu or home screen.</p> - -<p>The first time you run the application, you will need to activate it with a product key or a free trial code. You can purchase a product key from Garmin's website[^1^] or contact their customer support for a free trial code.</p> -<p></p> - -<p>Once activated, you can start using the application to navigate with your phone's GPS receiver or an external Bluetooth GPS receiver.</p> - -<p>You can enter a destination by address, point of interest, coordinates, or recent locations. You can also browse the map and tap on a location to set it as your destination.</p> - -<p>The application will calculate the best route for you and guide you with voice prompts and on-screen directions. 
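For a feel of the arithmetic behind such guidance, here is a small self-contained sketch, unrelated to Garmin's own implementation, of the great-circle distance between two coordinates; the sample points are assumed values roughly matching Kuala Lumpur and George Town.

```python
import math

# Illustrative only (not Garmin's routing code): great-circle distance in km
# between two latitude/longitude points, e.g. two waypoints on a route.
def haversine_km(lat1, lon1, lat2, lon2):
    r = 6371.0  # mean Earth radius in km
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dp = math.radians(lat2 - lat1)
    dl = math.radians(lon2 - lon1)
    a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2
    return 2 * r * math.asin(math.sqrt(a))

# Roughly 290-300 km for Kuala Lumpur -> George Town (assumed coordinates).
print(round(haversine_km(3.1390, 101.6869, 5.4141, 100.3288), 1))
```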
You can also view information such as distance, time, speed, elevation, traffic, weather, and more.</p> - -<p>You can customize the application settings according to your preferences, such as map display, units, language, voice, alerts, etc.</p> - -<h2>Conclusion</h2> - -<p>Garmin Mobile Xt 5.00.30 Wp For is a powerful and versatile GPS navigation software for Windows Mobile phones. It can help you find your way around with ease and convenience.</p> - -<p>If you are looking for a GPS navigation software for your Windows Mobile phone, you should give Garmin Mobile Xt 5.00.30 Wp For a try.</p> 81aa517590<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py deleted file mode 100644 index 2d0c587cbf42126eb903f27c11dc2dde9146c1cc..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py +++ /dev/null @@ -1,611 +0,0 @@ -import railroad -import pyparsing -from pkg_resources import resource_filename -from typing import ( - List, - Optional, - NamedTuple, - Generic, - TypeVar, - Dict, - Callable, - Set, - Iterable, -) -from jinja2 import Template -from io import StringIO -import inspect - - -with open(resource_filename(__name__, "template.jinja2"), encoding="utf-8") as fp: - template = Template(fp.read()) - -# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet -NamedDiagram = NamedTuple( - "NamedDiagram", - [("name", str), ("diagram", Optional[railroad.DiagramItem]), ("index", int)], -) -""" -A simple structure for associating a name with a railroad diagram -""" - -T = TypeVar("T") - - -class EachItem(railroad.Group): - """ - Custom railroad item to compose a: - - Group containing a - - OneOrMore containing a - - Choice of the elements in the Each - with the group label indicating that all must be matched - """ - - all_label = "[ALL]" - - def __init__(self, *items): - choice_item = railroad.Choice(len(items) - 1, *items) - one_or_more_item = railroad.OneOrMore(item=choice_item) - super().__init__(one_or_more_item, label=self.all_label) - - -class AnnotatedItem(railroad.Group): - """ - Simple subclass of Group that creates an annotation label - """ - - def __init__(self, label: str, item): - super().__init__(item=item, label="[{}]".format(label) if label else label) - - -class EditablePartial(Generic[T]): - """ - Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been - constructed. - """ - - # We need this here because the railroad constructors actually transform the data, so can't be called until the - # entire tree is assembled - - def __init__(self, func: Callable[..., T], args: list, kwargs: dict): - self.func = func - self.args = args - self.kwargs = kwargs - - @classmethod - def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": - """ - If you call this function in the same way that you would call the constructor, it will store the arguments - as you expect. 
For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) - """ - return EditablePartial(func=func, args=list(args), kwargs=kwargs) - - @property - def name(self): - return self.kwargs["name"] - - def __call__(self) -> T: - """ - Evaluate the partial and return the result - """ - args = self.args.copy() - kwargs = self.kwargs.copy() - - # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. - # args=['list', 'of', 'things']) - arg_spec = inspect.getfullargspec(self.func) - if arg_spec.varargs in self.kwargs: - args += kwargs.pop(arg_spec.varargs) - - return self.func(*args, **kwargs) - - -def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str: - """ - Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams - :params kwargs: kwargs to be passed in to the template - """ - data = [] - for diagram in diagrams: - io = StringIO() - diagram.diagram.writeSvg(io.write) - title = diagram.name - if diagram.index == 0: - title += " (root)" - data.append({"title": title, "text": "", "svg": io.getvalue()}) - - return template.render(diagrams=data, **kwargs) - - -def resolve_partial(partial: "EditablePartial[T]") -> T: - """ - Recursively resolves a collection of Partials into whatever type they are - """ - if isinstance(partial, EditablePartial): - partial.args = resolve_partial(partial.args) - partial.kwargs = resolve_partial(partial.kwargs) - return partial() - elif isinstance(partial, list): - return [resolve_partial(x) for x in partial] - elif isinstance(partial, dict): - return {key: resolve_partial(x) for key, x in partial.items()} - else: - return partial - - -def to_railroad( - element: pyparsing.ParserElement, - diagram_kwargs: Optional[dict] = None, - vertical: int = 3, - show_results_names: bool = False, - show_groups: bool = False, -) -> List[NamedDiagram]: - """ - Convert a pyparsing element tree into a list of diagrams. 
This is the recommended entrypoint to diagram - creation if you want to access the Railroad tree before it is converted to HTML - :param element: base element of the parser being diagrammed - :param diagram_kwargs: kwargs to pass to the Diagram() constructor - :param vertical: (optional) - int - limit at which number of alternatives should be - shown vertically instead of horizontally - :param show_results_names - bool to indicate whether results name annotations should be - included in the diagram - :param show_groups - bool to indicate whether groups should be highlighted with an unlabeled - surrounding box - """ - # Convert the whole tree underneath the root - lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) - _to_diagram_element( - element, - lookup=lookup, - parent=None, - vertical=vertical, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - root_id = id(element) - # Convert the root if it hasn't been already - if root_id in lookup: - if not element.customName: - lookup[root_id].name = "" - lookup[root_id].mark_for_extraction(root_id, lookup, force=True) - - # Now that we're finished, we can convert from intermediate structures into Railroad elements - diags = list(lookup.diagrams.values()) - if len(diags) > 1: - # collapse out duplicate diags with the same name - seen = set() - deduped_diags = [] - for d in diags: - # don't extract SkipTo elements, they are uninformative as subdiagrams - if d.name == "...": - continue - if d.name is not None and d.name not in seen: - seen.add(d.name) - deduped_diags.append(d) - resolved = [resolve_partial(partial) for partial in deduped_diags] - else: - # special case - if just one diagram, always display it, even if - # it has no name - resolved = [resolve_partial(partial) for partial in diags] - return sorted(resolved, key=lambda diag: diag.index) - - -def _should_vertical( - specification: int, exprs: Iterable[pyparsing.ParserElement] -) -> bool: - """ - Returns true if we should return a vertical list of elements - """ - if specification is None: - return False - else: - return len(_visible_exprs(exprs)) >= specification - - -class ElementState: - """ - State recorded for an individual pyparsing Element - """ - - # Note: this should be a dataclass, but we have to support Python 3.5 - def __init__( - self, - element: pyparsing.ParserElement, - converted: EditablePartial, - parent: EditablePartial, - number: int, - name: str = None, - parent_index: Optional[int] = None, - ): - #: The pyparsing element that this represents - self.element: pyparsing.ParserElement = element - #: The name of the element - self.name: str = name - #: The output Railroad element in an unconverted state - self.converted: EditablePartial = converted - #: The parent Railroad element, which we store so that we can extract this if it's duplicated - self.parent: EditablePartial = parent - #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram - self.number: int = number - #: The index of this inside its parent - self.parent_index: Optional[int] = parent_index - #: If true, we should extract this out into a subdiagram - self.extract: bool = False - #: If true, all of this element's children have been filled out - self.complete: bool = False - - def mark_for_extraction( - self, el_id: int, state: "ConverterState", name: str = None, force: bool = False - ): - """ - Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram - :param el_id: id of 
the element - :param state: element/diagram state tracker - :param name: name to use for this element's text - :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the - root element when we know we're finished - """ - self.extract = True - - # Set the name - if not self.name: - if name: - # Allow forcing a custom name - self.name = name - elif self.element.customName: - self.name = self.element.customName - else: - self.name = "" - - # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children - # to be added - # Also, if this is just a string literal etc, don't bother extracting it - if force or (self.complete and _worth_extracting(self.element)): - state.extract_into_diagram(el_id) - - -class ConverterState: - """ - Stores some state that persists between recursions into the element tree - """ - - def __init__(self, diagram_kwargs: Optional[dict] = None): - #: A dictionary mapping ParserElements to state relating to them - self._element_diagram_states: Dict[int, ElementState] = {} - #: A dictionary mapping ParserElement IDs to subdiagrams generated from them - self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} - #: The index of the next unnamed element - self.unnamed_index: int = 1 - #: The index of the next element. This is used for sorting - self.index: int = 0 - #: Shared kwargs that are used to customize the construction of diagrams - self.diagram_kwargs: dict = diagram_kwargs or {} - self.extracted_diagram_names: Set[str] = set() - - def __setitem__(self, key: int, value: ElementState): - self._element_diagram_states[key] = value - - def __getitem__(self, key: int) -> ElementState: - return self._element_diagram_states[key] - - def __delitem__(self, key: int): - del self._element_diagram_states[key] - - def __contains__(self, key: int): - return key in self._element_diagram_states - - def generate_unnamed(self) -> int: - """ - Generate a number used in the name of an otherwise unnamed diagram - """ - self.unnamed_index += 1 - return self.unnamed_index - - def generate_index(self) -> int: - """ - Generate a number used to index a diagram - """ - self.index += 1 - return self.index - - def extract_into_diagram(self, el_id: int): - """ - Used when we encounter the same token twice in the same tree. When this - happens, we replace all instances of that token with a terminal, and - create a new subdiagram for the token - """ - position = self[el_id] - - # Replace the original definition of this element with a regular block - if position.parent: - ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) - if "item" in position.parent.kwargs: - position.parent.kwargs["item"] = ret - elif "items" in position.parent.kwargs: - position.parent.kwargs["items"][position.parent_index] = ret - - # If the element we're extracting is a group, skip to its content but keep the title - if position.converted.func == railroad.Group: - content = position.converted.kwargs["item"] - else: - content = position.converted - - self.diagrams[el_id] = EditablePartial.from_call( - NamedDiagram, - name=position.name, - diagram=EditablePartial.from_call( - railroad.Diagram, content, **self.diagram_kwargs - ), - index=position.number, - ) - - del self[el_id] - - -def _worth_extracting(element: pyparsing.ParserElement) -> bool: - """ - Returns true if this element is worth having its own sub-diagram. 
Simply, if any of its children - themselves have children, then its complex enough to extract - """ - children = element.recurse() - return any(child.recurse() for child in children) - - -def _apply_diagram_item_enhancements(fn): - """ - decorator to ensure enhancements to a diagram item (such as results name annotations) - get applied on return from _to_diagram_element (we do this since there are several - returns in _to_diagram_element) - """ - - def _inner( - element: pyparsing.ParserElement, - parent: Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, - ) -> Optional[EditablePartial]: - - ret = fn( - element, - parent, - lookup, - vertical, - index, - name_hint, - show_results_names, - show_groups, - ) - - # apply annotation for results name, if present - if show_results_names and ret is not None: - element_results_name = element.resultsName - if element_results_name: - # add "*" to indicate if this is a "list all results" name - element_results_name += "" if element.modalResults else "*" - ret = EditablePartial.from_call( - railroad.Group, item=ret, label=element_results_name - ) - - return ret - - return _inner - - -def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): - non_diagramming_exprs = ( - pyparsing.ParseElementEnhance, - pyparsing.PositionToken, - pyparsing.And._ErrorStop, - ) - return [ - e - for e in exprs - if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs)) - ] - - -@_apply_diagram_item_enhancements -def _to_diagram_element( - element: pyparsing.ParserElement, - parent: Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, -) -> Optional[EditablePartial]: - """ - Recursively converts a PyParsing Element to a railroad Element - :param lookup: The shared converter state that keeps track of useful things - :param index: The index of this element within the parent - :param parent: The parent of this element in the output tree - :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default), - it sets the threshold of the number of items before we go vertical. 
If True, always go vertical, if False, never - do so - :param name_hint: If provided, this will override the generated name - :param show_results_names: bool flag indicating whether to add annotations for results names - :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed - :param show_groups: bool flag indicating whether to show groups using bounding box - """ - exprs = element.recurse() - name = name_hint or element.customName or element.__class__.__name__ - - # Python's id() is used to provide a unique identifier for elements - el_id = id(element) - - element_results_name = element.resultsName - - # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram - if not element.customName: - if isinstance( - element, - ( - # pyparsing.TokenConverter, - # pyparsing.Forward, - pyparsing.Located, - ), - ): - # However, if this element has a useful custom name, and its child does not, we can pass it on to the child - if exprs: - if not exprs[0].customName: - propagated_name = name - else: - propagated_name = None - - return _to_diagram_element( - element.expr, - parent=parent, - lookup=lookup, - vertical=vertical, - index=index, - name_hint=propagated_name, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # If the element isn't worth extracting, we always treat it as the first time we say it - if _worth_extracting(element): - if el_id in lookup: - # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, - # so we have to extract it into a new diagram. - looked_up = lookup[el_id] - looked_up.mark_for_extraction(el_id, lookup, name=name_hint) - ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) - return ret - - elif el_id in lookup.diagrams: - # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we - # just put in a marker element that refers to the sub-diagram - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - return ret - - # Recursively convert child elements - # Here we find the most relevant Railroad element for matching pyparsing Element - # We use ``items=[]`` here to hold the place for where the child elements will go once created - if isinstance(element, pyparsing.And): - # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat - # (all will have the same name, and resultsName) - if not exprs: - return None - if len(set((e.name, e.resultsName) for e in exprs)) == 1: - ret = EditablePartial.from_call( - railroad.OneOrMore, item="", repeat=str(len(exprs)) - ) - elif _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Stack, items=[]) - else: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): - if not exprs: - return None - if _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) - else: - ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) - elif isinstance(element, pyparsing.Each): - if not exprs: - return None - ret = EditablePartial.from_call(EachItem, items=[]) - elif isinstance(element, pyparsing.NotAny): - ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") - elif isinstance(element, pyparsing.FollowedBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", 
item="") - elif isinstance(element, pyparsing.PrecededBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="") - elif isinstance(element, pyparsing.Group): - if show_groups: - ret = EditablePartial.from_call(AnnotatedItem, label="", item="") - else: - ret = EditablePartial.from_call(railroad.Group, label="", item="") - elif isinstance(element, pyparsing.TokenConverter): - ret = EditablePartial.from_call(AnnotatedItem, label=type(element).__name__.lower(), item="") - elif isinstance(element, pyparsing.Opt): - ret = EditablePartial.from_call(railroad.Optional, item="") - elif isinstance(element, pyparsing.OneOrMore): - ret = EditablePartial.from_call(railroad.OneOrMore, item="") - elif isinstance(element, pyparsing.ZeroOrMore): - ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") - elif isinstance(element, pyparsing.Group): - ret = EditablePartial.from_call( - railroad.Group, item=None, label=element_results_name - ) - elif isinstance(element, pyparsing.Empty) and not element.customName: - # Skip unnamed "Empty" elements - ret = None - elif len(exprs) > 1: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif len(exprs) > 0 and not element_results_name: - ret = EditablePartial.from_call(railroad.Group, item="", label=name) - else: - terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) - ret = terminal - - if ret is None: - return - - # Indicate this element's position in the tree so we can extract it if necessary - lookup[el_id] = ElementState( - element=element, - converted=ret, - parent=parent, - parent_index=index, - number=lookup.generate_index(), - ) - if element.customName: - lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) - - i = 0 - for expr in exprs: - # Add a placeholder index in case we have to extract the child before we even add it to the parent - if "items" in ret.kwargs: - ret.kwargs["items"].insert(i, None) - - item = _to_diagram_element( - expr, - parent=ret, - lookup=lookup, - vertical=vertical, - index=i, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # Some elements don't need to be shown in the diagram - if item is not None: - if "item" in ret.kwargs: - ret.kwargs["item"] = item - elif "items" in ret.kwargs: - # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal - ret.kwargs["items"][i] = item - i += 1 - elif "items" in ret.kwargs: - # If we're supposed to skip this element, remove it from the parent - del ret.kwargs["items"][i] - - # If all this items children are none, skip this item - if ret and ( - ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) - or ("item" in ret.kwargs and ret.kwargs["item"] is None) - ): - ret = EditablePartial.from_call(railroad.Terminal, name) - - # Mark this element as "complete", ie it has all of its children - if el_id in lookup: - lookup[el_id].complete = True - - if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: - lookup.extract_into_diagram(el_id) - if ret is not None: - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - - return ret diff --git a/spaces/trysem/DreamShaper-3.3/README.md b/spaces/trysem/DreamShaper-3.3/README.md deleted file mode 100644 index f2f78b75b963815c590f24762c2df362da444559..0000000000000000000000000000000000000000 --- a/spaces/trysem/DreamShaper-3.3/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: DreamShaper 3.3 -emoji: 👀 -colorFrom: indigo -colorTo: 
blue -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/trysem/image-matting-app/ppmatting/models/dim.py b/spaces/trysem/image-matting-app/ppmatting/models/dim.py deleted file mode 100644 index 5d9ae654322242f785407e61ff7b8405d6b443b4..0000000000000000000000000000000000000000 --- a/spaces/trysem/image-matting-app/ppmatting/models/dim.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import defaultdict -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -from paddleseg.models import layers -from paddleseg import utils -from paddleseg.cvlibs import manager - -from ppmatting.models.losses import MRSD - - -@manager.MODELS.add_component -class DIM(nn.Layer): - """ - The DIM implementation based on PaddlePaddle. - - The original article refers to - Ning Xu, et, al. "Deep Image Matting" - (https://arxiv.org/pdf/1908.07919.pdf). - - Args: - backbone: backbone model. - stage (int, optional): The stage of model. Defautl: 3. - decoder_input_channels(int, optional): The channel of decoder input. Default: 512. - pretrained(str, optional): The path of pretrianed model. Defautl: None. 
- - """ - - def __init__(self, - backbone, - stage=3, - decoder_input_channels=512, - pretrained=None): - super().__init__() - self.backbone = backbone - self.pretrained = pretrained - self.stage = stage - self.loss_func_dict = None - - decoder_output_channels = [64, 128, 256, 512] - self.decoder = Decoder( - input_channels=decoder_input_channels, - output_channels=decoder_output_channels) - if self.stage == 2: - for param in self.backbone.parameters(): - param.stop_gradient = True - for param in self.decoder.parameters(): - param.stop_gradient = True - if self.stage >= 2: - self.refine = Refine() - self.init_weight() - - def forward(self, inputs): - input_shape = paddle.shape(inputs['img'])[-2:] - x = paddle.concat([inputs['img'], inputs['trimap'] / 255], axis=1) - fea_list = self.backbone(x) - - # decoder stage - up_shape = [] - for i in range(5): - up_shape.append(paddle.shape(fea_list[i])[-2:]) - alpha_raw = self.decoder(fea_list, up_shape) - alpha_raw = F.interpolate( - alpha_raw, input_shape, mode='bilinear', align_corners=False) - logit_dict = {'alpha_raw': alpha_raw} - if self.stage < 2: - return logit_dict - - if self.stage >= 2: - # refine stage - refine_input = paddle.concat([inputs['img'], alpha_raw], axis=1) - alpha_refine = self.refine(refine_input) - - # finally alpha - alpha_pred = alpha_refine + alpha_raw - alpha_pred = F.interpolate( - alpha_pred, input_shape, mode='bilinear', align_corners=False) - if not self.training: - alpha_pred = paddle.clip(alpha_pred, min=0, max=1) - logit_dict['alpha_pred'] = alpha_pred - if self.training: - loss_dict = self.loss(logit_dict, inputs) - return logit_dict, loss_dict - else: - return alpha_pred - - def loss(self, logit_dict, label_dict, loss_func_dict=None): - if loss_func_dict is None: - if self.loss_func_dict is None: - self.loss_func_dict = defaultdict(list) - self.loss_func_dict['alpha_raw'].append(MRSD()) - self.loss_func_dict['comp'].append(MRSD()) - self.loss_func_dict['alpha_pred'].append(MRSD()) - else: - self.loss_func_dict = loss_func_dict - - loss = {} - mask = label_dict['trimap'] == 128 - loss['all'] = 0 - - if self.stage != 2: - loss['alpha_raw'] = self.loss_func_dict['alpha_raw'][0]( - logit_dict['alpha_raw'], label_dict['alpha'], mask) - loss['alpha_raw'] = 0.5 * loss['alpha_raw'] - loss['all'] = loss['all'] + loss['alpha_raw'] - - if self.stage == 1 or self.stage == 3: - comp_pred = logit_dict['alpha_raw'] * label_dict['fg'] + \ - (1 - logit_dict['alpha_raw']) * label_dict['bg'] - loss['comp'] = self.loss_func_dict['comp'][0]( - comp_pred, label_dict['img'], mask) - loss['comp'] = 0.5 * loss['comp'] - loss['all'] = loss['all'] + loss['comp'] - - if self.stage == 2 or self.stage == 3: - loss['alpha_pred'] = self.loss_func_dict['alpha_pred'][0]( - logit_dict['alpha_pred'], label_dict['alpha'], mask) - loss['all'] = loss['all'] + loss['alpha_pred'] - - return loss - - def init_weight(self): - if self.pretrained is not None: - utils.load_entire_model(self, self.pretrained) - - -# bilinear interpolate skip connect -class Up(nn.Layer): - def __init__(self, input_channels, output_channels): - super().__init__() - self.conv = layers.ConvBNReLU( - input_channels, - output_channels, - kernel_size=5, - padding=2, - bias_attr=False) - - def forward(self, x, skip, output_shape): - x = F.interpolate( - x, size=output_shape, mode='bilinear', align_corners=False) - x = x + skip - x = self.conv(x) - x = F.relu(x) - - return x - - -class Decoder(nn.Layer): - def __init__(self, input_channels, output_channels=(64, 128, 256, 512)): - 
super().__init__() - self.deconv6 = nn.Conv2D( - input_channels, input_channels, kernel_size=1, bias_attr=False) - self.deconv5 = Up(input_channels, output_channels[-1]) - self.deconv4 = Up(output_channels[-1], output_channels[-2]) - self.deconv3 = Up(output_channels[-2], output_channels[-3]) - self.deconv2 = Up(output_channels[-3], output_channels[-4]) - self.deconv1 = Up(output_channels[-4], 64) - - self.alpha_conv = nn.Conv2D( - 64, 1, kernel_size=5, padding=2, bias_attr=False) - - def forward(self, fea_list, shape_list): - x = fea_list[-1] - x = self.deconv6(x) - x = self.deconv5(x, fea_list[4], shape_list[4]) - x = self.deconv4(x, fea_list[3], shape_list[3]) - x = self.deconv3(x, fea_list[2], shape_list[2]) - x = self.deconv2(x, fea_list[1], shape_list[1]) - x = self.deconv1(x, fea_list[0], shape_list[0]) - alpha = self.alpha_conv(x) - alpha = F.sigmoid(alpha) - - return alpha - - -class Refine(nn.Layer): - def __init__(self): - super().__init__() - self.conv1 = layers.ConvBNReLU( - 4, 64, kernel_size=3, padding=1, bias_attr=False) - self.conv2 = layers.ConvBNReLU( - 64, 64, kernel_size=3, padding=1, bias_attr=False) - self.conv3 = layers.ConvBNReLU( - 64, 64, kernel_size=3, padding=1, bias_attr=False) - self.alpha_pred = layers.ConvBNReLU( - 64, 1, kernel_size=3, padding=1, bias_attr=False) - - def forward(self, x): - x = self.conv1(x) - x = self.conv2(x) - x = self.conv3(x) - alpha = self.alpha_pred(x) - - return alpha diff --git a/spaces/tumuyan/RealSR/install_realsr_android.sh b/spaces/tumuyan/RealSR/install_realsr_android.sh deleted file mode 100644 index 1b5fc6e8a1b48cd6587035d7b5f0dce206326f67..0000000000000000000000000000000000000000 --- a/spaces/tumuyan/RealSR/install_realsr_android.sh +++ /dev/null @@ -1,8 +0,0 @@ -curl -C - -L -O --retry 10 https://huggingface.co/spaces/tumuyan/RealSR/resolve/main/assets.zip -# curl -L https://github.com/Tencent/ncnn/releases/download/20221128/ncnn-20221128-android-vulkan-shared.zip -o ncnn.zip -unzip -o assets.zip -d ./ -# unzip -o ncnn.zip -d realsr -# mv realsr/arm64-v8a/lib/libncnn.so realsr/libncnn.so -chmod 777 ./realsr/* -cd realsr -./realsr-ncnn \ No newline at end of file diff --git a/spaces/tumuyan/vnc/Dockerfile b/spaces/tumuyan/vnc/Dockerfile deleted file mode 100644 index 2ed77072a23c34788d2043e7cd47c4ce40714b53..0000000000000000000000000000000000000000 --- a/spaces/tumuyan/vnc/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM accetto/ubuntu-vnc-xfce-firefox-plus -WORKDIR /dockerstartup - -EXPOSE 6901 -ENTRYPOINT ["./vnc_startup.sh"] \ No newline at end of file diff --git a/spaces/ucalyptus/PTI/training/coaches/multi_id_coach.py b/spaces/ucalyptus/PTI/training/coaches/multi_id_coach.py deleted file mode 100644 index 1bc600a22fb9b63201a4787a8e53b5dc9f462bc7..0000000000000000000000000000000000000000 --- a/spaces/ucalyptus/PTI/training/coaches/multi_id_coach.py +++ /dev/null @@ -1,72 +0,0 @@ -import os - -import torch -from tqdm import tqdm - -from configs import paths_config, hyperparameters, global_config -from training.coaches.base_coach import BaseCoach -from utils.log_utils import log_images_from_w - - -class MultiIDCoach(BaseCoach): - - def __init__(self, data_loader, use_wandb): - super().__init__(data_loader, use_wandb) - - def train(self): - self.G.synthesis.train() - self.G.mapping.train() - - w_path_dir = f'{paths_config.embedding_base_dir}/{paths_config.input_data_id}' - os.makedirs(w_path_dir, exist_ok=True) - os.makedirs(f'{w_path_dir}/{paths_config.pti_results_keyword}', exist_ok=True) - - use_ball_holder = True - 
w_pivots = [] - images = [] - - for fname, image in self.data_loader: - if self.image_counter >= hyperparameters.max_images_to_invert: - break - - image_name = fname[0] - if hyperparameters.first_inv_type == 'w+': - embedding_dir = f'{w_path_dir}/{paths_config.e4e_results_keyword}/{image_name}' - else: - embedding_dir = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}' - os.makedirs(embedding_dir, exist_ok=True) - - w_pivot = self.get_inversion(w_path_dir, image_name, image) - w_pivots.append(w_pivot) - images.append((image_name, image)) - self.image_counter += 1 - - for i in tqdm(range(hyperparameters.max_pti_steps)): - self.image_counter = 0 - - for data, w_pivot in zip(images, w_pivots): - image_name, image = data - - if self.image_counter >= hyperparameters.max_images_to_invert: - break - - real_images_batch = image.to(global_config.device) - - generated_images = self.forward(w_pivot) - loss, l2_loss_val, loss_lpips = self.calc_loss(generated_images, real_images_batch, image_name, - self.G, use_ball_holder, w_pivot) - - self.optimizer.zero_grad() - loss.backward() - self.optimizer.step() - - use_ball_holder = global_config.training_step % hyperparameters.locality_regularization_interval == 0 - - global_config.training_step += 1 - self.image_counter += 1 - - if self.use_wandb: - log_images_from_w(w_pivots, self.G, [image[0] for image in images]) - - torch.save(self.G, - f'{paths_config.checkpoints_dir}/model_{global_config.run_name}_multi_id.pt') diff --git a/spaces/udion/BayesCap/src/README.md b/spaces/udion/BayesCap/src/README.md deleted file mode 100644 index de5f937af97bfba7df565e8806d2c5b1ae0f0ffa..0000000000000000000000000000000000000000 --- a/spaces/udion/BayesCap/src/README.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: BayesCap -emoji: 🔥 -colorFrom: indigo -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- -# Configuration -`title`: _string_ -Display title for the Space -`emoji`: _string_ -Space emoji (emoji-only character allowed) -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) -`sdk`: _string_ -Can be either `gradio` or `streamlit` -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/umichVision/virtex-redcaps/virtex/utils/nucleus_sampling.py b/spaces/umichVision/virtex-redcaps/virtex/utils/nucleus_sampling.py deleted file mode 100644 index a905c228eef653d014417e7dac87b4ef62c6929f..0000000000000000000000000000000000000000 --- a/spaces/umichVision/virtex-redcaps/virtex/utils/nucleus_sampling.py +++ /dev/null @@ -1,131 +0,0 @@ -r""" -Nucleus Sampling was introduced in the paper -`The Curious Case of Neural Text Degeneration <https://arxiv.org/abs/1904.09751>`_. -If you take it from here, make sure to cite them: - -.. 
code-block:: text - - @inproceedings{, - title={The Curious Case of Neural Text Degeneration}, - author={Ari Holtzman and Jan Buys and Li Du and Maxwell Forbes and Yejin Choi}, - journal={ICLR}, - year={2020} - } - -Some core parts of this code are adapted with minor modifications from Thomas Wolf's -gist: https://gist.githubusercontent.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 -""" - -from typing import Callable, List, Tuple - -import torch -import torch.nn.functional as F - - -class AutoRegressiveNucleusSampling(object): - """ - Implements the nucleus sampling for decoding captions. This class only works - for auto-regressive models (Transformer-like), not recurrent models (LSTM-like). - - Parameters - ---------- - eos_index: int - The index of the end token (``[EOS]``) in vocabulary. - max_steps: int, optional (default = 50) - The maximum number of decoding steps. - nucleus_size: int, optional (default = 5) - Size of top-K nucleus for sampling. - """ - - def __init__( - self, - eos_index: int, - max_steps: int = 50, - nucleus_size: float = 0.9, - ): - super().__init__() - self._eos_index = eos_index - self.max_steps = max_steps - self.nucleus_size = nucleus_size - - def search( - self, start_predictions: torch.Tensor, step: Callable[..., torch.Tensor] - ) -> Tuple[torch.Tensor, None]: - - batch_size = start_predictions.size()[0] - - # List of `(batch_size, )` tensors. One for each timestep. - # This includes the start-of-sentence tokens, unlike the implementation - # in `AutoregressiveBeamSearch`. We will remove them in the end. - - # Transpose `start_predictions` and make a list when prompt is provided. - predictions = [ - start_predictions[:, i] for i in range(start_predictions.size(1)) - ] - - for timestep in range(self.max_steps): - # Get the predictions from last timestep (most recent). - # shape: (batch_size, ) - last_predictions = predictions[-1] - - # If every predicted token from the last step is end-of-sentence token, - # then we can stop early. - if (last_predictions == self._eos_index).all(): - break - - # Combine step predictions made so far into one tensor. This is our - # "partial" caption input to the transformer. - # shape: (batch_size, timestep + 1) - predictions_so_far = torch.stack(predictions).permute(1, 0) - - # Take a step, get the distribution of logits from next timestep. - # shape: (batch_size, num_classes) - current_logits = step(predictions_so_far) - - # Sort logits in descending order to determine the nucleus. - sorted_logits, sorted_idx = torch.sort(current_logits, descending=True) - - # Get cumulative softmax probabilites. For every instance in batch, a - # variable amount of tokens (N) will consitute the nucleus. - # shape: (batch_size, num_classes) - cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) - - # Determine indices of tokens at the tail of distribution. These will be - # removed from the nucleus. - sorted_idx_to_remove = cumulative_probs > self.nucleus_size - - # Shift the indices to the right to keep the first token outside nucleus. - sorted_idx_to_remove[..., 1:] = sorted_idx_to_remove[..., :-1].clone() - sorted_idx_to_remove[..., 0] = 0 - - # Set logits to large negative value to avoid sampling them. Iterate over - # the batch of examples. - for t in range(current_logits.size()[0]): - idx_to_remove = sorted_idx[t][sorted_idx_to_remove[t]] - current_logits[t][idx_to_remove] = -1e12 - - # Set logits for last predicted token to a large negative value to - # avoid repetition. 
- current_logits[t][last_predictions[t]] = -1e12 - - # Sample from the filtered distribution. - # shape: (batch_size, num_classes) - current_probs = F.softmax(current_logits, dim=-1) - - # shape: (batch_size, ) - current_predictions = torch.multinomial(current_probs, 1) - current_predictions = current_predictions.view(batch_size) - - # Set current predicted tokens to be end-of-sentence for instances where - # last prediction was also end-of-sentence token. - current_predictions[last_predictions == self._eos_index] = self._eos_index - - predictions.append(current_predictions) - - # Remove start-of-sentence token from predictions, and collect them together. - # shape: (batch_size, max_steps) .. or could be less than max_steps. - all_predictions = torch.stack(predictions[1:]).permute(1, 0) - - # We don't return any logprobs of generated sequence with nucleus sampling, - # unlike `AutoregressiveBeamSearch`. - return all_predictions, None diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Cali Skye Maddy Model Why She Is More Than Just a Pretty Face.md b/spaces/usbethFlerru/sovits-modelsV2/example/Cali Skye Maddy Model Why She Is More Than Just a Pretty Face.md deleted file mode 100644 index ddc9943881ade7c13176a3a649b8c681e213f792..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Cali Skye Maddy Model Why She Is More Than Just a Pretty Face.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Cali Skye Maddy Model</h2><br /><p><b><b>DOWNLOAD</b> &#9881; <a href="https://urlcod.com/2uyUYS">https://urlcod.com/2uyUYS</a></b></p><br /><br /> -<br /> - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/models/sam.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/models/sam.md deleted file mode 100644 index 8dd1e35c24b19ccc21ece951b8de4bf85957b240..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/models/sam.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -comments: true -description: Discover the Segment Anything Model (SAM), a revolutionary promptable image segmentation model, and delve into the details of its advanced architecture and the large-scale SA-1B dataset. -keywords: Segment Anything, Segment Anything Model, SAM, Meta SAM, image segmentation, promptable segmentation, zero-shot performance, SA-1B dataset, advanced architecture, auto-annotation, Ultralytics, pre-trained models, SAM base, SAM large, instance segmentation, computer vision, AI, artificial intelligence, machine learning, data annotation, segmentation masks, detection model, YOLO detection model, bibtex, Meta AI ---- - -# Segment Anything Model (SAM) - -Welcome to the frontier of image segmentation with the Segment Anything Model, or SAM. This revolutionary model has changed the game by introducing promptable image segmentation with real-time performance, setting new standards in the field. - -## Introduction to SAM: The Segment Anything Model - -The Segment Anything Model, or SAM, is a cutting-edge image segmentation model that allows for promptable segmentation, providing unparalleled versatility in image analysis tasks. SAM forms the heart of the Segment Anything initiative, a groundbreaking project that introduces a novel model, task, and dataset for image segmentation. - -SAM's advanced design allows it to adapt to new image distributions and tasks without prior knowledge, a feature known as zero-shot transfer. 
Trained on the expansive [SA-1B dataset](https://ai.facebook.com/datasets/segment-anything/), which contains more than 1 billion masks spread over 11 million carefully curated images, SAM has displayed impressive zero-shot performance, surpassing previous fully supervised results in many cases. - -![Dataset sample image](https://user-images.githubusercontent.com/26833433/238056229-0e8ffbeb-f81a-477e-a490-aff3d82fd8ce.jpg) -Example images with overlaid masks from our newly introduced dataset, SA-1B. SA-1B contains 11M diverse, high-resolution, licensed, and privacy protecting images and 1.1B high-quality segmentation masks. These masks were annotated fully automatically by SAM, and as verified by human ratings and numerous experiments, are of high quality and diversity. Images are grouped by number of masks per image for visualization (there are ∼100 masks per image on average). - -## Key Features of the Segment Anything Model (SAM) - -- **Promptable Segmentation Task:** SAM was designed with a promptable segmentation task in mind, allowing it to generate valid segmentation masks from any given prompt, such as spatial or text clues identifying an object. -- **Advanced Architecture:** The Segment Anything Model employs a powerful image encoder, a prompt encoder, and a lightweight mask decoder. This unique architecture enables flexible prompting, real-time mask computation, and ambiguity awareness in segmentation tasks. -- **The SA-1B Dataset:** Introduced by the Segment Anything project, the SA-1B dataset features over 1 billion masks on 11 million images. As the largest segmentation dataset to date, it provides SAM with a diverse and large-scale training data source. -- **Zero-Shot Performance:** SAM displays outstanding zero-shot performance across various segmentation tasks, making it a ready-to-use tool for diverse applications with minimal need for prompt engineering. - -For an in-depth look at the Segment Anything Model and the SA-1B dataset, please visit the [Segment Anything website](https://segment-anything.com) and check out the research paper [Segment Anything](https://arxiv.org/abs/2304.02643). - -## How to Use SAM: Versatility and Power in Image Segmentation - -The Segment Anything Model can be employed for a multitude of downstream tasks that go beyond its training data. This includes edge detection, object proposal generation, instance segmentation, and preliminary text-to-mask prediction. With prompt engineering, SAM can swiftly adapt to new tasks and data distributions in a zero-shot manner, establishing it as a versatile and potent tool for all your image segmentation needs. - -```python -from ultralytics import SAM - -model = SAM('sam_b.pt') -model.info() # display model information -model.predict('path/to/image.jpg') # predict -``` - -## Available Models and Supported Tasks - -| Model Type | Pre-trained Weights | Tasks Supported | -|------------|---------------------|-----------------------| -| SAM base | `sam_b.pt` | Instance Segmentation | -| SAM large | `sam_l.pt` | Instance Segmentation | - -## Operating Modes - -| Mode | Supported | -|------------|--------------------| -| Inference | :heavy_check_mark: | -| Validation | :x: | -| Training | :x: | - -## Auto-Annotation: A Quick Path to Segmentation Datasets - -Auto-annotation is a key feature of SAM, allowing users to generate a [segmentation dataset](https://docs.ultralytics.com/datasets/segment) using a pre-trained detection model. 
This feature enables rapid and accurate annotation of a large number of images, bypassing the need for time-consuming manual labeling. - -### Generate Your Segmentation Dataset Using a Detection Model - -To auto-annotate your dataset with the Ultralytics framework, use the `auto_annotate` function as shown below: - -```python -from ultralytics.yolo.data.annotator import auto_annotate - -auto_annotate(data="path/to/images", det_model="yolov8x.pt", sam_model='sam_b.pt') -``` - -| Argument | Type | Description | Default | -|------------|---------------------|---------------------------------------------------------------------------------------------------------|--------------| -| data | str | Path to a folder containing images to be annotated. | | -| det_model | str, optional | Pre-trained YOLO detection model. Defaults to 'yolov8x.pt'. | 'yolov8x.pt' | -| sam_model | str, optional | Pre-trained SAM segmentation model. Defaults to 'sam_b.pt'. | 'sam_b.pt' | -| device | str, optional | Device to run the models on. Defaults to an empty string (CPU or GPU, if available). | | -| output_dir | str, None, optional | Directory to save the annotated results. Defaults to a 'labels' folder in the same directory as 'data'. | None | - -The `auto_annotate` function takes the path to your images, with optional arguments for specifying the pre-trained detection and SAM segmentation models, the device to run the models on, and the output directory for saving the annotated results. - -Auto-annotation with pre-trained models can dramatically cut down the time and effort required for creating high-quality segmentation datasets. This feature is especially beneficial for researchers and developers dealing with large image collections, as it allows them to focus on model development and evaluation rather than manual annotation. - -## Citations and Acknowledgements - -If you find SAM useful in your research or development work, please consider citing our paper: - -```bibtex -@misc{kirillov2023segment, - title={Segment Anything}, - author={Alexander Kirillov and Eric Mintun and Nikhila Ravi and Hanzi Mao and Chloe Rolland and Laura Gustafson and Tete Xiao and Spencer Whitehead and Alexander C. Berg and Wan-Yen Lo and Piotr Dollár and Ross Girshick}, - year={2023}, - eprint={2304.02643}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -We would like to express our gratitude to Meta AI for creating and maintaining this valuable resource for the computer vision community. 
- -*keywords: Segment Anything, Segment Anything Model, SAM, Meta SAM, image segmentation, promptable segmentation, zero-shot performance, SA-1B dataset, advanced architecture, auto-annotation, Ultralytics, pre-trained models, SAM base, SAM large, instance segmentation, computer vision, AI, artificial intelligence, machine learning, data annotation, segmentation masks, detection model, YOLO detection model, bibtex, Meta AI.* \ No newline at end of file diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/data/dataloaders/v5augmentations.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/data/dataloaders/v5augmentations.md deleted file mode 100644 index 63df3692cb755d8cd835318a95d7a3cf4c95c139..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/data/dataloaders/v5augmentations.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -description: Enhance image data with Albumentations CenterCrop, normalize, augment_hsv, replicate, random_perspective, cutout, & box_candidates. -keywords: YOLO, object detection, data loaders, V5 augmentations, CenterCrop, normalize, random_perspective ---- - -## Albumentations ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.Albumentations -<br><br> - -## LetterBox ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.LetterBox -<br><br> - -## CenterCrop ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.CenterCrop -<br><br> - -## ToTensor ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.ToTensor -<br><br> - -## normalize ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.normalize -<br><br> - -## denormalize ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.denormalize -<br><br> - -## augment_hsv ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.augment_hsv -<br><br> - -## hist_equalize ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.hist_equalize -<br><br> - -## replicate ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.replicate -<br><br> - -## letterbox ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.letterbox -<br><br> - -## random_perspective ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.random_perspective -<br><br> - -## copy_paste ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.copy_paste -<br><br> - -## cutout ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.cutout -<br><br> - -## mixup ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.mixup -<br><br> - -## box_candidates ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.box_candidates -<br><br> - -## classify_albumentations ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.classify_albumentations -<br><br> - -## classify_transforms ---- -### ::: ultralytics.yolo.data.dataloaders.v5augmentations.classify_transforms -<br><br> diff --git a/spaces/videfikri/aicover/uvr5_pack/lib_v5/layers_123821KB.py b/spaces/videfikri/aicover/uvr5_pack/lib_v5/layers_123821KB.py deleted file mode 100644 index 9835dc0f0dd66a7ef3517101180ec2c54eb6011d..0000000000000000000000000000000000000000 --- a/spaces/videfikri/aicover/uvr5_pack/lib_v5/layers_123821KB.py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from uvr5_pack.lib_v5 import spec_utils - - -class Conv2DBNActiv(nn.Module): - def 
__init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/vyshnaviii/MyGenAIchatbot/app.py b/spaces/vyshnaviii/MyGenAIchatbot/app.py deleted file mode 100644 index 8306bc777544ebc70147e76990ef6b39ee7ede7f..0000000000000000000000000000000000000000 --- a/spaces/vyshnaviii/MyGenAIchatbot/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """Meet isha, your youthful and witty personal assistant! At 21 years old, she's full of energy and always eager to help. 
isha's goal is to assist you with any questions or problems you might have. Her enthusiasm shines through in every response, making interactions with her enjoyable and engaging. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/wanghuoto/gogoai/src/components/ui/dropdown-menu.tsx b/spaces/wanghuoto/gogoai/src/components/ui/dropdown-menu.tsx deleted file mode 100644 index 184d4e6007ef85187446362f69532ab077897fea..0000000000000000000000000000000000000000 --- a/spaces/wanghuoto/gogoai/src/components/ui/dropdown-menu.tsx +++ /dev/null @@ -1,128 +0,0 @@ -'use client' - -import * as React from 'react' -import * as DropdownMenuPrimitive from '@radix-ui/react-dropdown-menu' - -import { cn } from '@/lib/utils' - -const DropdownMenu = DropdownMenuPrimitive.Root - -const DropdownMenuTrigger = DropdownMenuPrimitive.Trigger - -const DropdownMenuGroup = DropdownMenuPrimitive.Group - -const DropdownMenuPortal = DropdownMenuPrimitive.Portal - -const DropdownMenuSub = DropdownMenuPrimitive.Sub - -const DropdownMenuRadioGroup = DropdownMenuPrimitive.RadioGroup - -const DropdownMenuSubContent = React.forwardRef< - React.ElementRef<typeof DropdownMenuPrimitive.SubContent>, - React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.SubContent> ->(({ className, ...props }, ref) => ( - <DropdownMenuPrimitive.SubContent - ref={ref} - className={cn( - 'z-50 min-w-[8rem] overflow-hidden rounded-md border bg-popover p-1 text-popover-foreground shadow-md animate-in data-[side=bottom]:slide-in-from-top-1 data-[side=left]:slide-in-from-right-1 data-[side=right]:slide-in-from-left-1 data-[side=top]:slide-in-from-bottom-1', - className - )} - {...props} - /> -)) -DropdownMenuSubContent.displayName = - DropdownMenuPrimitive.SubContent.displayName - -const DropdownMenuContent = React.forwardRef< - React.ElementRef<typeof DropdownMenuPrimitive.Content>, - React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Content> ->(({ className, sideOffset = 4, ...props }, ref) => ( - <DropdownMenuPrimitive.Portal> - <DropdownMenuPrimitive.Content - ref={ref} - sideOffset={sideOffset} - className={cn( - 'z-50 min-w-[8rem] overflow-hidden rounded-md border bg-popover p-1 text-popover-foreground shadow animate-in data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2', - className - )} - {...props} - /> - </DropdownMenuPrimitive.Portal> -)) -DropdownMenuContent.displayName = DropdownMenuPrimitive.Content.displayName - -const DropdownMenuItem = React.forwardRef< - React.ElementRef<typeof DropdownMenuPrimitive.Item>, - React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Item> & { - inset?: boolean - } ->(({ className, inset, ...props }, ref) => ( - <DropdownMenuPrimitive.Item - ref={ref} - className={cn( - 'relative flex cursor-default select-none items-center rounded-sm 
px-2 py-1.5 text-sm outline-none transition-colors focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50', - inset && 'pl-8', - className - )} - {...props} - /> -)) -DropdownMenuItem.displayName = DropdownMenuPrimitive.Item.displayName - -const DropdownMenuLabel = React.forwardRef< - React.ElementRef<typeof DropdownMenuPrimitive.Label>, - React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Label> & { - inset?: boolean - } ->(({ className, inset, ...props }, ref) => ( - <DropdownMenuPrimitive.Label - ref={ref} - className={cn( - 'px-2 py-1.5 text-sm font-semibold', - inset && 'pl-8', - className - )} - {...props} - /> -)) -DropdownMenuLabel.displayName = DropdownMenuPrimitive.Label.displayName - -const DropdownMenuSeparator = React.forwardRef< - React.ElementRef<typeof DropdownMenuPrimitive.Separator>, - React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Separator> ->(({ className, ...props }, ref) => ( - <DropdownMenuPrimitive.Separator - ref={ref} - className={cn('-mx-1 my-1 h-px bg-muted', className)} - {...props} - /> -)) -DropdownMenuSeparator.displayName = DropdownMenuPrimitive.Separator.displayName - -const DropdownMenuShortcut = ({ - className, - ...props -}: React.HTMLAttributes<HTMLSpanElement>) => { - return ( - <span - className={cn('ml-auto text-xs tracking-widest opacity-60', className)} - {...props} - /> - ) -} -DropdownMenuShortcut.displayName = 'DropdownMenuShortcut' - -export { - DropdownMenu, - DropdownMenuTrigger, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuLabel, - DropdownMenuSeparator, - DropdownMenuShortcut, - DropdownMenuGroup, - DropdownMenuPortal, - DropdownMenuSub, - DropdownMenuSubContent, - DropdownMenuRadioGroup -} diff --git a/spaces/weide/ChuanhuChatGPT2/ChuanhuChatbot.py b/spaces/weide/ChuanhuChatGPT2/ChuanhuChatbot.py deleted file mode 100644 index 5d18393a7cc42c6545d90e9a8ebf949745ebe5bf..0000000000000000000000000000000000000000 --- a/spaces/weide/ChuanhuChatGPT2/ChuanhuChatbot.py +++ /dev/null @@ -1,423 +0,0 @@ -# -*- coding:utf-8 -*- -import os -import logging -import sys - -import gradio as gr - -from modules import config -from modules.config import * -from modules.utils import * -from modules.presets import * -from modules.overwrites import * -from modules.chat_func import * -from modules.openai_func import get_usage - -gr.Chatbot.postprocess = postprocess -PromptHelper.compact_text_chunks = compact_text_chunks - -with open("assets/custom.css", "r", encoding="utf-8") as f: - customCSS = f.read() - -with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo: - user_name = gr.State("") - history = gr.State([]) - token_count = gr.State([]) - promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2)) - user_api_key = gr.State(my_api_key) - user_question = gr.State("") - outputing = gr.State(False) - topic = gr.State("未命名对话历史记录") - - with gr.Row(): - with gr.Column(): - gr.HTML(title) - user_info = gr.Markdown(value="", elem_id="user_info") - gr.HTML('<center><a href="https://huggingface.co/spaces/JohnSmith9982/ChuanhuChatGPT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a></center>') - status_display = gr.Markdown(get_geoip(), elem_id="status_display") - - # https://github.com/gradio-app/gradio/pull/3296 - def create_greeting(request: gr.Request): - if hasattr(request, "username") and request.username: # is not None or is not "" - logging.info(f"Get User Name: {request.username}") - return 
gr.Markdown.update(value=f"User: {request.username}"), request.username - else: - return gr.Markdown.update(value=f"User: default", visible=False), "" - demo.load(create_greeting, inputs=None, outputs=[user_info, user_name]) - - with gr.Row().style(equal_height=True): - with gr.Column(scale=5): - with gr.Row(): - chatbot = gr.Chatbot(elem_id="chuanhu_chatbot").style(height="100%") - with gr.Row(): - with gr.Column(scale=12): - user_input = gr.Textbox( - elem_id="user_input_tb", - show_label=False, placeholder="在这里输入" - ).style(container=False) - with gr.Column(min_width=70, scale=1): - submitBtn = gr.Button("发送", variant="primary") - cancelBtn = gr.Button("取消", variant="secondary", visible=False) - with gr.Row(): - emptyBtn = gr.Button( - "🧹 新的对话", - ) - retryBtn = gr.Button("🔄 重新生成") - delFirstBtn = gr.Button("🗑️ 删除最旧对话") - delLastBtn = gr.Button("🗑️ 删除最新对话") - reduceTokenBtn = gr.Button("♻️ 总结对话") - - with gr.Column(): - with gr.Column(min_width=50, scale=1): - with gr.Tab(label="ChatGPT"): - keyTxt = gr.Textbox( - show_label=True, - placeholder=f"OpenAI API-key...", - value=hide_middle_chars(my_api_key), - type="password", - visible=not HIDE_MY_KEY, - label="API-Key", - ) - if multi_api_key: - usageTxt = gr.Markdown("多账号模式已开启,无需输入key,可直接开始对话", elem_id="usage_display") - else: - usageTxt = gr.Markdown("**发送消息** 或 **提交key** 以显示额度", elem_id="usage_display") - model_select_dropdown = gr.Dropdown( - label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0] - ) - use_streaming_checkbox = gr.Checkbox( - label="实时传输回答", value=True, visible=enable_streaming_option - ) - use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False) - language_select_dropdown = gr.Dropdown( - label="选择回复语言(针对搜索&索引功能)", - choices=REPLY_LANGUAGES, - multiselect=False, - value=REPLY_LANGUAGES[0], - ) - index_files = gr.Files(label="上传索引文件", type="file", multiple=True) - two_column = gr.Checkbox(label="双栏pdf", value=advance_docs["pdf"].get("two_column", False)) - # TODO: 公式ocr - # formula_ocr = gr.Checkbox(label="识别公式", value=advance_docs["pdf"].get("formula_ocr", False)) - - with gr.Tab(label="Prompt"): - systemPromptTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入System Prompt...", - label="System prompt", - value=initial_prompt, - lines=10, - ).style(container=False) - with gr.Accordion(label="加载Prompt模板", open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - templateFileSelectDropdown = gr.Dropdown( - label="选择Prompt模板集合文件", - choices=get_template_names(plain=True), - multiselect=False, - value=get_template_names(plain=True)[0], - ).style(container=False) - with gr.Column(scale=1): - templateRefreshBtn = gr.Button("🔄 刷新") - with gr.Row(): - with gr.Column(): - templateSelectDropdown = gr.Dropdown( - label="从Prompt模板中加载", - choices=load_template( - get_template_names(plain=True)[0], mode=1 - ), - multiselect=False, - ).style(container=False) - - with gr.Tab(label="保存/加载"): - with gr.Accordion(label="保存/加载对话历史记录", open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - historyFileSelectDropdown = gr.Dropdown( - label="从列表中加载对话", - choices=get_history_names(plain=True), - multiselect=False, - value=get_history_names(plain=True)[0], - ) - with gr.Column(scale=1): - historyRefreshBtn = gr.Button("🔄 刷新") - with gr.Row(): - with gr.Column(scale=6): - saveFileName = gr.Textbox( - show_label=True, - placeholder=f"设置文件名: 默认为.json,可选为.md", - label="设置保存文件名", - value="对话历史记录", - ).style(container=True) - with gr.Column(scale=1): - saveHistoryBtn = gr.Button("💾 
保存对话") - exportMarkdownBtn = gr.Button("📝 导出为Markdown") - gr.Markdown("默认保存于history文件夹") - with gr.Row(): - with gr.Column(): - downloadFile = gr.File(interactive=True) - - with gr.Tab(label="高级"): - gr.Markdown("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置") - default_btn = gr.Button("🔙 恢复默认设置") - - with gr.Accordion("参数", open=False): - top_p = gr.Slider( - minimum=-0, - maximum=1.0, - value=1.0, - step=0.05, - interactive=True, - label="Top-p", - ) - temperature = gr.Slider( - minimum=-0, - maximum=2.0, - value=1.0, - step=0.1, - interactive=True, - label="Temperature", - ) - - with gr.Accordion("网络设置", open=False, visible=False): - # 优先展示自定义的api_host - apihostTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入API-Host...", - label="API-Host", - value=config.api_host or shared.API_HOST, - lines=1, - ) - changeAPIURLBtn = gr.Button("🔄 切换API地址") - proxyTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入代理地址...", - label="代理地址(示例:http://127.0.0.1:10809)", - value="", - lines=2, - ) - changeProxyBtn = gr.Button("🔄 设置代理地址") - - gr.Markdown(description) - gr.HTML(footer.format(versions=versions_html()), elem_id="footer") - chatgpt_predict_args = dict( - fn=predict, - inputs=[ - user_api_key, - systemPromptTxt, - history, - user_question, - chatbot, - token_count, - top_p, - temperature, - use_streaming_checkbox, - model_select_dropdown, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - outputs=[chatbot, history, status_display, token_count], - show_progress=True, - ) - - start_outputing_args = dict( - fn=start_outputing, - inputs=[], - outputs=[submitBtn, cancelBtn], - show_progress=True, - ) - - end_outputing_args = dict( - fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn] - ) - - reset_textbox_args = dict( - fn=reset_textbox, inputs=[], outputs=[user_input] - ) - - transfer_input_args = dict( - fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn, cancelBtn], show_progress=True - ) - - get_usage_args = dict( - fn=get_usage, inputs=[user_api_key], outputs=[usageTxt], show_progress=False - ) - - - # Chatbot - cancelBtn.click(cancel_outputing, [], []) - - user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - user_input.submit(**get_usage_args) - - submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - submitBtn.click(**get_usage_args) - - emptyBtn.click( - reset_state, - outputs=[chatbot, history, token_count, status_display], - show_progress=True, - ) - emptyBtn.click(**reset_textbox_args) - - retryBtn.click(**start_outputing_args).then( - retry, - [ - user_api_key, - systemPromptTxt, - history, - chatbot, - token_count, - top_p, - temperature, - use_streaming_checkbox, - model_select_dropdown, - language_select_dropdown, - ], - [chatbot, history, status_display, token_count], - show_progress=True, - ).then(**end_outputing_args) - retryBtn.click(**get_usage_args) - - delFirstBtn.click( - delete_first_conversation, - [history, token_count], - [history, token_count, status_display], - ) - - delLastBtn.click( - delete_last_conversation, - [chatbot, history, token_count], - [chatbot, history, token_count, status_display], - show_progress=True, - ) - - reduceTokenBtn.click( - reduce_token_size, - [ - user_api_key, - systemPromptTxt, - history, - chatbot, - token_count, - top_p, - temperature, - gr.State(sum(token_count.value[-4:])), - model_select_dropdown, - language_select_dropdown, - ], - [chatbot, history, status_display, token_count], - 
show_progress=True, - ) - reduceTokenBtn.click(**get_usage_args) - - two_column.change(update_doc_config, [two_column], None) - - # ChatGPT - keyTxt.change(submit_key, keyTxt, [user_api_key, status_display]).then(**get_usage_args) - keyTxt.submit(**get_usage_args) - - # Template - templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown]) - templateFileSelectDropdown.change( - load_template, - [templateFileSelectDropdown], - [promptTemplates, templateSelectDropdown], - show_progress=True, - ) - templateSelectDropdown.change( - get_template_content, - [promptTemplates, templateSelectDropdown, systemPromptTxt], - [systemPromptTxt], - show_progress=True, - ) - - # S&L - saveHistoryBtn.click( - save_chat_history, - [saveFileName, systemPromptTxt, history, chatbot, user_name], - downloadFile, - show_progress=True, - ) - saveHistoryBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - exportMarkdownBtn.click( - export_markdown, - [saveFileName, systemPromptTxt, history, chatbot, user_name], - downloadFile, - show_progress=True, - ) - historyRefreshBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - historyFileSelectDropdown.change( - load_chat_history, - [historyFileSelectDropdown, systemPromptTxt, history, chatbot, user_name], - [saveFileName, systemPromptTxt, history, chatbot], - show_progress=True, - ) - downloadFile.change( - load_chat_history, - [downloadFile, systemPromptTxt, history, chatbot, user_name], - [saveFileName, systemPromptTxt, history, chatbot], - ) - - # Advanced - default_btn.click( - reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True - ) - changeAPIURLBtn.click( - change_api_host, - [apihostTxt], - [status_display], - show_progress=True, - ) - changeProxyBtn.click( - change_proxy, - [proxyTxt], - [status_display], - show_progress=True, - ) - -logging.info( - colorama.Back.GREEN - + "\n川虎的温馨提示:访问 http://localhost:7860 查看界面" - + colorama.Style.RESET_ALL -) -# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接 -demo.title = "川虎ChatGPT 🚀" - -if __name__ == "__main__": - reload_javascript() - # if running in Docker - if dockerflag: - if authflag: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - server_name="0.0.0.0", - server_port=7860, - auth=auth_list, - favicon_path="./assets/favicon.ico", - ) - else: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - server_name="0.0.0.0", - server_port=7860, - share=False, - favicon_path="./assets/favicon.ico", - ) - # if not running in Docker - else: - if authflag: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - share=False, - auth=auth_list, - favicon_path="./assets/favicon.ico", - inbrowser=True, - ) - else: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - share=False, favicon_path="./assets/favicon.ico", inbrowser=True - ) # 改为 share=True 可以创建公开分享链接 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理 diff --git a/spaces/weiren119/AudiogramDigitization/src/digitizer/yolov5/train.py b/spaces/weiren119/AudiogramDigitization/src/digitizer/yolov5/train.py deleted file mode 100644 index a556200838b6187b7d5befa42403291f1fb2f78b..0000000000000000000000000000000000000000 --- 
a/spaces/weiren119/AudiogramDigitization/src/digitizer/yolov5/train.py +++ /dev/null @@ -1,534 +0,0 @@ -import argparse -import logging -import math -import os -import random -import shutil -import time -from pathlib import Path - -import numpy as np -import torch.distributed as dist -import torch.nn.functional as F -import torch.optim as optim -import torch.optim.lr_scheduler as lr_scheduler -import torch.utils.data -import yaml -from torch.cuda import amp -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -import test # import test.py to get mAP after each epoch -from models.yolo import Model -from utils.datasets import create_dataloader -from utils.general import ( - torch_distributed_zero_first, labels_to_class_weights, plot_labels, check_anchors, labels_to_image_weights, - compute_loss, plot_images, fitness, strip_optimizer, plot_results, get_latest_run, check_dataset, check_file, - check_git_status, check_img_size, increment_dir, print_mutation, plot_evolution, set_logging) -from utils.google_utils import attempt_download -from utils.torch_utils import init_seeds, ModelEMA, select_device, intersect_dicts - -logger = logging.getLogger(__name__) - - -def train(hyp, opt, device, tb_writer=None): - logger.info(f'Hyperparameters {hyp}') - log_dir = Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / 'evolve' # logging directory - wdir = log_dir / 'weights' # weights directory - os.makedirs(wdir, exist_ok=True) - last = wdir / 'last.pt' - best = wdir / 'best.pt' - results_file = str(log_dir / 'results.txt') - epochs, batch_size, total_batch_size, weights, rank = \ - opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank - - # Save run settings - with open(log_dir / 'hyp.yaml', 'w') as f: - yaml.dump(hyp, f, sort_keys=False) - with open(log_dir / 'opt.yaml', 'w') as f: - yaml.dump(vars(opt), f, sort_keys=False) - - # Configure - cuda = device.type != 'cpu' - init_seeds(2 + rank) - with open(opt.data) as f: - data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict - with torch_distributed_zero_first(rank): - check_dataset(data_dict) # check - train_path = data_dict['train'] - test_path = data_dict['val'] - nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names']) # number classes, names - assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check - - # Model - pretrained = weights.endswith('.pt') - if pretrained: - with torch_distributed_zero_first(rank): - attempt_download(weights) # download if not found locally - ckpt = torch.load(weights, map_location=device) # load checkpoint - # if hyp['anchors']: - # ckpt['model'].yaml['anchors'] = round(hyp['anchors']) # force autoanchor - model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create - exclude = ['anchor'] if opt.cfg else [] # exclude keys - state_dict = ckpt['model'].float().state_dict() # to FP32 - state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect - model.load_state_dict(state_dict, strict=False) # load - logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report - else: - model = Model(opt.cfg, ch=3, nc=nc).to(device) # create - - # Freeze - freeze = ['', ] # parameter names to freeze (full or partial) - if any(freeze): - for k, v in model.named_parameters(): - if any(x in k for x in freeze): - print('freezing %s' % k) - 
v.requires_grad = False - - # Optimizer - nbs = 64 # nominal batch size - accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing - hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay - - pg0, pg1, pg2 = [], [], [] # optimizer parameter groups - for k, v in model.named_parameters(): - v.requires_grad = True - if '.bias' in k: - pg2.append(v) # biases - elif '.weight' in k and '.bn' not in k: - pg1.append(v) # apply weight decay - else: - pg0.append(v) # all else - - if opt.adam: - optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum - else: - optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) - - optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay - optimizer.add_param_group({'params': pg2}) # add pg2 (biases) - logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) - del pg0, pg1, pg2 - - # Scheduler https://arxiv.org/pdf/1812.01187.pdf - # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR - lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf'] # cosine - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) - # plot_lr_scheduler(optimizer, scheduler, epochs) - - # Resume - start_epoch, best_fitness = 0, 0.0 - if pretrained: - # Optimizer - if ckpt['optimizer'] is not None: - optimizer.load_state_dict(ckpt['optimizer']) - best_fitness = ckpt['best_fitness'] - - # Results - if ckpt.get('training_results') is not None: - with open(results_file, 'w') as file: - file.write(ckpt['training_results']) # write results.txt - - # Epochs - start_epoch = ckpt['epoch'] + 1 - if opt.resume: - assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) - shutil.copytree(wdir, wdir.parent / f'weights_backup_epoch{start_epoch - 1}') # save previous weights - if epochs < start_epoch: - logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % - (weights, ckpt['epoch'], epochs)) - epochs += ckpt['epoch'] # finetune additional epochs - - del ckpt, state_dict - - # Image sizes - gs = int(max(model.stride)) # grid size (max stride) - imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples - - # DP mode - if cuda and rank == -1 and torch.cuda.device_count() > 1: - model = torch.nn.DataParallel(model) - - # SyncBatchNorm - if opt.sync_bn and cuda and rank != -1: - model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - logger.info('Using SyncBatchNorm()') - - # Exponential moving average - ema = ModelEMA(model) if rank in [-1, 0] else None - - # DDP mode - if cuda and rank != -1: - model = DDP(model, device_ids=[opt.local_rank], output_device=(opt.local_rank)) - - # Trainloader - dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, - hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, - world_size=opt.world_size, workers=opt.workers) - mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class - nb = len(dataloader) # number of batches - assert mlc < nc, 'Label class %g exceeds nc=%g in %s. 
Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) - - # Testloader - if rank in [-1, 0]: - ema.updates = start_epoch * nb // accumulate # set EMA updates - testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt, - hyp=hyp, augment=False, cache=opt.cache_images, rect=True, rank=-1, - world_size=opt.world_size, workers=opt.workers)[0] # only runs on process 0 - - # Model parameters - hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset - model.nc = nc # attach number of classes to model - model.hyp = hyp # attach hyperparameters to model - model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou) - model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights - model.names = names - - # Class frequency - if rank in [-1, 0]: - labels = np.concatenate(dataset.labels, 0) - c = torch.tensor(labels[:, 0]) # classes - # cf = torch.bincount(c.long(), minlength=nc) + 1. - # model._initialize_biases(cf.to(device)) - plot_labels(labels, save_dir=log_dir) - if tb_writer: - # tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384 - tb_writer.add_histogram('classes', c, 0) - - # Check anchors - if not opt.noautoanchor: - check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) - - # Start training - t0 = time.time() - nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations) - # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training - maps = np.zeros(nc) # mAP per class - results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification' - scheduler.last_epoch = start_epoch - 1 # do not move - scaler = amp.GradScaler(enabled=cuda) - logger.info('Image sizes %g train, %g test' % (imgsz, imgsz_test)) - logger.info('Using %g dataloader workers' % dataloader.num_workers) - logger.info('Starting training for %g epochs...' 
% epochs) - # torch.autograd.set_detect_anomaly(True) - for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ - model.train() - - # Update image weights (optional) - if opt.img_weights: - # Generate indices - if rank in [-1, 0]: - cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights - iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights - dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx - # Broadcast if DDP - if rank != -1: - indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() - dist.broadcast(indices, 0) - if rank != 0: - dataset.indices = indices.cpu().numpy() - - # Update mosaic border - # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) - # dataset.mosaic_border = [b - imgsz, -b] # height, width borders - - mloss = torch.zeros(4, device=device) # mean losses - if rank != -1: - dataloader.sampler.set_epoch(epoch) - pbar = enumerate(dataloader) - logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size')) - if rank in [-1, 0]: - pbar = tqdm(pbar, total=nb) # progress bar - optimizer.zero_grad() - for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- - ni = i + nb * epoch # number integrated batches (since train start) - imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0 - - # Warmup - if ni <= nw: - xi = [0, nw] # x interp - # model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou) - accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) - for j, x in enumerate(optimizer.param_groups): - # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) - if 'momentum' in x: - x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']]) - - # Multi-scale - if opt.multi_scale: - sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size - sf = sz / max(imgs.shape[2:]) # scale factor - if sf != 1: - ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) - imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) - - # Forward - with amp.autocast(enabled=cuda): - pred = model(imgs) # forward - loss, loss_items = compute_loss(pred, targets.to(device), model) # loss scaled by batch_size - if rank != -1: - loss *= opt.world_size # gradient averaged between devices in DDP mode - - # Backward - scaler.scale(loss).backward() - - # Optimize - if ni % accumulate == 0: - scaler.step(optimizer) # optimizer.step - scaler.update() - optimizer.zero_grad() - if ema: - ema.update(model) - - # Print - if rank in [-1, 0]: - mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) - s = ('%10s' * 2 + '%10.4g' * 6) % ( - '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) - pbar.set_description(s) - - # Plot - if ni < 3: - f = str(log_dir / ('train_batch%g.jpg' % ni)) # filename - result = plot_images(images=imgs, targets=targets, paths=paths, fname=f) - if tb_writer and result is not None: - tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) - # tb_writer.add_graph(model, imgs) # add model to 
tensorboard - - # end batch ------------------------------------------------------------------------------------------------ - - # Scheduler - lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard - scheduler.step() - - # DDP process 0 or single-GPU - if rank in [-1, 0]: - # mAP - if ema: - ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride']) - final_epoch = epoch + 1 == epochs - if not opt.notest or final_epoch: # Calculate mAP - results, maps, times = test.test(opt.data, - batch_size=total_batch_size, - imgsz=imgsz_test, - model=ema.ema, - single_cls=opt.single_cls, - dataloader=testloader, - save_dir=log_dir) - - # Write - with open(results_file, 'a') as f: - f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP, F1, test_losses=(GIoU, obj, cls) - if len(opt.name) and opt.bucket: - os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) - - # Tensorboard - if tb_writer: - tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss', # train loss - 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', - 'val/giou_loss', 'val/obj_loss', 'val/cls_loss', # val loss - 'x/lr0', 'x/lr1', 'x/lr2'] # params - for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): - tb_writer.add_scalar(tag, x, epoch) - - # Update best mAP - fi = fitness(np.array(results).reshape(1, -1)) # fitness_i = weighted combination of [P, R, mAP, F1] - if fi > best_fitness: - best_fitness = fi - - # Save model - save = (not opt.nosave) or (final_epoch and not opt.evolve) - if save: - with open(results_file, 'r') as f: # create checkpoint - ckpt = {'epoch': epoch, - 'best_fitness': best_fitness, - 'training_results': f.read(), - 'model': ema.ema, - 'optimizer': None if final_epoch else optimizer.state_dict()} - - # Save last, best and delete - torch.save(ckpt, last) - if best_fitness == fi: - torch.save(ckpt, best) - del ckpt - # end epoch ---------------------------------------------------------------------------------------------------- - # end training - - if rank in [-1, 0]: - # Strip optimizers - n = ('_' if len(opt.name) and not opt.name.isnumeric() else '') + opt.name - fresults, flast, fbest = 'results%s.txt' % n, wdir / f'last{n}.pt', wdir / f'best{n}.pt' - for f1, f2 in zip([wdir / 'last.pt', wdir / 'best.pt', 'results.txt'], [flast, fbest, fresults]): - if os.path.exists(f1): - os.rename(f1, f2) # rename - if str(f2).endswith('.pt'): # is *.pt - strip_optimizer(f2) # strip optimizer - os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket else None # upload - # Finish - if not opt.evolve: - plot_results(save_dir=log_dir) # save as results.png - logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) - - dist.destroy_process_group() if rank not in [-1, 0] else None - torch.cuda.empty_cache() - return results - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--hyp', type=str, default='', help='hyperparameters path, i.e. 
data/hyp.scratch.yaml') - parser.add_argument('--epochs', type=int, default=300) - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') - parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes') - parser.add_argument('--img-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--notest', action='store_true', help='only test final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') - parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') - parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') - parser.add_argument('--logdir', type=str, default='runs/', help='logging directory') - parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') - opt = parser.parse_args() - - # Set DDP variables - opt.total_batch_size = opt.batch_size - opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 - opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 - set_logging(opt.global_rank) - if opt.global_rank in [-1, 0]: - check_git_status() - - # Resume - if opt.resume: # resume an interrupted run - ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path - log_dir = Path(ckpt).parent.parent # runs/exp0 - assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' - with open(log_dir / 'opt.yaml') as f: - opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace - opt.cfg, opt.weights, opt.resume = '', ckpt, True - logger.info('Resuming training from %s' % ckpt) - - else: - opt.hyp = opt.hyp or ('data/hyp.finetune.yaml' if opt.weights else 'data/hyp.scratch.yaml') - opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files - assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) - log_dir = increment_dir(Path(opt.logdir) / 'exp', opt.name) # runs/exp1 - - device = select_device(opt.device, batch_size=opt.batch_size) - - # DDP mode - if opt.local_rank != -1: - assert torch.cuda.device_count() > opt.local_rank - torch.cuda.set_device(opt.local_rank) - device = torch.device('cuda', 
opt.local_rank) - dist.init_process_group(backend='nccl', init_method='env://') # distributed backend - assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' - opt.batch_size = opt.total_batch_size // opt.world_size - - logger.info(opt) - with open(opt.hyp) as f: - hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps - - # Train - if not opt.evolve: - tb_writer = None - if opt.global_rank in [-1, 0]: - logger.info('Start Tensorboard with "tensorboard --logdir %s", view at http://localhost:6006/' % opt.logdir) - tb_writer = SummaryWriter(log_dir=log_dir) # runs/exp0 - - train(hyp, opt, device, tb_writer) - - # Evolve hyperparameters (optional) - else: - # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) - meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) - 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) - 'momentum': (0.1, 0.6, 0.98), # SGD momentum/Adam beta1 - 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay - 'giou': (1, 0.02, 0.2), # GIoU loss gain - 'cls': (1, 0.2, 4.0), # cls loss gain - 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight - 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) - 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight - 'iou_t': (0, 0.1, 0.7), # IoU training threshold - 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold - # 'anchors': (1, 2.0, 10.0), # anchors per output grid (0 to ignore) - 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) - 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) - 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) - 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) - 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) - 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) - 'scale': (1, 0.0, 0.9), # image scale (+/- gain) - 'shear': (1, 0.0, 10.0), # image shear (+/- deg) - 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 - 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) - 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) - 'mixup': (1, 0.0, 1.0)} # image mixup (probability) - - assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' - opt.notest, opt.nosave = True, True # only test/save final epoch - # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - yaml_file = Path('runs/evolve/hyp_evolved.yaml') # save best result here - if opt.bucket: - os.system('gsutil cp gs://%s/evolve.txt .' 
% opt.bucket) # download evolve.txt if exists - - for _ in range(100): # generations to evolve - if os.path.exists('evolve.txt'): # if evolve.txt exists: select best hyps and mutate - # Select parent(s) - parent = 'single' # parent selection method: 'single' or 'weighted' - x = np.loadtxt('evolve.txt', ndmin=2) - n = min(5, len(x)) # number of previous results to consider - x = x[np.argsort(-fitness(x))][:n] # top n mutations - w = fitness(x) - fitness(x).min() # weights - if parent == 'single' or len(x) == 1: - # x = x[random.randint(0, n - 1)] # random selection - x = x[random.choices(range(n), weights=w)[0]] # weighted selection - elif parent == 'weighted': - x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination - - # Mutate - mp, s = 0.9, 0.2 # mutation probability, sigma - npr = np.random - npr.seed(int(time.time())) - g = np.array([x[0] for x in meta.values()]) # gains 0-1 - ng = len(meta) - v = np.ones(ng) - while all(v == 1): # mutate until a change occurs (prevent duplicates) - v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) - for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) - hyp[k] = float(x[i + 7] * v[i]) # mutate - - # Constrain to limits - for k, v in meta.items(): - hyp[k] = max(hyp[k], v[1]) # lower limit - hyp[k] = min(hyp[k], v[2]) # upper limit - hyp[k] = round(hyp[k], 5) # significant digits - - # Train mutation - results = train(hyp.copy(), opt, device) - - # Write mutation results - print_mutation(hyp.copy(), results, yaml_file, opt.bucket) - - # Plot results - plot_evolution(yaml_file) - print('Hyperparameter evolution complete. Best results saved as: %s\nCommand to train a new model with these ' - 'hyperparameters: $ python train.py --hyp %s' % (yaml_file, yaml_file)) diff --git a/spaces/willgibs/ControlNet-v1-1/README.md b/spaces/willgibs/ControlNet-v1-1/README.md deleted file mode 100644 index 6233ca211cfefb5d2dc8a4be6fbc2412af2d3568..0000000000000000000000000000000000000000 --- a/spaces/willgibs/ControlNet-v1-1/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: ControlNet V1.1 -emoji: 📉 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.34.0 -python_version: 3.10.11 -app_file: app.py -pinned: false -license: mit -suggested_hardware: t4-medium -duplicated_from: hysts/ControlNet-v1-1 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/wolf-sigma/Starburst_Galaxy__PyStarburst_Demo/mlModels.py b/spaces/wolf-sigma/Starburst_Galaxy__PyStarburst_Demo/mlModels.py deleted file mode 100644 index c86d84e113ebbb2ff1775855916baeaacb9a655e..0000000000000000000000000000000000000000 --- a/spaces/wolf-sigma/Starburst_Galaxy__PyStarburst_Demo/mlModels.py +++ /dev/null @@ -1,137 +0,0 @@ -''' -Copyright 2023 Starburst Data - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-''' - -import openai - -import env - -from dataModels import Data - -import pandas as pd -pd.options.plotting.backend = "plotly" - -import env - -from tabulate import tabulate - -class OpenAI(): - '''Class to handle OpenAI API calls - Attributes - ---------- - api_key : str - OpenAI API key - model : str - OpenAI model name - data_class : dataModels.Data - Data class for the model - system_message : str - System message for OpenAI chatbot - - Methods - ------- - get_models() - Get list of OpenAI models - save_settings(api_key, model) - Save settings for the session. - set_system_message() - Set system message for OpenAI chatbot. - predict(message, system_message = None) - Predict response from OpenAI chatbot from the supplied question''' - - def __init__(self, data_class: Data, model = None, api_key = None) -> None: - '''Initialize OpenAI class with data class and model name - Args: data_class : dataModels.Data class - model: OpenAI model name = None - api_key: OpenAI API key = None - ''' - if api_key is not None: - self.api_key = api_key - else: - self.api_key = env.OPENAI_API_KEY - - openai.api_key = self.api_key - - if model is None: - self.model = env.OPENAI_MODEL - else: - self.model = model - - self.data_class = data_class - - self.models = self.get_models() - - def get_models(self): - '''Get list of OpenAI models''' - models = openai.Model.list(self.api_key) - print(models) - return models - - def save_settings(self, api_key: str, model: str): - '''Save settings for the session. - Args: api_key: OpenAI API key - model: OpenAI model name''' - self.api_key = api_key - self.model = model - - def set_system_message(self): - '''Set system message for OpenAI chatbot.''' - - #message_data_type = 'csv' - #message_data = self.data_class.df_summary.to_pandas().rename(columns={'state': 'State', 'risk_appetite': 'Risk_Appetite', 'count': 'Count_of_Customers'}).to_csv(index=False) - - message_data_type = 'table' - message_data = tabulate(self.data_class.df_summary.to_pandas().rename(columns={'state': 'State', 'risk_appetite': 'Risk_Appetite', 'count': 'Count_of_Customers'}), headers='keys', tablefmt='outline', showindex=False) - - self.system_message = f'''You are an AI assistant whose purpose is to provide information on structured data. - The data formatted as {message_data_type} is: - {message_data}"''' - - def predict(self, message, system_message = None): - '''Predict response from OpenAI chatbot from the supplied question - Args: message: Question to ask the OpenAI chatbot - system_message = None: System message for OpenAI chatbot''' - - if system_message is None: - system_message = self.set_system_message() - - if system_message and self.system_message is None: - raise Exception("System message is not defined. 
Please use set_system_message() method to set system message.") - - if env.DEBUG: print(self.system_message) - - response = openai.ChatCompletion.create( - model=self.model, - messages=[ - { - "role": "system", - "content": self.system_message - }, - { - "role": "user", - "content": message - } - ], - temperature=1, - max_tokens=2048, - top_p=1, - frequency_penalty=0, - presence_penalty=0, - n = 3 - ) - responses = [i.message.content + "\n\n" for i in response.choices] - if env.DEBUG: print(responses) - return responses[0] - \ No newline at end of file diff --git a/spaces/wootang04/text_generator/README.md b/spaces/wootang04/text_generator/README.md deleted file mode 100644 index 17d58a4e4ff35b8937481ddfec98e8370d8ee8ae..0000000000000000000000000000000000000000 --- a/spaces/wootang04/text_generator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Text Generator -emoji: 🚀 -colorFrom: indigo -colorTo: green -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/wrldreform/Text2ImageStable2.1/README.md b/spaces/wrldreform/Text2ImageStable2.1/README.md deleted file mode 100644 index 1832e489c5cb19e1c78074f18ac5b877795d27fb..0000000000000000000000000000000000000000 --- a/spaces/wrldreform/Text2ImageStable2.1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Text2ImageStable2.1 -emoji: 🌍 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xfbhsdfndjndghz/Ultraman/app.py b/spaces/xfbhsdfndjndghz/Ultraman/app.py deleted file mode 100644 index e7b346b308d96ff18b3b204317d6c74812f97786..0000000000000000000000000000000000000000 --- a/spaces/xfbhsdfndjndghz/Ultraman/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("huggingface/gpt2").launch() \ No newline at end of file diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/data/datasets/video/mars.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/data/datasets/video/mars.py deleted file mode 100644 index 4128e1cbf53ca39fad8e468eed90c3d80c9310a5..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/data/datasets/video/mars.py +++ /dev/null @@ -1,133 +0,0 @@ -from __future__ import division, print_function, absolute_import -import os.path as osp -import warnings -from scipy.io import loadmat - -from ..dataset import VideoDataset - - -class Mars(VideoDataset): - """MARS. - - Reference: - Zheng et al. MARS: A Video Benchmark for Large-Scale Person Re-identification. ECCV 2016. - - URL: `<http://www.liangzheng.com.cn/Project/project_mars.html>`_ - - Dataset statistics: - - identities: 1261. - - tracklets: 8298 (train) + 1980 (query) + 9330 (gallery). - - cameras: 6. 
- """ - dataset_dir = 'mars' - dataset_url = None - - def __init__(self, root='', **kwargs): - self.root = osp.abspath(osp.expanduser(root)) - self.dataset_dir = osp.join(self.root, self.dataset_dir) - self.download_dataset(self.dataset_dir, self.dataset_url) - - self.train_name_path = osp.join( - self.dataset_dir, 'info/train_name.txt' - ) - self.test_name_path = osp.join(self.dataset_dir, 'info/test_name.txt') - self.track_train_info_path = osp.join( - self.dataset_dir, 'info/tracks_train_info.mat' - ) - self.track_test_info_path = osp.join( - self.dataset_dir, 'info/tracks_test_info.mat' - ) - self.query_IDX_path = osp.join(self.dataset_dir, 'info/query_IDX.mat') - - required_files = [ - self.dataset_dir, self.train_name_path, self.test_name_path, - self.track_train_info_path, self.track_test_info_path, - self.query_IDX_path - ] - self.check_before_run(required_files) - - train_names = self.get_names(self.train_name_path) - test_names = self.get_names(self.test_name_path) - track_train = loadmat(self.track_train_info_path - )['track_train_info'] # numpy.ndarray (8298, 4) - track_test = loadmat(self.track_test_info_path - )['track_test_info'] # numpy.ndarray (12180, 4) - query_IDX = loadmat(self.query_IDX_path - )['query_IDX'].squeeze() # numpy.ndarray (1980,) - query_IDX -= 1 # index from 0 - track_query = track_test[query_IDX, :] - gallery_IDX = [ - i for i in range(track_test.shape[0]) if i not in query_IDX - ] - track_gallery = track_test[gallery_IDX, :] - - train = self.process_data( - train_names, track_train, home_dir='bbox_train', relabel=True - ) - query = self.process_data( - test_names, track_query, home_dir='bbox_test', relabel=False - ) - gallery = self.process_data( - test_names, track_gallery, home_dir='bbox_test', relabel=False - ) - - super(Mars, self).__init__(train, query, gallery, **kwargs) - - def get_names(self, fpath): - names = [] - with open(fpath, 'r') as f: - for line in f: - new_line = line.rstrip() - names.append(new_line) - return names - - def process_data( - self, names, meta_data, home_dir=None, relabel=False, min_seq_len=0 - ): - assert home_dir in ['bbox_train', 'bbox_test'] - num_tracklets = meta_data.shape[0] - pid_list = list(set(meta_data[:, 2].tolist())) - - if relabel: - pid2label = {pid: label for label, pid in enumerate(pid_list)} - tracklets = [] - - for tracklet_idx in range(num_tracklets): - data = meta_data[tracklet_idx, ...] - start_index, end_index, pid, camid = data - if pid == -1: - continue # junk images are just ignored - assert 1 <= camid <= 6 - if relabel: - pid = pid2label[pid] - camid -= 1 # index starts from 0 - img_names = names[start_index - 1:end_index] - - # make sure image names correspond to the same person - pnames = [img_name[:4] for img_name in img_names] - assert len( - set(pnames) - ) == 1, 'Error: a single tracklet contains different person images' - - # make sure all images are captured under the same camera - camnames = [img_name[5] for img_name in img_names] - assert len( - set(camnames) - ) == 1, 'Error: images are captured under different cameras!' - - # append image names with directory information - img_paths = [ - osp.join(self.dataset_dir, home_dir, img_name[:4], img_name) - for img_name in img_names - ] - if len(img_paths) >= min_seq_len: - img_paths = tuple(img_paths) - tracklets.append((img_paths, pid, camid)) - - return tracklets - - def combine_all(self): - warnings.warn( - 'Some query IDs do not appear in gallery. 
Therefore, combineall ' - 'does not make any difference to Mars' - ) diff --git a/spaces/xin/PatentSolver/App/bin/TextSummarizer.py b/spaces/xin/PatentSolver/App/bin/TextSummarizer.py deleted file mode 100644 index 300156dedc604e2ab337b790f30a874f6132e5dc..0000000000000000000000000000000000000000 --- a/spaces/xin/PatentSolver/App/bin/TextSummarizer.py +++ /dev/null @@ -1,5 +0,0 @@ -from textblob import TextBlob - -sentiment = TextBlob("Therefore, leakage of the fluid from the connection part of the outer cylinders (or connection parts of the inner cylinders) is unlikely to occur in this connector, and especially in the case in which a cryogenic fluid such as liquefied hydrogen or the like is being handled, the reliability of such a connector is high in the case of use in applications in which heat shrinkage may occur in the outer cylinders or inner cylinders").sentiment - -print(sentiment) \ No newline at end of file diff --git a/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/models/GroundingDINO/__init__.py b/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/models/GroundingDINO/__init__.py deleted file mode 100644 index 2af819d61d589cfec2e0ca46612a7456f42b831a..0000000000000000000000000000000000000000 --- a/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/models/GroundingDINO/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Conditional DETR -# Copyright (c) 2021 Microsoft. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Copied from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
-# ------------------------------------------------------------------------ - -from .groundingdino import build_groundingdino diff --git a/spaces/xxie92/antibody_visulization/diffab/utils/data.py b/spaces/xxie92/antibody_visulization/diffab/utils/data.py deleted file mode 100644 index c206ae5d8e304a0117b78a38cc144a48bc8e5d10..0000000000000000000000000000000000000000 --- a/spaces/xxie92/antibody_visulization/diffab/utils/data.py +++ /dev/null @@ -1,89 +0,0 @@ -import math -import torch -from torch.utils.data._utils.collate import default_collate - - -DEFAULT_PAD_VALUES = { - 'aa': 21, - 'chain_id': ' ', - 'icode': ' ', -} - -DEFAULT_NO_PADDING = { - 'origin', -} - -class PaddingCollate(object): - - def __init__(self, length_ref_key='aa', pad_values=DEFAULT_PAD_VALUES, no_padding=DEFAULT_NO_PADDING, eight=True): - super().__init__() - self.length_ref_key = length_ref_key - self.pad_values = pad_values - self.no_padding = no_padding - self.eight = eight - - @staticmethod - def _pad_last(x, n, value=0): - if isinstance(x, torch.Tensor): - assert x.size(0) <= n - if x.size(0) == n: - return x - pad_size = [n - x.size(0)] + list(x.shape[1:]) - pad = torch.full(pad_size, fill_value=value).to(x) - return torch.cat([x, pad], dim=0) - elif isinstance(x, list): - pad = [value] * (n - len(x)) - return x + pad - else: - return x - - @staticmethod - def _get_pad_mask(l, n): - return torch.cat([ - torch.ones([l], dtype=torch.bool), - torch.zeros([n-l], dtype=torch.bool) - ], dim=0) - - @staticmethod - def _get_common_keys(list_of_dict): - keys = set(list_of_dict[0].keys()) - for d in list_of_dict[1:]: - keys = keys.intersection(d.keys()) - return keys - - - def _get_pad_value(self, key): - if key not in self.pad_values: - return 0 - return self.pad_values[key] - - def __call__(self, data_list): - max_length = max([data[self.length_ref_key].size(0) for data in data_list]) - keys = self._get_common_keys(data_list) - - if self.eight: - max_length = math.ceil(max_length / 8) * 8 - data_list_padded = [] - for data in data_list: - data_padded = { - k: self._pad_last(v, max_length, value=self._get_pad_value(k)) if k not in self.no_padding else v - for k, v in data.items() - if k in keys - } - data_padded['mask'] = self._get_pad_mask(data[self.length_ref_key].size(0), max_length) - data_list_padded.append(data_padded) - return default_collate(data_list_padded) - - -def apply_patch_to_tensor(x_full, x_patch, patch_idx): - """ - Args: - x_full: (N, ...) - x_patch: (M, ...) - patch_idx: (M, ) - Returns: - (N, ...) 
- """ - x_full = x_full.clone() - x_full[patch_idx] = x_patch - return x_full diff --git a/spaces/yairVag/Image_Captioning/app.py b/spaces/yairVag/Image_Captioning/app.py deleted file mode 100644 index 61274a7325ba4143ba2dd879ae479b58b77b2ab8..0000000000000000000000000000000000000000 --- a/spaces/yairVag/Image_Captioning/app.py +++ /dev/null @@ -1,33 +0,0 @@ -import os -import torch -import gradio as gr -from transformers import VisionEncoderDecoderModel, AutoFeatureExtractor, AutoTokenizer - - -def create_caption_transformer(img): - """ - create_caption_transformer() create a caption for an image using a transformer model - that was trained on 'Flickr image dataset' - :param img: a numpy array of the image - :return: a string of the image caption - """ - - sample = feature_extractor(img, return_tensors="pt").pixel_values.to('cpu') - caption_ids = model.generate(sample)[0] - caption_text = tokenizer.decode(caption_ids, skip_special_tokens=True) - caption_text = caption_text.split('.')[0] - return caption_text - - -torch.__version__ -IMAGES_EXAMPLES_FOLDER = 'examples/' -images = os.listdir(IMAGES_EXAMPLES_FOLDER) -IMAGES_EXAMPLES = [IMAGES_EXAMPLES_FOLDER + img for img in images] -model = VisionEncoderDecoderModel.from_pretrained(os.getcwd()).to('cpu') -feature_extractor = AutoFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k') -tokenizer = AutoTokenizer.from_pretrained('gpt2') -iface = gr.Interface(fn=create_caption_transformer, - inputs="image", - outputs='text', - examples=IMAGES_EXAMPLES - ).launch(share=True) diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/SettingDialog/SettingNavigation.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/SettingDialog/SettingNavigation.tsx deleted file mode 100644 index 926bafbf8a5e26865e9486477dfdbcbb50d53433..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/SettingDialog/SettingNavigation.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import styled from "@emotion/styled" -import { FC } from "react" -import { Button } from "../../../components/Button" -import { Localized } from "../../../components/Localized" - -export type SettingRoute = "general" | "midi" -const routes: SettingRoute[] = ["general", "midi"] - -const RouteItem = styled(Button)<{ selected: boolean }>` - display: flex; - font-size: 1rem; - align-items: center; - margin-bottom: 0.5rem; - background: ${({ theme, selected }) => - selected ? 
theme.highlightColor : "inherit"}; -` - -const Container = styled.div` - display: flex; - flex-direction: column; - min-width: 8em; - margin-right: 2rem; -` - -const RouteName: FC<{ route: SettingRoute }> = ({ route }) => { - switch (route) { - case "general": - return <Localized default="General">general</Localized> - case "midi": - return <Localized default="MIDI">midi</Localized> - } -} - -export const SettingNavigation: FC<{ - route: SettingRoute - onChange: (route: SettingRoute) => void -}> = ({ route, onChange }) => { - return ( - <Container> - {routes.map((r) => ( - <RouteItem selected={route === r} onClick={() => onChange(r)}> - <RouteName key={r} route={r} /> - </RouteItem> - ))} - </Container> - ) -} diff --git a/spaces/yeqingmei123/face-test/e4e/models/encoders/helpers.py b/spaces/yeqingmei123/face-test/e4e/models/encoders/helpers.py deleted file mode 100644 index c4a58b34ea5ca6912fe53c63dede0a8696f5c024..0000000000000000000000000000000000000000 --- a/spaces/yeqingmei123/face-test/e4e/models/encoders/helpers.py +++ /dev/null @@ -1,140 +0,0 @@ -from collections import namedtuple -import torch -import torch.nn.functional as F -from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module - -""" -ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) -""" - - -class Flatten(Module): - def forward(self, input): - return input.view(input.size(0), -1) - - -def l2_norm(input, axis=1): - norm = torch.norm(input, 2, axis, True) - output = torch.div(input, norm) - return output - - -class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])): - """ A named tuple describing a ResNet block. """ - - -def get_block(in_channel, depth, num_units, stride=2): - return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)] - - -def get_blocks(num_layers): - if num_layers == 50: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=4), - get_block(in_channel=128, depth=256, num_units=14), - get_block(in_channel=256, depth=512, num_units=3) - ] - elif num_layers == 100: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=13), - get_block(in_channel=128, depth=256, num_units=30), - get_block(in_channel=256, depth=512, num_units=3) - ] - elif num_layers == 152: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=8), - get_block(in_channel=128, depth=256, num_units=36), - get_block(in_channel=256, depth=512, num_units=3) - ] - else: - raise ValueError("Invalid number of layers: {}. 
Must be one of [50, 100, 152]".format(num_layers)) - return blocks - - -class SEModule(Module): - def __init__(self, channels, reduction): - super(SEModule, self).__init__() - self.avg_pool = AdaptiveAvgPool2d(1) - self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) - self.relu = ReLU(inplace=True) - self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) - self.sigmoid = Sigmoid() - - def forward(self, x): - module_input = x - x = self.avg_pool(x) - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - x = self.sigmoid(x) - return module_input * x - - -class bottleneck_IR(Module): - def __init__(self, in_channel, depth, stride): - super(bottleneck_IR, self).__init__() - if in_channel == depth: - self.shortcut_layer = MaxPool2d(1, stride) - else: - self.shortcut_layer = Sequential( - Conv2d(in_channel, depth, (1, 1), stride, bias=False), - BatchNorm2d(depth) - ) - self.res_layer = Sequential( - BatchNorm2d(in_channel), - Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), - Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth) - ) - - def forward(self, x): - shortcut = self.shortcut_layer(x) - res = self.res_layer(x) - return res + shortcut - - -class bottleneck_IR_SE(Module): - def __init__(self, in_channel, depth, stride): - super(bottleneck_IR_SE, self).__init__() - if in_channel == depth: - self.shortcut_layer = MaxPool2d(1, stride) - else: - self.shortcut_layer = Sequential( - Conv2d(in_channel, depth, (1, 1), stride, bias=False), - BatchNorm2d(depth) - ) - self.res_layer = Sequential( - BatchNorm2d(in_channel), - Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), - PReLU(depth), - Conv2d(depth, depth, (3, 3), stride, 1, bias=False), - BatchNorm2d(depth), - SEModule(depth, 16) - ) - - def forward(self, x): - shortcut = self.shortcut_layer(x) - res = self.res_layer(x) - return res + shortcut - - -def _upsample_add(x, y): - """Upsample and add two feature maps. - Args: - x: (Variable) top feature map to be upsampled. - y: (Variable) lateral feature map. - Returns: - (Variable) added feature map. - Note in PyTorch, when input size is odd, the upsampled feature map - with `F.upsample(..., scale_factor=2, mode='nearest')` - maybe not equal to the lateral feature map size. - e.g. - original input size: [N,_,15,15] -> - conv2d feature map size: [N,_,8,8] -> - upsampled feature map size: [N,_,16,16] - So we choose bilinear upsample which supports arbitrary output sizes. - """ - _, _, H, W = y.size() - return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/chinese_clip/modeling_chinese_clip.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/chinese_clip/modeling_chinese_clip.py deleted file mode 100644 index 7bab0aea6eb95d0b942c647a572879a9c47ef44a..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/chinese_clip/modeling_chinese_clip.py +++ /dev/null @@ -1,1581 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" PyTorch Chinese-CLIP model.""" - - -import math -from dataclasses import dataclass -from typing import Any, List, Optional, Tuple, Union - -import torch -import torch.utils.checkpoint -from torch import nn - -from ...activations import ACT2FN -from ...modeling_outputs import ( - BaseModelOutput, - BaseModelOutputWithPastAndCrossAttentions, - BaseModelOutputWithPooling, - BaseModelOutputWithPoolingAndCrossAttentions, -) -from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer -from ...utils import ( - ModelOutput, - add_code_sample_docstrings, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, - replace_return_docstrings, -) -from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "OFA-Sys/chinese-clip-vit-base-patch16" -_CONFIG_FOR_DOC = "ChineseCLIPConfig" - -CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "OFA-Sys/chinese-clip-vit-base-patch16", - # See all Chinese-CLIP models at https://huggingface.co/models?filter=chinese_clip -] - - -# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html -# Copied from transformers.models.clip.modeling_clip.contrastive_loss -def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: - return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) - - -def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor: - caption_loss = contrastive_loss(similarity) - image_loss = contrastive_loss(similarity.t()) - return (caption_loss + image_loss) / 2.0 - - -@dataclass -class ChineseCLIPOutput(ModelOutput): - """ - Args: - loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): - Contrastive loss for image-text similarity. - logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): - The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text - similarity scores. - logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): - The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image - similarity scores. - text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): - The text embeddings obtained by applying the projection layer to the pooled output of - [`ChineseCLIPTextModel`]. - image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): - The image embeddings obtained by applying the projection layer to the pooled output of - [`ChineseCLIPVisionModel`]. - text_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): - The output of the [`ChineseCLIPTextModel`]. - vision_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): - The output of the [`ChineseCLIPVisionModel`]. 
- """ - - loss: Optional[torch.FloatTensor] = None - logits_per_image: torch.FloatTensor = None - logits_per_text: torch.FloatTensor = None - text_embeds: torch.FloatTensor = None - image_embeds: torch.FloatTensor = None - text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None - vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None - - def to_tuple(self) -> Tuple[Any]: - return tuple( - self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() - for k in self.keys() - ) - - -# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->ChineseCLIPText -class ChineseCLIPTextEmbeddings(nn.Module): - """Construct the embeddings from word, position and token_type embeddings.""" - - def __init__(self, config): - super().__init__() - self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) - self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) - self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - self.register_buffer( - "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False - ) - self.register_buffer( - "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False - ) - - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - past_key_values_length: int = 0, - ) -> torch.Tensor: - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] - - seq_length = input_shape[1] - - if position_ids is None: - position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] - - # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs - # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves - # issue #5664 - if token_type_ids is None: - if hasattr(self, "token_type_ids"): - buffered_token_type_ids = self.token_type_ids[:, :seq_length] - buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) - token_type_ids = buffered_token_type_ids_expanded - else: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) - - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) - token_type_embeddings = self.token_type_embeddings(token_type_ids) - - embeddings = inputs_embeds + token_type_embeddings - if self.position_embedding_type == "absolute": - position_embeddings = self.position_embeddings(position_ids) - embeddings += position_embeddings - embeddings = self.LayerNorm(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - -# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with 
CLIP->ChineseCLIP -class ChineseCLIPVisionEmbeddings(nn.Module): - def __init__(self, config: ChineseCLIPVisionConfig): - super().__init__() - self.config = config - self.embed_dim = config.hidden_size - self.image_size = config.image_size - self.patch_size = config.patch_size - - self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) - - self.patch_embedding = nn.Conv2d( - in_channels=config.num_channels, - out_channels=self.embed_dim, - kernel_size=self.patch_size, - stride=self.patch_size, - bias=False, - ) - - self.num_patches = (self.image_size // self.patch_size) ** 2 - self.num_positions = self.num_patches + 1 - self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) - self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) - - def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: - batch_size = pixel_values.shape[0] - target_dtype = self.patch_embedding.weight.dtype - patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] - patch_embeds = patch_embeds.flatten(2).transpose(1, 2) - - class_embeds = self.class_embedding.expand(batch_size, 1, -1) - embeddings = torch.cat([class_embeds, patch_embeds], dim=1) - embeddings = embeddings + self.position_embedding(self.position_ids) - return embeddings - - -# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ChineseCLIPText -class ChineseCLIPTextSelfAttention(nn.Module): - def __init__(self, config, position_embedding_type=None): - super().__init__() - if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): - raise ValueError( - f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " - f"heads ({config.num_attention_heads})" - ) - - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - - self.query = nn.Linear(config.hidden_size, self.all_head_size) - self.key = nn.Linear(config.hidden_size, self.all_head_size) - self.value = nn.Linear(config.hidden_size, self.all_head_size) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - self.position_embedding_type = position_embedding_type or getattr( - config, "position_embedding_type", "absolute" - ) - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - self.max_position_embeddings = config.max_position_embeddings - self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) - - self.is_decoder = config.is_decoder - - def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: - new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = x.view(new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - mixed_query_layer = self.query(hidden_states) - - # If this is instantiated as a cross-attention module, the keys - # and values come from an 
encoder; the attention mask needs to be - # such that the encoder's padding tokens are not attended to. - is_cross_attention = encoder_hidden_states is not None - - if is_cross_attention and past_key_value is not None: - # reuse k,v, cross_attentions - key_layer = past_key_value[0] - value_layer = past_key_value[1] - attention_mask = encoder_attention_mask - elif is_cross_attention: - key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) - value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) - attention_mask = encoder_attention_mask - elif past_key_value is not None: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - key_layer = torch.cat([past_key_value[0], key_layer], dim=2) - value_layer = torch.cat([past_key_value[1], value_layer], dim=2) - else: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - - query_layer = self.transpose_for_scores(mixed_query_layer) - - use_cache = past_key_value is not None - if self.is_decoder: - # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. - # Further calls to cross_attention layer can then reuse all cross-attention - # key/value_states (first "if" case) - # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of - # all previous decoder key/value_states. Further calls to uni-directional self-attention - # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) - # if encoder bi-directional self-attention `past_key_value` is always `None` - past_key_value = (key_layer, value_layer) - - # Take the dot product between "query" and "key" to get the raw attention scores. 
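-        # query_layer/key_layer/value_layer are shaped (batch_size, num_heads, seq_length, head_size),
-        # so the score matrix computed below has shape (batch_size, num_heads, query_length, key_length).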
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - query_length, key_length = query_layer.shape[2], key_layer.shape[2] - if use_cache: - position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( - -1, 1 - ) - else: - position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) - position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) - distance = position_ids_l - position_ids_r - - positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) - positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility - - if self.position_embedding_type == "relative_key": - relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores - elif self.position_embedding_type == "relative_key_query": - relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key - - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - if attention_mask is not None: - # Apply the attention mask is (precomputed for all layers in ChineseCLIPTextModel forward() function) - attention_scores = attention_scores + attention_mask - - # Normalize the attention scores to probabilities. - attention_probs = nn.functional.softmax(attention_scores, dim=-1) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. 
- attention_probs = self.dropout(attention_probs) - - # Mask heads if we want to - if head_mask is not None: - attention_probs = attention_probs * head_mask - - context_layer = torch.matmul(attention_probs, value_layer) - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(new_context_layer_shape) - - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - - if self.is_decoder: - outputs = outputs + (past_key_value,) - return outputs - - -# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText -class ChineseCLIPTextSelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ChineseCLIPText -class ChineseCLIPTextAttention(nn.Module): - def __init__(self, config, position_embedding_type=None): - super().__init__() - self.self = ChineseCLIPTextSelfAttention(config, position_embedding_type=position_embedding_type) - self.output = ChineseCLIPTextSelfOutput(config) - self.pruned_heads = set() - - def prune_heads(self, heads): - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads - ) - - # Prune linear layers - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - self_outputs = self.self( - hidden_states, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - ) - attention_output = self.output(self_outputs[0], hidden_states) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs - - -class ChineseCLIPVisionAttention(nn.Module): - """Multi-headed attention from 'Attention Is All You Need' paper""" - - def __init__(self, config): - super().__init__() - self.config = config - self.embed_dim = config.hidden_size - self.num_heads = config.num_attention_heads - self.head_dim = self.embed_dim // self.num_heads - if self.head_dim * 
self.num_heads != self.embed_dim: - raise ValueError( - f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" - f" {self.num_heads})." - ) - self.scale = self.head_dim**-0.5 - self.dropout = config.attention_dropout - - self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) - self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) - self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) - self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() - - def forward( - self, - hidden_states: torch.Tensor, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - """Input shape: Batch x Time x Channel""" - - bsz, tgt_len, embed_dim = hidden_states.size() - - # get query proj - query_states = self.q_proj(hidden_states) * self.scale - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) - key_states = key_states.view(*proj_shape) - value_states = value_states.view(*proj_shape) - - src_len = key_states.size(1) - attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) - - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {attn_weights.size()}" - ) - - attn_weights = nn.functional.softmax(attn_weights, dim=-1) - - if output_attentions: - # this operation is a bit akward, but it's required to - # make sure that attn_weights keeps its gradient. 
- # In order to do so, attn_weights have to reshaped - # twice and have to be reused in the following - attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) - attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) - else: - attn_weights_reshaped = None - - attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) - - attn_output = torch.bmm(attn_probs, value_states) - - if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): - raise ValueError( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {attn_output.size()}" - ) - - attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) - attn_output = attn_output.transpose(1, 2) - attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) - - attn_output = self.out_proj(attn_output) - - return attn_output, attn_weights_reshaped - - -# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText -class ChineseCLIPTextIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText -class ChineseCLIPTextOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision -class ChineseCLIPVisionMLP(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - self.activation_fn = ACT2FN[config.hidden_act] - self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) - self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.fc1(hidden_states) - hidden_states = self.activation_fn(hidden_states) - hidden_states = self.fc2(hidden_states) - return hidden_states - - -# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ChineseCLIPText -class ChineseCLIPTextLayer(nn.Module): - def __init__(self, config): - super().__init__() - self.chunk_size_feed_forward = config.chunk_size_feed_forward - self.seq_len_dim = 1 - self.attention = ChineseCLIPTextAttention(config) - self.is_decoder = config.is_decoder - self.add_cross_attention = config.add_cross_attention - if self.add_cross_attention: - if not self.is_decoder: - raise ValueError(f"{self} should be used as a decoder model if cross attention is added") - self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type="absolute") - self.intermediate = 
ChineseCLIPTextIntermediate(config) - self.output = ChineseCLIPTextOutput(config) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 - self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None - self_attention_outputs = self.attention( - hidden_states, - attention_mask, - head_mask, - output_attentions=output_attentions, - past_key_value=self_attn_past_key_value, - ) - attention_output = self_attention_outputs[0] - - # if decoder, the last output is tuple of self-attn cache - if self.is_decoder: - outputs = self_attention_outputs[1:-1] - present_key_value = self_attention_outputs[-1] - else: - outputs = self_attention_outputs[1:] # add self attentions if we output attention weights - - cross_attn_present_key_value = None - if self.is_decoder and encoder_hidden_states is not None: - if not hasattr(self, "crossattention"): - raise ValueError( - f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" - " by setting `config.add_cross_attention=True`" - ) - - # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple - cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None - cross_attention_outputs = self.crossattention( - attention_output, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - cross_attn_past_key_value, - output_attentions, - ) - attention_output = cross_attention_outputs[0] - outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights - - # add cross-attn cache to positions 3,4 of present_key_value tuple - cross_attn_present_key_value = cross_attention_outputs[-1] - present_key_value = present_key_value + cross_attn_present_key_value - - layer_output = apply_chunking_to_forward( - self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output - ) - outputs = (layer_output,) + outputs - - # if decoder, return the attn key/values as the last output - if self.is_decoder: - outputs = outputs + (present_key_value,) - - return outputs - - def feed_forward_chunk(self, attention_output): - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - return layer_output - - -class ChineseCLIPVisionLayer(nn.Module): - def __init__(self, config: ChineseCLIPConfig): - super().__init__() - self.embed_dim = config.hidden_size - self.self_attn = ChineseCLIPVisionAttention(config) - self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) - self.mlp = ChineseCLIPVisionMLP(config) - self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) - - def forward( - self, - hidden_states: torch.Tensor, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under - returned tensors for more detail. - """ - residual = hidden_states - - hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( - hidden_states=hidden_states, - output_attentions=output_attentions, - ) - hidden_states = residual + hidden_states - - residual = hidden_states - hidden_states = self.layer_norm2(hidden_states) - hidden_states = self.mlp(hidden_states) - hidden_states = residual + hidden_states - - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs - - -# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText -class ChineseCLIPTextPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.activation = nn.Tanh() - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense(first_token_tensor) - pooled_output = self.activation(pooled_output) - return pooled_output - - -class ChineseCLIPPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. - """ - - config_class = ChineseCLIPConfig - base_model_prefix = "chinese_clip" - supports_gradient_checkpointing = True - - def _init_weights(self, module): - """Initialize the weights""" - factor = self.config.initializer_factor - if isinstance(module, ChineseCLIPVisionEmbeddings): - factor = self.config.initializer_factor - nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) - nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) - nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) - elif isinstance(module, ChineseCLIPTextEmbeddings): - nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range) - nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range) - nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range) - for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]: - if embedding.padding_idx is not None: - embedding.weight.data[embedding.padding_idx].zero_() - elif isinstance(module, ChineseCLIPVisionAttention): - factor = self.config.initializer_factor - in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - out_proj_std = (module.embed_dim**-0.5) * factor - nn.init.normal_(module.q_proj.weight, std=in_proj_std) - nn.init.normal_(module.k_proj.weight, std=in_proj_std) - nn.init.normal_(module.v_proj.weight, std=in_proj_std) - nn.init.normal_(module.out_proj.weight, std=out_proj_std) - elif isinstance(module, ChineseCLIPVisionMLP): - factor = self.config.initializer_factor - in_proj_std = ( - (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - ) - fc_std = (2 * module.config.hidden_size) ** -0.5 * factor - nn.init.normal_(module.fc1.weight, std=fc_std) - nn.init.normal_(module.fc2.weight, std=in_proj_std) - elif isinstance(module, ChineseCLIPModel): - nn.init.normal_( - module.text_projection.weight, - std=module.text_embed_dim**-0.5 * self.config.initializer_factor, - ) - 
nn.init.normal_( - module.visual_projection.weight, - std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, - ) - - if isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - if isinstance(module, nn.Linear): - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.bias is not None: - module.bias.data.zero_() - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, ChineseCLIPVisionEncoder) or isinstance(module, ChineseCLIPTextEncoder): - module.gradient_checkpointing = value - - -CHINESE_CLIP_START_DOCSTRING = r""" - This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it - as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and - behavior. - - Parameters: - config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, - 1]`: - - - 0 corresponds to a *sentence A* token, - - 1 corresponds to a *sentence B* token. - - [What are token type IDs?](../glossary#token-type-ids) - position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): - Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This - is useful if you want more control over how to convert `input_ids` indices into associated vectors than the - model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. 
- return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - -CHINESE_CLIP_VISION_INPUTS_DOCSTRING = r""" - Args: - pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): - Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using - [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - -CHINESE_CLIP_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide - it. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, - 1]`: - - - 0 corresponds to a *sentence A* token, - - 1 corresponds to a *sentence B* token. - - [What are token type IDs?](../glossary#token-type-ids) - position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): - Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using - [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. - return_loss (`bool`, *optional*): - Whether or not to return the contrastive loss. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
-""" - - -# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ChineseCLIPText -class ChineseCLIPTextEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)]) - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = False, - output_hidden_states: Optional[bool] = False, - return_dict: Optional[bool] = True, - ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None - - if self.gradient_checkpointing and self.training: - if use_cache: - logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." - ) - use_cache = False - - next_decoder_cache = () if use_cache else None - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_head_mask = head_mask[i] if head_mask is not None else None - past_key_value = past_key_values[i] if past_key_values is not None else None - - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, past_key_value, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(layer_module), - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - ) - else: - layer_outputs = layer_module( - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - ) - - hidden_states = layer_outputs[0] - if use_cache: - next_decoder_cache += (layer_outputs[-1],) - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - if self.config.add_cross_attention: - all_cross_attentions = all_cross_attentions + (layer_outputs[2],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - next_decoder_cache, - all_hidden_states, - all_self_attentions, - all_cross_attentions, - ] - if v is not None - ) - return BaseModelOutputWithPastAndCrossAttentions( - last_hidden_state=hidden_states, - past_key_values=next_decoder_cache, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - cross_attentions=all_cross_attentions, - ) - - -class ChineseCLIPVisionEncoder(nn.Module): - """ - Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a - [`ChineseCLIPVisionEncoderLayer`]. 
- - Args: - config: ChineseCLIPConfig - """ - - def __init__(self, config: ChineseCLIPConfig): - super().__init__() - self.config = config - self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)]) - self.gradient_checkpointing = False - - def forward( - self, - inputs_embeds, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutput]: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - hidden_states = inputs_embeds - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(encoder_layer), - hidden_states, - ) - else: - layer_outputs = encoder_layer( - hidden_states, - output_attentions=output_attentions, - ) - - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) - - -class ChineseCLIPVisionTransformer(nn.Module): - def __init__(self, config: ChineseCLIPVisionConfig): - super().__init__() - self.config = config - embed_dim = config.hidden_size - - self.embeddings = ChineseCLIPVisionEmbeddings(config) - self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) - self.encoder = ChineseCLIPVisionEncoder(config) - self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) - - @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) - def forward( - self, - pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: 
Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutputWithPooling]: - r""" - Returns: - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if pixel_values is None: - raise ValueError("You have to specify pixel_values") - - hidden_states = self.embeddings(pixel_values) - hidden_states = self.pre_layrnorm(hidden_states) - - encoder_outputs = self.encoder( - inputs_embeds=hidden_states, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - last_hidden_state = encoder_outputs[0] - pooled_output = last_hidden_state[:, 0, :] - pooled_output = self.post_layernorm(pooled_output) - - if not return_dict: - return (last_hidden_state, pooled_output) + encoder_outputs[1:] - - return BaseModelOutputWithPooling( - last_hidden_state=last_hidden_state, - pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - ) - - -@add_start_docstrings( - "The text model from CHINESE_CLIP without any head or projection on top.", - CHINESE_CLIP_START_DOCSTRING, -) -class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel): - """ - - The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of - cross-attention is added between the self-attention layers, following the architecture described in [Attention is - all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, - Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. - - To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set - to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and - `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. - """ - - config_class = ChineseCLIPTextConfig - - def __init__(self, config, add_pooling_layer=True): - super().__init__(config) - self.config = config - - self.embeddings = ChineseCLIPTextEmbeddings(config) - self.encoder = ChineseCLIPTextEncoder(config) - - self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - def _prune_heads(self, heads_to_prune): - """ - Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=BaseModelOutputWithPoolingAndCrossAttentions, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - encoder_hidden_states: Optional[torch.Tensor] = None, - encoder_attention_mask: Optional[torch.Tensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: - r""" - encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. - - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that - don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all - `decoder_input_ids` of shape `(batch_size, sequence_length)`. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see - `past_key_values`). 
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if self.config.is_decoder: - use_cache = use_cache if use_cache is not None else self.config.use_cache - else: - use_cache = False - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) - input_shape = input_ids.size() - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - batch_size, seq_length = input_shape - device = input_ids.device if input_ids is not None else inputs_embeds.device - - # past_key_values_length - past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 - - if attention_mask is None: - attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) - - if token_type_ids is None: - if hasattr(self.embeddings, "token_type_ids"): - buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] - buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) - token_type_ids = buffered_token_type_ids_expanded - else: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) - - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. 
- extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) - - # If a 2D or 3D attention mask is provided for the cross-attention - # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] - if self.config.is_decoder and encoder_hidden_states is not None: - encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() - encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) - if encoder_attention_mask is None: - encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) - encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) - else: - encoder_extended_attention_mask = None - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - - embedding_output = self.embeddings( - input_ids=input_ids, - position_ids=position_ids, - token_type_ids=token_type_ids, - inputs_embeds=inputs_embeds, - past_key_values_length=past_key_values_length, - ) - encoder_outputs = self.encoder( - embedding_output, - attention_mask=extended_attention_mask, - head_mask=head_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_extended_attention_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - sequence_output = encoder_outputs[0] - pooled_output = self.pooler(sequence_output) if self.pooler is not None else None - - if not return_dict: - return (sequence_output, pooled_output) + encoder_outputs[1:] - - return BaseModelOutputWithPoolingAndCrossAttentions( - last_hidden_state=sequence_output, - pooler_output=pooled_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - cross_attentions=encoder_outputs.cross_attentions, - ) - - -@add_start_docstrings( - """The vision model from CHINESE_CLIP without any head or projection on top.""", - CHINESE_CLIP_START_DOCSTRING, -) -class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel): - config_class = ChineseCLIPVisionConfig - main_input_name = "pixel_values" - - def __init__(self, config: ChineseCLIPVisionConfig): - super().__init__(config) - self.vision_model = ChineseCLIPVisionTransformer(config) - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self) -> nn.Module: - return self.vision_model.embeddings.patch_embedding - - @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) - def forward( - self, - pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutputWithPooling]: - r""" - Returns: - - Examples: - - ```python - >>> from PIL import Image - >>> import requests - >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel - - >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") - 
>>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") - - >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" - >>> image = Image.open(requests.get(url, stream=True).raw) - - >>> inputs = processor(images=image, return_tensors="pt") - - >>> outputs = model(**inputs) - >>> last_hidden_state = outputs.last_hidden_state - >>> pooled_output = outputs.pooler_output # pooled CLS states - ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - return self.vision_model( - pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - -@add_start_docstrings(CHINESE_CLIP_START_DOCSTRING) -class ChineseCLIPModel(ChineseCLIPPreTrainedModel): - config_class = ChineseCLIPConfig - - def __init__(self, config: ChineseCLIPConfig): - super().__init__(config) - - if not isinstance(config.text_config, ChineseCLIPTextConfig): - raise ValueError( - "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type" - f" {type(config.text_config)}." - ) - - if not isinstance(config.vision_config, ChineseCLIPVisionConfig): - raise ValueError( - "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type" - f" {type(config.vision_config)}." - ) - - text_config = config.text_config - vision_config = config.vision_config - - self.projection_dim = config.projection_dim - self.text_embed_dim = text_config.hidden_size - self.vision_embed_dim = vision_config.hidden_size - - self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False) - self.vision_model = ChineseCLIPVisionTransformer(vision_config) - - self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) - self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) - self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING) - def get_text_features( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> torch.FloatTensor: - r""" - Returns: - text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by - applying the projection layer to the final [CLS] hidden state of Text-Transformer. - - Examples: - - ```python - >>> from transformers import AutoTokenizer, ChineseCLIPModel - - >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") - >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") - - >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt") - >>> text_features = model.get_text_features(**inputs) - >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) - ```""" - # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
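-        # Note: the text tower is built with add_pooling_layer=False, so the projection below is applied
-        # to the final hidden state of the first ([CLS]) token rather than to a BERT-style pooler output.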
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - text_outputs = self.text_model( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - pooled_output = text_outputs[0][:, 0, :] - text_features = self.text_projection(pooled_output) - - return text_features - - @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) - def get_image_features( - self, - pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> torch.FloatTensor: - r""" - Returns: - image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by - applying the projection layer to the final [CLS] hidden state of Vision-Transformer. - - Examples: - - ```python - >>> from PIL import Image - >>> import requests - >>> from transformers import AutoProcessor, ChineseCLIPModel - - >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") - >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") - - >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" - >>> image = Image.open(requests.get(url, stream=True).raw) - - >>> inputs = processor(images=image, return_tensors="pt") - - >>> image_features = model.get_image_features(**inputs) - >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) - ```""" - # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
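-        # Note: vision_outputs[1] is the pooled output of the vision transformer, i.e. the [CLS] token's
-        # final hidden state after post_layernorm; the visual projection maps it into the shared embedding space.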
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - vision_outputs = self.vision_model( - pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - pooled_output = vision_outputs[1] # pooled_output - image_features = self.visual_projection(pooled_output) - - return image_features - - @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig) - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - pixel_values: Optional[torch.FloatTensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - return_loss: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, ChineseCLIPOutput]: - r""" - Returns: - - Examples: - - ```python - >>> from PIL import Image - >>> import requests - >>> from transformers import AutoProcessor, ChineseCLIPModel - - >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") - >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") - - >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" - >>> image = Image.open(requests.get(url, stream=True).raw) - - >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True) - - >>> outputs = model(**inputs) - >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score - >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities - ```""" - # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
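-        # The forward pass below L2-normalizes both embeddings and computes scaled cosine-similarity
-        # logits; when return_loss=True, the symmetric contrastive loss from chinese_clip_loss is returned.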
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - vision_outputs = self.vision_model( - pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - text_outputs = self.text_model( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - image_embeds = vision_outputs[1] - image_embeds = self.visual_projection(image_embeds) - - text_embeds = text_outputs[0][:, 0, :] - text_embeds = self.text_projection(text_embeds) - - # normalized features - image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) - text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) - - # cosine similarity as logits - logit_scale = self.logit_scale.exp() - logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale - logits_per_image = logits_per_text.t() - - loss = None - if return_loss: - loss = chinese_clip_loss(logits_per_text) - - if not return_dict: - # fix the None pooled_output of text_outputs to conform with dict_output - pooled_output = text_outputs[1] - if pooled_output is None: - text_outputs = (text_outputs[0],) + text_outputs[2:] - output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) - return ((loss,) + output) if loss is not None else output - - return ChineseCLIPOutput( - loss=loss, - logits_per_image=logits_per_image, - logits_per_text=logits_per_text, - text_embeds=text_embeds, - image_embeds=image_embeds, - text_model_output=text_outputs, - vision_model_output=vision_outputs, - ) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/flaubert/modeling_tf_flaubert.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/flaubert/modeling_tf_flaubert.py deleted file mode 100644 index 068119d35f1709e2ad4380e70ab14c38e5eb70b1..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/flaubert/modeling_tf_flaubert.py +++ /dev/null @@ -1,1213 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" - TF 2.0 Flaubert model. 
-""" - - -from __future__ import annotations - -import itertools -import random -import warnings -from dataclasses import dataclass -from typing import Dict, Optional, Tuple, Union - -import numpy as np -import tensorflow as tf - -from ...activations_tf import get_tf_activation -from ...modeling_tf_outputs import ( - TFBaseModelOutput, - TFMultipleChoiceModelOutput, - TFQuestionAnsweringModelOutput, - TFSequenceClassifierOutput, - TFTokenClassifierOutput, -) -from ...modeling_tf_utils import ( - TFModelInputType, - TFMultipleChoiceLoss, - TFPreTrainedModel, - TFQuestionAnsweringLoss, - TFSequenceClassificationLoss, - TFSequenceSummary, - TFSharedEmbeddings, - TFTokenClassificationLoss, - get_initializer, - keras_serializable, - unpack_inputs, -) -from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax -from ...utils import ( - MULTIPLE_CHOICE_DUMMY_INPUTS, - ModelOutput, - add_code_sample_docstrings, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, -) -from .configuration_flaubert import FlaubertConfig - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "flaubert/flaubert_base_cased" -_CONFIG_FOR_DOC = "FlaubertConfig" - -TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ - # See all Flaubert models at https://huggingface.co/models?filter=flaubert -] - -FLAUBERT_START_DOCSTRING = r""" - - This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the - library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it - as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and - behavior. - - <Tip> - - TensorFlow models and layers in `transformers` accept two formats as input: - - - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional argument. - - The reason the second format is supported is that Keras methods prefer this format when passing inputs to models - and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just - pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second - format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with - the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first - positional argument: - - - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: - `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - - a dictionary with one or several input Tensors associated to the input names given in the docstring: - `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` - - Note that when creating models and layers with - [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry - about any of this, as you can just pass inputs like you would to any other Python function! - - </Tip> - - Parameters: - config ([`FlaubertConfig`]): Model configuration class with all the parameters of the model. 
- Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -FLAUBERT_INPUTS_DOCSTRING = r""" - Args: - input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and - [`PreTrainedTokenizer.encode`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - `1` for tokens that are **not masked**, - - `0` for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - langs (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): - A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are - languages ids which can be obtained from the language names by using two conversion mappings provided in - the configuration of the model (only provided for multilingual models). More precisely, the *language name - to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the - *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string). - - See usage examples detailed in the [multilingual documentation](../multilingual). - token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): - Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, - 1]`: - - - `0` corresponds to a *sentence A* token, - - `1` corresponds to a *sentence B* token. - - [What are token type IDs?](../glossary#token-type-ids) - position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - lengths (`tf.Tensor` or `Numpy array` of shape `(batch_size,)`, *optional*): - Length of each sentence that can be used to avoid performing attention on padding token indices. You can - also use *attention_mask* for the same result (see above), kept here for compatibility Indices selected in - `[0, ..., input_ids.size(-1)]`: - cache (`Dict[str, tf.Tensor]`, *optional*): - Dictionary string to `tf.FloatTensor` that contains precomputed hidden states (key and values in the - attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential - decoding. - - The dictionary object will be modified in-place during the forward pass to add newly computed - hidden-states. - head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): - Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - - - `1` indicates the head is **not masked**, - - `0` indicates the head is **masked**. - - inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This - is useful if you want more control over how to convert `input_ids` indices into associated vectors than the - model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the - config will be used instead. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. This argument can be used only in eager mode, in graph mode the value in the config will be - used instead. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in - eager mode, in graph mode the value will always be set to True. - training (`bool`, *optional*, defaults to `False`): - Whether or not to use the model in training mode (some modules like dropout modules have different - behaviors between training and evaluation). -""" - - -def get_masks(slen, lengths, causal, padding_mask=None): - """ - Generate hidden states mask, and optionally an attention mask. - """ - bs = shape_list(lengths)[0] - if padding_mask is not None: - mask = padding_mask - else: - # assert lengths.max().item() <= slen - alen = tf.range(slen, dtype=lengths.dtype) - mask = alen < tf.expand_dims(lengths, axis=1) - - # attention mask is the same as mask, or triangular inferior attention (causal) - if causal: - attn_mask = tf.less_equal( - tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1)) - ) - else: - attn_mask = mask - - # sanity check - # assert shape_list(mask) == [bs, slen] - tf.debugging.assert_equal(shape_list(mask), [bs, slen]) - if causal: - tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen]) - - return mask, attn_mask - - -class TFFlaubertPreTrainedModel(TFPreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. 
- """ - - config_class = FlaubertConfig - base_model_prefix = "transformer" - - @property - def dummy_inputs(self): - # Sometimes Flaubert has language embeddings so don't forget to build them as well if needed - inputs_list = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]], dtype=tf.int32) - attns_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32) - if self.config.use_lang_emb and self.config.n_langs > 1: - return { - "input_ids": inputs_list, - "attention_mask": attns_list, - "langs": tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32), - } - else: - return {"input_ids": inputs_list, "attention_mask": attns_list} - - -@add_start_docstrings( - "The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.", - FLAUBERT_START_DOCSTRING, -) -class TFFlaubertModel(TFFlaubertPreTrainedModel): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.transformer = TFFlaubertMainLayer(config, name="transformer") - - @unpack_inputs - @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFBaseModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: np.ndarray | tf.Tensor | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - langs: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - lengths: np.ndarray | tf.Tensor | None = None, - cache: Optional[Dict[str, tf.Tensor]] = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - training: Optional[bool] = False, - ) -> Union[Tuple, TFBaseModelOutput]: - outputs = self.transformer( - input_ids=input_ids, - attention_mask=attention_mask, - langs=langs, - token_type_ids=token_type_ids, - position_ids=position_ids, - lengths=lengths, - cache=cache, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - - return outputs - - -# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMMultiHeadAttention with XLM->Flaubert -class TFFlaubertMultiHeadAttention(tf.keras.layers.Layer): - NEW_ID = itertools.count() - - def __init__(self, n_heads, dim, config, **kwargs): - super().__init__(**kwargs) - self.layer_id = next(TFFlaubertMultiHeadAttention.NEW_ID) - self.dim = dim - self.n_heads = n_heads - self.output_attentions = config.output_attentions - assert self.dim % self.n_heads == 0 - - self.q_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="q_lin") - self.k_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="k_lin") - self.v_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="v_lin") - self.out_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="out_lin") - self.dropout = tf.keras.layers.Dropout(config.attention_dropout) - self.pruned_heads = set() - - def prune_heads(self, heads): - raise NotImplementedError - - def call(self, input, mask, kv, cache, head_mask, output_attentions, training=False): - """ - 
Self-attention (if kv is None) or attention over source sentence (provided by kv). - """ - # Input is (bs, qlen, dim) - # Mask is (bs, klen) (non-causal) or (bs, klen, klen) - bs, qlen, dim = shape_list(input) - - if kv is None: - klen = qlen if cache is None else cache["slen"] + qlen - else: - klen = shape_list(kv)[1] - - # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured' - dim_per_head = self.dim // self.n_heads - mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen) - - def shape(x): - """projection""" - return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3)) - - def unshape(x): - """compute context""" - return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head)) - - q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head) - - if kv is None: - k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head) - v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head) - elif cache is None or self.layer_id not in cache: - k = v = kv - k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head) - v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head) - - if cache is not None: - if self.layer_id in cache: - if kv is None: - k_, v_ = cache[self.layer_id] - k = tf.concat([k_, k], axis=2) # (bs, n_heads, klen, dim_per_head) - v = tf.concat([v_, v], axis=2) # (bs, n_heads, klen, dim_per_head) - else: - k, v = cache[self.layer_id] - - cache[self.layer_id] = (k, v) - - f_dim_per_head = tf.cast(dim_per_head, dtype=q.dtype) - q = tf.multiply(q, tf.math.rsqrt(f_dim_per_head)) # (bs, n_heads, qlen, dim_per_head) - k = tf.cast(k, dtype=q.dtype) - scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, qlen, klen) - mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen) - # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen) - mask = tf.cast(mask, dtype=scores.dtype) - scores = scores - 1e30 * (1.0 - mask) - weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen) - weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen) - - # Mask heads if we want to - if head_mask is not None: - weights = weights * head_mask - - context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head) - context = unshape(context) # (bs, qlen, dim) - outputs = (self.out_lin(context),) - - if output_attentions: - outputs = outputs + (weights,) - - return outputs - - -# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMTransformerFFN -class TFFlaubertTransformerFFN(tf.keras.layers.Layer): - def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs): - super().__init__(**kwargs) - - self.lin1 = tf.keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name="lin1") - self.lin2 = tf.keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name="lin2") - self.act = get_tf_activation("gelu") if config.gelu_activation else get_tf_activation("relu") - self.dropout = tf.keras.layers.Dropout(config.dropout) - - def call(self, input, training=False): - x = self.lin1(input) - x = self.act(x) - x = self.lin2(x) - x = self.dropout(x, training=training) - - return x - - -@keras_serializable -class TFFlaubertMainLayer(tf.keras.layers.Layer): - config_class = FlaubertConfig - - def __init__(self, config, **kwargs): - super().__init__(**kwargs) - - self.config = config - self.n_heads = config.n_heads - self.n_langs = config.n_langs - self.dim = 
config.emb_dim - self.hidden_dim = self.dim * 4 - self.n_words = config.n_words - self.pad_index = config.pad_index - self.causal = config.causal - self.n_layers = config.n_layers - self.use_lang_emb = config.use_lang_emb - self.layerdrop = getattr(config, "layerdrop", 0.0) - self.pre_norm = getattr(config, "pre_norm", False) - self.output_attentions = config.output_attentions - self.output_hidden_states = config.output_hidden_states - self.return_dict = config.use_return_dict - self.max_position_embeddings = config.max_position_embeddings - self.embed_init_std = config.embed_init_std - self.dropout = tf.keras.layers.Dropout(config.dropout) - self.embeddings = TFSharedEmbeddings( - self.n_words, self.dim, initializer_range=config.embed_init_std, name="embeddings" - ) - self.layer_norm_emb = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm_emb") - self.attentions = [] - self.layer_norm1 = [] - self.ffns = [] - self.layer_norm2 = [] - - for i in range(self.n_layers): - self.attentions.append( - TFFlaubertMultiHeadAttention(self.n_heads, self.dim, config=config, name=f"attentions_._{i}") - ) - self.layer_norm1.append( - tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm1_._{i}") - ) - # if self.is_decoder: - # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) - # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout)) - self.ffns.append( - TFFlaubertTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name=f"ffns_._{i}") - ) - self.layer_norm2.append( - tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm2_._{i}") - ) - - def build(self, input_shape): - with tf.name_scope("position_embeddings"): - self.position_embeddings = self.add_weight( - name="embeddings", - shape=[self.max_position_embeddings, self.dim], - initializer=get_initializer(self.embed_init_std), - ) - - if self.n_langs > 1 and self.use_lang_emb: - with tf.name_scope("lang_embeddings"): - self.lang_embeddings = self.add_weight( - name="embeddings", - shape=[self.n_langs, self.dim], - initializer=get_initializer(self.embed_init_std), - ) - - super().build(input_shape) - - def get_input_embeddings(self): - return self.embeddings - - def set_input_embeddings(self, value): - self.embeddings.weight = value - self.embeddings.vocab_size = shape_list(value)[0] - - @unpack_inputs - def call( - self, - input_ids: np.ndarray | tf.Tensor | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - langs: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - lengths: np.ndarray | tf.Tensor | None = None, - cache: Optional[Dict[str, tf.Tensor]] = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - training: Optional[bool] = False, - ) -> Union[Tuple, TFBaseModelOutput]: - # removed: src_enc=None, src_len=None - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - bs, slen = shape_list(input_ids) - elif inputs_embeds is not None: - bs, slen = shape_list(inputs_embeds)[:2] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - 
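The block below first infers `lengths` by counting non-padding tokens and then calls `get_masks` to build the hidden-state and attention masks. A small self-contained sketch of that logic, with toy ids and an assumed pad index, may help:

```python
import tensorflow as tf

pad_index = 2  # assumed value; the real one comes from config.pad_index
input_ids = tf.constant([[5, 6, 7, 2, 2],
                         [8, 9, 2, 2, 2]])

# lengths: number of non-pad tokens per sequence -> [3, 2]
lengths = tf.reduce_sum(tf.cast(tf.not_equal(input_ids, pad_index), tf.int32), axis=1)

# hidden-state mask: True for real tokens, False for padding -> shape (bs, slen)
slen = tf.shape(input_ids)[1]
alen = tf.range(slen)
mask = alen[None, :] < lengths[:, None]

# causal attention mask (only when config.causal is True): position i may
# attend to positions j <= i -> shape (slen, slen), broadcast over the batch
attn_mask = alen[None, :] <= alen[:, None]
```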
- if lengths is None: - if input_ids is not None: - lengths = tf.reduce_sum( - tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=input_ids.dtype), axis=1 - ) - else: - lengths = tf.convert_to_tensor([slen] * bs) - # mask = input_ids != self.pad_index - - # check inputs - # assert shape_list(lengths)[0] == bs - tf.debugging.assert_equal( - shape_list(lengths)[0], bs - ), f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched" - # assert lengths.max().item() <= slen - # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0 - # assert (src_enc is None) == (src_len is None) - # if src_enc is not None: - # assert self.is_decoder - # assert src_enc.size(0) == bs - - # generate masks - mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask) - # if self.is_decoder and src_enc is not None: - # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None] - - # position_ids - if position_ids is None: - position_ids = tf.expand_dims(tf.range(slen), axis=0) - position_ids = tf.tile(position_ids, (bs, 1)) - - # assert shape_list(position_ids) == [bs, slen] # (slen, bs) - tf.debugging.assert_equal( - shape_list(position_ids), [bs, slen] - ), f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched" - # position_ids = position_ids.transpose(0, 1) - - # langs - if langs is not None: - # assert shape_list(langs) == [bs, slen] # (slen, bs) - tf.debugging.assert_equal( - shape_list(langs), [bs, slen] - ), f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched" - # langs = langs.transpose(0, 1) - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen] - if head_mask is not None: - raise NotImplementedError - else: - head_mask = [None] * self.n_layers - - # do not recompute cached elements - if cache is not None and input_ids is not None: - _slen = slen - cache["slen"] - input_ids = input_ids[:, -_slen:] - position_ids = position_ids[:, -_slen:] - if langs is not None: - langs = langs[:, -_slen:] - mask = mask[:, -_slen:] - attn_mask = attn_mask[:, -_slen:] - - # embeddings - if inputs_embeds is None: - check_embeddings_within_bounds(input_ids, self.embeddings.vocab_size) - inputs_embeds = self.embeddings(input_ids) - - tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids) - - if langs is not None and self.use_lang_emb: - tensor = tensor + tf.gather(self.lang_embeddings, langs) - if token_type_ids is not None: - tensor = tensor + self.embeddings(token_type_ids) - - tensor = self.layer_norm_emb(tensor) - tensor = self.dropout(tensor, training=training) - mask = tf.cast(mask, dtype=tensor.dtype) - tensor = tensor * tf.expand_dims(mask, axis=-1) - - # hidden_states and attentions cannot be None in graph mode. 
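The embedding assembly just above sums word, position, and (optionally) language and token-type embeddings before the layer norm, dropout, and padding mask are applied. A toy recap with hypothetical table sizes:

```python
import tensorflow as tf

# Hypothetical tiny embedding tables standing in for the layer's weights.
vocab_size, n_positions, n_langs, dim = 10, 8, 2, 4
word_emb = tf.random.normal((vocab_size, dim))
position_emb = tf.random.normal((n_positions, dim))
lang_emb = tf.random.normal((n_langs, dim))

input_ids = tf.constant([[5, 6, 7]])
position_ids = tf.constant([[0, 1, 2]])
langs = tf.constant([[1, 1, 1]])

tensor = tf.gather(word_emb, input_ids) + tf.gather(position_emb, position_ids)
tensor = tensor + tf.gather(lang_emb, langs)  # only when use_lang_emb and n_langs > 1
# the real layer then applies layer norm, dropout, and zeroes the padded positions
```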
- hidden_states = () if output_hidden_states else None - attentions = () if output_attentions else None - - # transformer layers - for i in range(self.n_layers): - # LayerDrop - dropout_probability = random.uniform(0, 1) - - if training and (dropout_probability < self.layerdrop): - continue - - if output_hidden_states: - hidden_states = hidden_states + (tensor,) - - # self attention - if not self.pre_norm: - attn_outputs = self.attentions[i]( - tensor, - attn_mask, - None, - cache, - head_mask[i], - output_attentions, - training=training, - ) - attn = attn_outputs[0] - - if output_attentions: - attentions = attentions + (attn_outputs[1],) - - attn = self.dropout(attn, training=training) - tensor = tensor + attn - tensor = self.layer_norm1[i](tensor) - else: - tensor_normalized = self.layer_norm1[i](tensor) - attn_outputs = self.attentions[i]( - tensor_normalized, - attn_mask, - None, - cache, - head_mask[i], - output_attentions, - training=training, - ) - attn = attn_outputs[0] - - if output_attentions: - attentions = attentions + (attn_outputs[1],) - - attn = self.dropout(attn, training=training) - tensor = tensor + attn - - # encoder attention (for decoder only) - # if self.is_decoder and src_enc is not None: - # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache) - # attn = nn.functional.dropout(attn, p=self.dropout, training=self.training) - # tensor = tensor + attn - # tensor = self.layer_norm15[i](tensor) - - # FFN - if not self.pre_norm: - tensor = tensor + self.ffns[i](tensor) - tensor = self.layer_norm2[i](tensor) - else: - tensor_normalized = self.layer_norm2[i](tensor) - tensor = tensor + self.ffns[i](tensor_normalized) - - tensor = tensor * tf.expand_dims(mask, axis=-1) - - # Add last hidden state - if output_hidden_states: - hidden_states = hidden_states + (tensor,) - - # update cache length - if cache is not None: - cache["slen"] += tensor.size(1) - - # move back sequence length to dimension 0 - # tensor = tensor.transpose(0, 1) - - if not return_dict: - return tuple(v for v in [tensor, hidden_states, attentions] if v is not None) - - return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions) - - -# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMPredLayer -class TFFlaubertPredLayer(tf.keras.layers.Layer): - """ - Prediction layer (cross_entropy or adaptive_softmax). - """ - - def __init__(self, config, input_embeddings, **kwargs): - super().__init__(**kwargs) - - self.asm = config.asm - self.n_words = config.n_words - self.pad_index = config.pad_index - - if config.asm is False: - self.input_embeddings = input_embeddings - else: - raise NotImplementedError - # self.proj = nn.AdaptiveLogSoftmaxWithLoss( - # in_features=dim, - # n_classes=config.n_words, - # cutoffs=config.asm_cutoffs, - # div_value=config.asm_div_value, - # head_bias=True, # default is False - # ) - - def build(self, input_shape): - # The output weights are the same as the input embeddings, but there is an output-only bias for each token. 
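As the comment above notes, `TFFlaubertPredLayer` reuses the input embedding matrix and only learns a per-token output bias (added in the `build` below). A toy sketch of that tied projection, with hypothetical sizes:

```python
import tensorflow as tf

# Hypothetical sizes; `embedding_matrix` stands in for the shared input embeddings.
n_words, dim = 10, 4
embedding_matrix = tf.random.normal((n_words, dim))
bias = tf.zeros((n_words,))

hidden_states = tf.random.normal((1, 3, dim))
# "linear" mode of the shared embeddings: project back onto the vocabulary.
logits = tf.einsum("bld,vd->blv", hidden_states, embedding_matrix) + bias  # (1, 3, n_words)
```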
- self.bias = self.add_weight(shape=(self.n_words,), initializer="zeros", trainable=True, name="bias") - - super().build(input_shape) - - def get_output_embeddings(self): - return self.input_embeddings - - def set_output_embeddings(self, value): - self.input_embeddings.weight = value - self.input_embeddings.vocab_size = shape_list(value)[0] - - def get_bias(self): - return {"bias": self.bias} - - def set_bias(self, value): - self.bias = value["bias"] - self.vocab_size = shape_list(value["bias"])[0] - - def call(self, hidden_states): - hidden_states = self.input_embeddings(hidden_states, mode="linear") - hidden_states = hidden_states + self.bias - - return hidden_states - - -@dataclass -class TFFlaubertWithLMHeadModelOutput(ModelOutput): - """ - Base class for [`TFFlaubertWithLMHeadModel`] outputs. - - Args: - logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): - Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). - hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape - `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - """ - - logits: tf.Tensor = None - hidden_states: Tuple[tf.Tensor] | None = None - attentions: Tuple[tf.Tensor] | None = None - - -@add_start_docstrings( - """ - The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input - embeddings). - """, - FLAUBERT_START_DOCSTRING, -) -class TFFlaubertWithLMHeadModel(TFFlaubertPreTrainedModel): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.transformer = TFFlaubertMainLayer(config, name="transformer") - self.pred_layer = TFFlaubertPredLayer(config, self.transformer.embeddings, name="pred_layer_._proj") - # Flaubert does not have past caching features - self.supports_xla_generation = False - - def get_lm_head(self): - return self.pred_layer - - def get_prefix_bias_name(self): - warnings.warn("The method get_prefix_bias_name is deprecated. 
Please use `get_bias` instead.", FutureWarning) - return self.name + "/" + self.pred_layer.name - - def prepare_inputs_for_generation(self, inputs, **kwargs): - mask_token_id = self.config.mask_token_id - lang_id = self.config.lang_id - - effective_batch_size = inputs.shape[0] - mask_token = tf.fill((effective_batch_size, 1), 1) * mask_token_id - inputs = tf.concat([inputs, mask_token], axis=1) - - if lang_id is not None: - langs = tf.ones_like(inputs) * lang_id - else: - langs = None - return {"input_ids": inputs, "langs": langs} - - @unpack_inputs - @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFFlaubertWithLMHeadModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: np.ndarray | tf.Tensor | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - langs: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - lengths: np.ndarray | tf.Tensor | None = None, - cache: Optional[Dict[str, tf.Tensor]] = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - training: Optional[bool] = False, - ) -> Union[Tuple, TFFlaubertWithLMHeadModelOutput]: - transformer_outputs = self.transformer( - input_ids=input_ids, - attention_mask=attention_mask, - langs=langs, - token_type_ids=token_type_ids, - position_ids=position_ids, - lengths=lengths, - cache=cache, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - output = transformer_outputs[0] - outputs = self.pred_layer(output) - - if not return_dict: - return (outputs,) + transformer_outputs[1:] - - return TFFlaubertWithLMHeadModelOutput( - logits=outputs, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions - ) - - -@add_start_docstrings( - """ - Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) - e.g. for GLUE tasks. 
- """, - FLAUBERT_START_DOCSTRING, -) -# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForSequenceClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert -class TFFlaubertForSequenceClassification(TFFlaubertPreTrainedModel, TFSequenceClassificationLoss): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.num_labels = config.num_labels - - self.transformer = TFFlaubertMainLayer(config, name="transformer") - self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary") - - @unpack_inputs - @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFSequenceClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - langs: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - lengths: np.ndarray | tf.Tensor | None = None, - cache: Optional[Dict[str, tf.Tensor]] = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: np.ndarray | tf.Tensor | None = None, - training: bool = False, - ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: - r""" - labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): - Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., - config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If - `config.num_labels > 1` a classification loss is computed (Cross-Entropy). - """ - transformer_outputs = self.transformer( - input_ids=input_ids, - attention_mask=attention_mask, - langs=langs, - token_type_ids=token_type_ids, - position_ids=position_ids, - lengths=lengths, - cache=cache, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - output = transformer_outputs[0] - - logits = self.sequence_summary(output) - - loss = None if labels is None else self.hf_compute_loss(labels, logits) - - if not return_dict: - output = (logits,) + transformer_outputs[1:] - return ((loss,) + output) if loss is not None else output - - return TFSequenceClassifierOutput( - loss=loss, - logits=logits, - hidden_states=transformer_outputs.hidden_states, - attentions=transformer_outputs.attentions, - ) - - -@add_start_docstrings( - """ - Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear - layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
- """, - FLAUBERT_START_DOCSTRING, -) -# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForQuestionAnsweringSimple with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert -class TFFlaubertForQuestionAnsweringSimple(TFFlaubertPreTrainedModel, TFQuestionAnsweringLoss): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.transformer = TFFlaubertMainLayer(config, name="transformer") - self.qa_outputs = tf.keras.layers.Dense( - config.num_labels, kernel_initializer=get_initializer(config.init_std), name="qa_outputs" - ) - - @unpack_inputs - @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFQuestionAnsweringModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - langs: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - lengths: np.ndarray | tf.Tensor | None = None, - cache: Optional[Dict[str, tf.Tensor]] = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - start_positions: np.ndarray | tf.Tensor | None = None, - end_positions: np.ndarray | tf.Tensor | None = None, - training: bool = False, - ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: - r""" - start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the start of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence - are not taken into account for computing the loss. - end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the end of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence - are not taken into account for computing the loss. 
- """ - transformer_outputs = self.transformer( - input_ids=input_ids, - attention_mask=attention_mask, - langs=langs, - token_type_ids=token_type_ids, - position_ids=position_ids, - lengths=lengths, - cache=cache, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - sequence_output = transformer_outputs[0] - - logits = self.qa_outputs(sequence_output) - start_logits, end_logits = tf.split(logits, 2, axis=-1) - start_logits = tf.squeeze(start_logits, axis=-1) - end_logits = tf.squeeze(end_logits, axis=-1) - - loss = None - if start_positions is not None and end_positions is not None: - labels = {"start_position": start_positions} - labels["end_position"] = end_positions - loss = self.hf_compute_loss(labels, (start_logits, end_logits)) - - if not return_dict: - output = (start_logits, end_logits) + transformer_outputs[1:] - return ((loss,) + output) if loss is not None else output - - return TFQuestionAnsweringModelOutput( - loss=loss, - start_logits=start_logits, - end_logits=end_logits, - hidden_states=transformer_outputs.hidden_states, - attentions=transformer_outputs.attentions, - ) - - -@add_start_docstrings( - """ - Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for - Named-Entity-Recognition (NER) tasks. - """, - FLAUBERT_START_DOCSTRING, -) -# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForTokenClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert -class TFFlaubertForTokenClassification(TFFlaubertPreTrainedModel, TFTokenClassificationLoss): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.num_labels = config.num_labels - - self.transformer = TFFlaubertMainLayer(config, name="transformer") - self.dropout = tf.keras.layers.Dropout(config.dropout) - self.classifier = tf.keras.layers.Dense( - config.num_labels, kernel_initializer=get_initializer(config.init_std), name="classifier" - ) - - @unpack_inputs - @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFTokenClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - langs: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - lengths: np.ndarray | tf.Tensor | None = None, - cache: Optional[Dict[str, tf.Tensor]] = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: np.ndarray | tf.Tensor | None = None, - training: bool = False, - ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: - r""" - labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
- """ - transformer_outputs = self.transformer( - input_ids=input_ids, - attention_mask=attention_mask, - langs=langs, - token_type_ids=token_type_ids, - position_ids=position_ids, - lengths=lengths, - cache=cache, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - sequence_output = transformer_outputs[0] - - sequence_output = self.dropout(sequence_output, training=training) - logits = self.classifier(sequence_output) - - loss = None if labels is None else self.hf_compute_loss(labels, logits) - - if not return_dict: - output = (logits,) + transformer_outputs[1:] - return ((loss,) + output) if loss is not None else output - - return TFTokenClassifierOutput( - loss=loss, - logits=logits, - hidden_states=transformer_outputs.hidden_states, - attentions=transformer_outputs.attentions, - ) - - -@add_start_docstrings( - """ - Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a - softmax) e.g. for RocStories/SWAG tasks. - """, - FLAUBERT_START_DOCSTRING, -) -# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForMultipleChoice with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert -class TFFlaubertForMultipleChoice(TFFlaubertPreTrainedModel, TFMultipleChoiceLoss): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - - self.transformer = TFFlaubertMainLayer(config, name="transformer") - self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary") - self.logits_proj = tf.keras.layers.Dense( - 1, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj" - ) - - @property - def dummy_inputs(self): - """ - Dummy inputs to build the network. 
- - Returns: - tf.Tensor with dummy inputs - """ - # Sometimes Flaubert has language embeddings so don't forget to build them as well if needed - if self.config.use_lang_emb and self.config.n_langs > 1: - return { - "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32), - "langs": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32), - } - else: - return { - "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32), - } - - @unpack_inputs - @add_start_docstrings_to_model_forward( - FLAUBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") - ) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFMultipleChoiceModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - langs: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - lengths: np.ndarray | tf.Tensor | None = None, - cache: Optional[Dict[str, tf.Tensor]] = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: np.ndarray | tf.Tensor | None = None, - training: bool = False, - ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: - if input_ids is not None: - num_choices = shape_list(input_ids)[1] - seq_length = shape_list(input_ids)[2] - else: - num_choices = shape_list(inputs_embeds)[1] - seq_length = shape_list(inputs_embeds)[2] - - flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None - flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None - flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None - flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None - flat_langs = tf.reshape(langs, (-1, seq_length)) if langs is not None else None - flat_inputs_embeds = ( - tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) - if inputs_embeds is not None - else None - ) - - if lengths is not None: - logger.warning( - "The `lengths` parameter cannot be used with the Flaubert multiple choice models. 
Please use the " - "attention mask instead.", - ) - lengths = None - - transformer_outputs = self.transformer( - flat_input_ids, - flat_attention_mask, - flat_langs, - flat_token_type_ids, - flat_position_ids, - lengths, - cache, - head_mask, - flat_inputs_embeds, - output_attentions, - output_hidden_states, - return_dict=return_dict, - training=training, - ) - output = transformer_outputs[0] - logits = self.sequence_summary(output) - logits = self.logits_proj(logits) - reshaped_logits = tf.reshape(logits, (-1, num_choices)) - - loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) - - if not return_dict: - output = (reshaped_logits,) + transformer_outputs[1:] - return ((loss,) + output) if loss is not None else output - - return TFMultipleChoiceModelOutput( - loss=loss, - logits=reshaped_logits, - hidden_states=transformer_outputs.hidden_states, - attentions=transformer_outputs.attentions, - ) diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/flask_api.py b/spaces/yl12053/so-vits-4.1-Kitasan-Black/flask_api.py deleted file mode 100644 index b3f1e06847b2711a8e5841a4c95375445470d2ee..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Kitasan-Black/flask_api.py +++ /dev/null @@ -1,60 +0,0 @@ -import io -import logging - -import soundfile -import torch -import torchaudio -from flask import Flask, request, send_file -from flask_cors import CORS - -from inference.infer_tool import Svc, RealTimeVC - -app = Flask(__name__) - -CORS(app) - -logging.getLogger('numba').setLevel(logging.WARNING) - - -@app.route("/voiceChangeModel", methods=["POST"]) -def voice_change_model(): - request_form = request.form - wave_file = request.files.get("sample", None) - # 变调信息 - f_pitch_change = float(request_form.get("fPitchChange", 0)) - # DAW所需的采样率 - daw_sample = int(float(request_form.get("sampleRate", 0))) - speaker_id = int(float(request_form.get("sSpeakId", 0))) - # http获得wav文件并转换 - input_wav_path = io.BytesIO(wave_file.read()) - - # 模型推理 - if raw_infer: - # out_audio, out_sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path) - out_audio, out_sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path, cluster_infer_ratio=0, - auto_predict_f0=False, noice_scale=0.4, f0_filter=False) - tar_audio = torchaudio.functional.resample(out_audio, svc_model.target_sample, daw_sample) - else: - out_audio = svc.process(svc_model, speaker_id, f_pitch_change, input_wav_path, cluster_infer_ratio=0, - auto_predict_f0=False, noice_scale=0.4, f0_filter=False) - tar_audio = torchaudio.functional.resample(torch.from_numpy(out_audio), svc_model.target_sample, daw_sample) - # 返回音频 - out_wav_path = io.BytesIO() - soundfile.write(out_wav_path, tar_audio.cpu().numpy(), daw_sample, format="wav") - out_wav_path.seek(0) - return send_file(out_wav_path, download_name="temp.wav", as_attachment=True) - - -if __name__ == '__main__': - # 启用则为直接切片合成,False为交叉淡化方式 - # vst插件调整0.3-0.5s切片时间可以降低延迟,直接切片方法会有连接处爆音、交叉淡化会有轻微重叠声音 - # 自行选择能接受的方法,或将vst最大切片时间调整为1s,此处设为Ture,延迟大音质稳定一些 - raw_infer = True - # 每个模型和config是唯一对应的 - model_name = "logs/32k/G_174000-Copy1.pth" - config_name = "configs/config.json" - cluster_model_path = "logs/44k/kmeans_10000.pt" - svc_model = Svc(model_name, config_name, cluster_model_path=cluster_model_path) - svc = RealTimeVC() - # 此处与vst插件对应,不建议更改 - app.run(port=6842, host="0.0.0.0", debug=False, threaded=False) diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/hubert/hubert_model_onnx.py 
b/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/hubert/hubert_model_onnx.py deleted file mode 100644 index d18f3c2a0fc29592a573a9780308d38f059640b9..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/hubert/hubert_model_onnx.py +++ /dev/null @@ -1,217 +0,0 @@ -import copy -import random -from typing import Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as t_func -from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - super().__init__() - self._mask = mask - self.feature_extractor = FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - -class HubertSoft(Hubert): - def __init__(self): - super().__init__() - - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - def forward(self, x): - return self.units(x) - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = t_func.gelu(self.norm0(self.conv0(x))) - x = t_func.gelu(self.conv1(x)) - x = t_func.gelu(self.conv2(x)) - x = t_func.gelu(self.conv3(x)) - x = t_func.gelu(self.conv4(x)) - x = t_func.gelu(self.conv5(x)) - x = t_func.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) - return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - 
kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = t_func.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str, -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`. 
- Args: - path (str): path of a pretrained model - """ - hubert = HubertSoft() - checkpoint = torch.load(path) - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval() - return hubert diff --git a/spaces/ynhe/AskAnything/models/grit_src/grit/modeling/soft_nms.py b/spaces/ynhe/AskAnything/models/grit_src/grit/modeling/soft_nms.py deleted file mode 100644 index 6a5aae7c4261191b8e07e0fd25055d8917f7f97d..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/grit/modeling/soft_nms.py +++ /dev/null @@ -1,177 +0,0 @@ -import torch - -from detectron2.structures import Boxes, RotatedBoxes, pairwise_iou, pairwise_iou_rotated - - -def soft_nms(boxes, scores, method, gaussian_sigma, linear_threshold, prune_threshold): - """ - Performs soft non-maximum suppression algorithm on axis aligned boxes - - Args: - boxes (Tensor[N, 5]): - boxes where NMS will be performed. They - are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format - scores (Tensor[N]): - scores for each one of the boxes - method (str): - one of ['gaussian', 'linear', 'hard'] - see paper for details. users encouraged not to use "hard", as this is the - same nms available elsewhere in detectron2 - gaussian_sigma (float): - parameter for Gaussian penalty function - linear_threshold (float): - iou threshold for applying linear decay. Nt from the paper - re-used as threshold for standard "hard" nms - prune_threshold (float): - boxes with scores below this threshold are pruned at each iteration. - Dramatically reduces computation time. Authors use values in [10e-4, 10e-2] - - Returns: - tuple(Tensor, Tensor): - [0]: int64 tensor with the indices of the elements that have been kept - by Soft NMS, sorted in decreasing order of scores - [1]: float tensor with the re-scored scores of the elements that were kept -""" - return _soft_nms( - Boxes, - pairwise_iou, - boxes, - scores, - method, - gaussian_sigma, - linear_threshold, - prune_threshold, - ) - - -def batched_soft_nms( - boxes, scores, idxs, method, gaussian_sigma, linear_threshold, prune_threshold -): - """ - Performs soft non-maximum suppression in a batched fashion. - - Each index value correspond to a category, and NMS - will not be applied between elements of different categories. - - Args: - boxes (Tensor[N, 4]): - boxes where NMS will be performed. They - are expected to be in (x1, y1, x2, y2) format - scores (Tensor[N]): - scores for each one of the boxes - idxs (Tensor[N]): - indices of the categories for each one of the boxes. - method (str): - one of ['gaussian', 'linear', 'hard'] - see paper for details. users encouraged not to use "hard", as this is the - same nms available elsewhere in detectron2 - gaussian_sigma (float): - parameter for Gaussian penalty function - linear_threshold (float): - iou threshold for applying linear decay. Nt from the paper - re-used as threshold for standard "hard" nms - prune_threshold (float): - boxes with scores below this threshold are pruned at each iteration. - Dramatically reduces computation time. 
Authors use values in [10e-4, 10e-2] - Returns: - tuple(Tensor, Tensor): - [0]: int64 tensor with the indices of the elements that have been kept - by Soft NMS, sorted in decreasing order of scores - [1]: float tensor with the re-scored scores of the elements that were kept - """ - if boxes.numel() == 0: - return ( - torch.empty((0,), dtype=torch.int64, device=boxes.device), - torch.empty((0,), dtype=torch.float32, device=scores.device), - ) - # strategy: in order to perform NMS independently per class. - # we add an offset to all the boxes. The offset is dependent - # only on the class idx, and is large enough so that boxes - # from different classes do not overlap - max_coordinate = boxes.max() - offsets = idxs.to(boxes) * (max_coordinate + 1) - boxes_for_nms = boxes + offsets[:, None] - return soft_nms( - boxes_for_nms, scores, method, gaussian_sigma, linear_threshold, prune_threshold - ) - - -def _soft_nms( - box_class, - pairwise_iou_func, - boxes, - scores, - method, - gaussian_sigma, - linear_threshold, - prune_threshold, -): - """ - Soft non-max suppression algorithm. - - Implementation of [Soft-NMS -- Improving Object Detection With One Line of Codec] - (https://arxiv.org/abs/1704.04503) - - Args: - box_class (cls): one of Box, RotatedBoxes - pairwise_iou_func (func): one of pairwise_iou, pairwise_iou_rotated - boxes (Tensor[N, ?]): - boxes where NMS will be performed - if Boxes, in (x1, y1, x2, y2) format - if RotatedBoxes, in (x_ctr, y_ctr, width, height, angle_degrees) format - scores (Tensor[N]): - scores for each one of the boxes - method (str): - one of ['gaussian', 'linear', 'hard'] - see paper for details. users encouraged not to use "hard", as this is the - same nms available elsewhere in detectron2 - gaussian_sigma (float): - parameter for Gaussian penalty function - linear_threshold (float): - iou threshold for applying linear decay. Nt from the paper - re-used as threshold for standard "hard" nms - prune_threshold (float): - boxes with scores below this threshold are pruned at each iteration. - Dramatically reduces computation time. 
Authors use values in [10e-4, 10e-2] - - Returns: - tuple(Tensor, Tensor): - [0]: int64 tensor with the indices of the elements that have been kept - by Soft NMS, sorted in decreasing order of scores - [1]: float tensor with the re-scored scores of the elements that were kept - """ - boxes = boxes.clone() - scores = scores.clone() - idxs = torch.arange(scores.size()[0]) - - idxs_out = [] - scores_out = [] - - while scores.numel() > 0: - top_idx = torch.argmax(scores) - idxs_out.append(idxs[top_idx].item()) - scores_out.append(scores[top_idx].item()) - - top_box = boxes[top_idx] - ious = pairwise_iou_func(box_class(top_box.unsqueeze(0)), box_class(boxes))[0] - - if method == "linear": - decay = torch.ones_like(ious) - decay_mask = ious > linear_threshold - decay[decay_mask] = 1 - ious[decay_mask] - elif method == "gaussian": - decay = torch.exp(-torch.pow(ious, 2) / gaussian_sigma) - elif method == "hard": # standard NMS - decay = (ious < linear_threshold).float() - else: - raise NotImplementedError("{} soft nms method not implemented.".format(method)) - - scores *= decay - keep = scores > prune_threshold - keep[top_idx] = False - - boxes = boxes[keep] - scores = scores[keep] - idxs = idxs[keep] - - return torch.tensor(idxs_out).to(boxes.device), torch.tensor(scores_out).to(scores.device) \ No newline at end of file diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/engine/hooks.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/engine/hooks.py deleted file mode 100644 index 52c321f979726b8aa89ba34874bc6729a75b70b4..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/engine/hooks.py +++ /dev/null @@ -1,686 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import datetime -import itertools -import logging -import math -import operator -import os -import tempfile -import time -import warnings -from collections import Counter -import torch -from fvcore.common.checkpoint import Checkpointer -from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer -from fvcore.common.param_scheduler import ParamScheduler -from fvcore.common.timer import Timer -from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats - -import detectron2.utils.comm as comm -from detectron2.evaluation.testing import flatten_results_dict -from detectron2.solver import LRMultiplier -from detectron2.utils.events import EventStorage, EventWriter -from detectron2.utils.file_io import PathManager - -from .train_loop import HookBase - -__all__ = [ - "CallbackHook", - "IterationTimer", - "PeriodicWriter", - "PeriodicCheckpointer", - "BestCheckpointer", - "LRScheduler", - "AutogradProfiler", - "EvalHook", - "PreciseBN", - "TorchProfiler", - "TorchMemoryStats", -] - - -""" -Implement some common hooks. -""" - - -class CallbackHook(HookBase): - """ - Create a hook using callback functions provided by the user. - """ - - def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None): - """ - Each argument is a function that takes one argument: the trainer. 
- """ - self._before_train = before_train - self._before_step = before_step - self._after_step = after_step - self._after_train = after_train - - def before_train(self): - if self._before_train: - self._before_train(self.trainer) - - def after_train(self): - if self._after_train: - self._after_train(self.trainer) - # The functions may be closures that hold reference to the trainer - # Therefore, delete them to avoid circular reference. - del self._before_train, self._after_train - del self._before_step, self._after_step - - def before_step(self): - if self._before_step: - self._before_step(self.trainer) - - def after_step(self): - if self._after_step: - self._after_step(self.trainer) - - -class IterationTimer(HookBase): - """ - Track the time spent for each iteration (each run_step call in the trainer). - Print a summary in the end of training. - - This hook uses the time between the call to its :meth:`before_step` - and :meth:`after_step` methods. - Under the convention that :meth:`before_step` of all hooks should only - take negligible amount of time, the :class:`IterationTimer` hook should be - placed at the beginning of the list of hooks to obtain accurate timing. - """ - - def __init__(self, warmup_iter=3): - """ - Args: - warmup_iter (int): the number of iterations at the beginning to exclude - from timing. - """ - self._warmup_iter = warmup_iter - self._step_timer = Timer() - self._start_time = time.perf_counter() - self._total_timer = Timer() - - def before_train(self): - self._start_time = time.perf_counter() - self._total_timer.reset() - self._total_timer.pause() - - def after_train(self): - logger = logging.getLogger(__name__) - total_time = time.perf_counter() - self._start_time - total_time_minus_hooks = self._total_timer.seconds() - hook_time = total_time - total_time_minus_hooks - - num_iter = self.trainer.storage.iter + 1 - self.trainer.start_iter - self._warmup_iter - - if num_iter > 0 and total_time_minus_hooks > 0: - # Speed is meaningful only after warmup - # NOTE this format is parsed by grep in some scripts - logger.info( - "Overall training speed: {} iterations in {} ({:.4f} s / it)".format( - num_iter, - str(datetime.timedelta(seconds=int(total_time_minus_hooks))), - total_time_minus_hooks / num_iter, - ) - ) - - logger.info( - "Total training time: {} ({} on hooks)".format( - str(datetime.timedelta(seconds=int(total_time))), - str(datetime.timedelta(seconds=int(hook_time))), - ) - ) - - def before_step(self): - self._step_timer.reset() - self._total_timer.resume() - - def after_step(self): - # +1 because we're in after_step, the current step is done - # but not yet counted - iter_done = self.trainer.storage.iter - self.trainer.start_iter + 1 - if iter_done >= self._warmup_iter: - sec = self._step_timer.seconds() - self.trainer.storage.put_scalars(time=sec) - else: - self._start_time = time.perf_counter() - self._total_timer.reset() - - self._total_timer.pause() - - -class PeriodicWriter(HookBase): - """ - Write events to EventStorage (by calling ``writer.write()``) periodically. - - It is executed every ``period`` iterations and after the last iteration. - Note that ``period`` does not affect how data is smoothed by each writer. 
- """ - - def __init__(self, writers, period=20): - """ - Args: - writers (list[EventWriter]): a list of EventWriter objects - period (int): - """ - self._writers = writers - for w in writers: - assert isinstance(w, EventWriter), w - self._period = period - - def after_step(self): - if (self.trainer.iter + 1) % self._period == 0 or ( - self.trainer.iter == self.trainer.max_iter - 1 - ): - for writer in self._writers: - writer.write() - - def after_train(self): - for writer in self._writers: - # If any new data is found (e.g. produced by other after_train), - # write them before closing - writer.write() - writer.close() - - -class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase): - """ - Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook. - - Note that when used as a hook, - it is unable to save additional data other than what's defined - by the given `checkpointer`. - - It is executed every ``period`` iterations and after the last iteration. - """ - - def before_train(self): - self.max_iter = self.trainer.max_iter - - def after_step(self): - # No way to use **kwargs - self.step(self.trainer.iter) - - -class BestCheckpointer(HookBase): - """ - Checkpoints best weights based off given metric. - - This hook should be used in conjunction to and executed after the hook - that produces the metric, e.g. `EvalHook`. - """ - - def __init__( - self, - eval_period: int, - checkpointer: Checkpointer, - val_metric: str, - mode: str = "max", - file_prefix: str = "model_best", - ) -> None: - """ - Args: - eval_period (int): the period `EvalHook` is set to run. - checkpointer: the checkpointer object used to save checkpoints. - val_metric (str): validation metric to track for best checkpoint, e.g. "bbox/AP50" - mode (str): one of {'max', 'min'}. controls whether the chosen val metric should be - maximized or minimized, e.g. for "bbox/AP50" it should be "max" - file_prefix (str): the prefix of checkpoint's filename, defaults to "model_best" - """ - self._logger = logging.getLogger(__name__) - self._period = eval_period - self._val_metric = val_metric - assert mode in [ - "max", - "min", - ], f'Mode "{mode}" to `BestCheckpointer` is unknown. It should be one of {"max", "min"}.' - if mode == "max": - self._compare = operator.gt - else: - self._compare = operator.lt - self._checkpointer = checkpointer - self._file_prefix = file_prefix - self.best_metric = None - self.best_iter = None - - def _update_best(self, val, iteration): - if math.isnan(val) or math.isinf(val): - return False - self.best_metric = val - self.best_iter = iteration - return True - - def _best_checking(self): - metric_tuple = self.trainer.storage.latest().get(self._val_metric) - if metric_tuple is None: - self._logger.warning( - f"Given val metric {self._val_metric} does not seem to be computed/stored." - "Will not be checkpointing based on it." 
- ) - return - else: - latest_metric, metric_iter = metric_tuple - - if self.best_metric is None: - if self._update_best(latest_metric, metric_iter): - additional_state = {"iteration": metric_iter} - self._checkpointer.save(f"{self._file_prefix}", **additional_state) - self._logger.info( - f"Saved first model at {self.best_metric:0.5f} @ {self.best_iter} steps" - ) - elif self._compare(latest_metric, self.best_metric): - additional_state = {"iteration": metric_iter} - self._checkpointer.save(f"{self._file_prefix}", **additional_state) - self._logger.info( - f"Saved best model as latest eval score for {self._val_metric} is " - f"{latest_metric:0.5f}, better than last best score " - f"{self.best_metric:0.5f} @ iteration {self.best_iter}." - ) - self._update_best(latest_metric, metric_iter) - else: - self._logger.info( - f"Not saving as latest eval score for {self._val_metric} is {latest_metric:0.5f}, " - f"not better than best score {self.best_metric:0.5f} @ iteration {self.best_iter}." - ) - - def after_step(self): - # same conditions as `EvalHook` - next_iter = self.trainer.iter + 1 - if ( - self._period > 0 - and next_iter % self._period == 0 - and next_iter != self.trainer.max_iter - ): - self._best_checking() - - def after_train(self): - # same conditions as `EvalHook` - if self.trainer.iter + 1 >= self.trainer.max_iter: - self._best_checking() - - -class LRScheduler(HookBase): - """ - A hook which executes a torch builtin LR scheduler and summarizes the LR. - It is executed after every iteration. - """ - - def __init__(self, optimizer=None, scheduler=None): - """ - Args: - optimizer (torch.optim.Optimizer): - scheduler (torch.optim.LRScheduler or fvcore.common.param_scheduler.ParamScheduler): - if a :class:`ParamScheduler` object, it defines the multiplier over the base LR - in the optimizer. - - If any argument is not given, will try to obtain it from the trainer. 
- """ - self._optimizer = optimizer - self._scheduler = scheduler - - def before_train(self): - self._optimizer = self._optimizer or self.trainer.optimizer - if isinstance(self.scheduler, ParamScheduler): - self._scheduler = LRMultiplier( - self._optimizer, - self.scheduler, - self.trainer.max_iter, - last_iter=self.trainer.iter - 1, - ) - self._best_param_group_id = LRScheduler.get_best_param_group_id(self._optimizer) - - @staticmethod - def get_best_param_group_id(optimizer): - # NOTE: some heuristics on what LR to summarize - # summarize the param group with most parameters - largest_group = max(len(g["params"]) for g in optimizer.param_groups) - - if largest_group == 1: - # If all groups have one parameter, - # then find the most common initial LR, and use it for summary - lr_count = Counter([g["lr"] for g in optimizer.param_groups]) - lr = lr_count.most_common()[0][0] - for i, g in enumerate(optimizer.param_groups): - if g["lr"] == lr: - return i - else: - for i, g in enumerate(optimizer.param_groups): - if len(g["params"]) == largest_group: - return i - - def after_step(self): - lr = self._optimizer.param_groups[self._best_param_group_id]["lr"] - self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False) - self.scheduler.step() - - @property - def scheduler(self): - return self._scheduler or self.trainer.scheduler - - def state_dict(self): - if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler): - return self.scheduler.state_dict() - return {} - - def load_state_dict(self, state_dict): - if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler): - logger = logging.getLogger(__name__) - logger.info("Loading scheduler from state_dict ...") - self.scheduler.load_state_dict(state_dict) - - -class TorchProfiler(HookBase): - """ - A hook which runs `torch.profiler.profile`. - - Examples: - :: - hooks.TorchProfiler( - lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR - ) - - The above example will run the profiler for iteration 10~20 and dump - results to ``OUTPUT_DIR``. We did not profile the first few iterations - because they are typically slower than the rest. - The result files can be loaded in the ``chrome://tracing`` page in chrome browser, - and the tensorboard visualizations can be visualized using - ``tensorboard --logdir OUTPUT_DIR/log`` - """ - - def __init__(self, enable_predicate, output_dir, *, activities=None, save_tensorboard=True): - """ - Args: - enable_predicate (callable[trainer -> bool]): a function which takes a trainer, - and returns whether to enable the profiler. - It will be called once every step, and can be used to select which steps to profile. - output_dir (str): the output directory to dump tracing files. - activities (iterable): same as in `torch.profiler.profile`. 
- save_tensorboard (bool): whether to save tensorboard visualizations at (output_dir)/log/ - """ - self._enable_predicate = enable_predicate - self._activities = activities - self._output_dir = output_dir - self._save_tensorboard = save_tensorboard - - def before_step(self): - if self._enable_predicate(self.trainer): - if self._save_tensorboard: - on_trace_ready = torch.profiler.tensorboard_trace_handler( - os.path.join( - self._output_dir, - "log", - "profiler-tensorboard-iter{}".format(self.trainer.iter), - ), - f"worker{comm.get_rank()}", - ) - else: - on_trace_ready = None - self._profiler = torch.profiler.profile( - activities=self._activities, - on_trace_ready=on_trace_ready, - record_shapes=True, - profile_memory=True, - with_stack=True, - with_flops=True, - ) - self._profiler.__enter__() - else: - self._profiler = None - - def after_step(self): - if self._profiler is None: - return - self._profiler.__exit__(None, None, None) - if not self._save_tensorboard: - PathManager.mkdirs(self._output_dir) - out_file = os.path.join( - self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter) - ) - if "://" not in out_file: - self._profiler.export_chrome_trace(out_file) - else: - # Support non-posix filesystems - with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d: - tmp_file = os.path.join(d, "tmp.json") - self._profiler.export_chrome_trace(tmp_file) - with open(tmp_file) as f: - content = f.read() - with PathManager.open(out_file, "w") as f: - f.write(content) - - -class AutogradProfiler(TorchProfiler): - """ - A hook which runs `torch.autograd.profiler.profile`. - - Examples: - :: - hooks.AutogradProfiler( - lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR - ) - - The above example will run the profiler for iteration 10~20 and dump - results to ``OUTPUT_DIR``. We did not profile the first few iterations - because they are typically slower than the rest. - The result files can be loaded in the ``chrome://tracing`` page in chrome browser. - - Note: - When used together with NCCL on older version of GPUs, - autograd profiler may cause deadlock because it unnecessarily allocates - memory on every device it sees. The memory management calls, if - interleaved with NCCL calls, lead to deadlock on GPUs that do not - support ``cudaLaunchCooperativeKernelMultiDevice``. - """ - - def __init__(self, enable_predicate, output_dir, *, use_cuda=True): - """ - Args: - enable_predicate (callable[trainer -> bool]): a function which takes a trainer, - and returns whether to enable the profiler. - It will be called once every step, and can be used to select which steps to profile. - output_dir (str): the output directory to dump tracing files. - use_cuda (bool): same as in `torch.autograd.profiler.profile`. - """ - warnings.warn("AutogradProfiler has been deprecated in favor of TorchProfiler.") - self._enable_predicate = enable_predicate - self._use_cuda = use_cuda - self._output_dir = output_dir - - def before_step(self): - if self._enable_predicate(self.trainer): - self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda) - self._profiler.__enter__() - else: - self._profiler = None - - -class EvalHook(HookBase): - """ - Run an evaluation function periodically, and at the end of training. - - It is executed every ``eval_period`` iterations and after the last iteration. - """ - - def __init__(self, eval_period, eval_function): - """ - Args: - eval_period (int): the period to run `eval_function`. 
Set to 0 to - not evaluate periodically (but still after the last iteration). - eval_function (callable): a function which takes no arguments, and - returns a nested dict of evaluation metrics. - - Note: - This hook must be enabled in all or none workers. - If you would like only certain workers to perform evaluation, - give other workers a no-op function (`eval_function=lambda: None`). - """ - self._period = eval_period - self._func = eval_function - - def _do_eval(self): - results = self._func() - - if results: - assert isinstance( - results, dict - ), "Eval function must return a dict. Got {} instead.".format(results) - - flattened_results = flatten_results_dict(results) - for k, v in flattened_results.items(): - try: - v = float(v) - except Exception as e: - raise ValueError( - "[EvalHook] eval_function should return a nested dict of float. " - "Got '{}: {}' instead.".format(k, v) - ) from e - self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False) - - # Evaluation may take different time among workers. - # A barrier make them start the next iteration together. - comm.synchronize() - - def after_step(self): - next_iter = self.trainer.iter + 1 - if self._period > 0 and next_iter % self._period == 0: - # do the last eval in after_train - if next_iter != self.trainer.max_iter: - self._do_eval() - - def after_train(self): - # This condition is to prevent the eval from running after a failed training - if self.trainer.iter + 1 >= self.trainer.max_iter: - self._do_eval() - # func is likely a closure that holds reference to the trainer - # therefore we clean it to avoid circular reference in the end - del self._func - - -class PreciseBN(HookBase): - """ - The standard implementation of BatchNorm uses EMA in inference, which is - sometimes suboptimal. - This class computes the true average of statistics rather than the moving average, - and put true averages to every BN layer in the given model. - - It is executed every ``period`` iterations and after the last iteration. - """ - - def __init__(self, period, model, data_loader, num_iter): - """ - Args: - period (int): the period this hook is run, or 0 to not run during training. - The hook will always run in the end of training. - model (nn.Module): a module whose all BN layers in training mode will be - updated by precise BN. - Note that user is responsible for ensuring the BN layers to be - updated are in training mode when this hook is triggered. - data_loader (iterable): it will produce data to be run by `model(data)`. - num_iter (int): number of iterations used to compute the precise - statistics. - """ - self._logger = logging.getLogger(__name__) - if len(get_bn_modules(model)) == 0: - self._logger.info( - "PreciseBN is disabled because model does not contain BN layers in training mode." - ) - self._disabled = True - return - - self._model = model - self._data_loader = data_loader - self._num_iter = num_iter - self._period = period - self._disabled = False - - self._data_iter = None - - def after_step(self): - next_iter = self.trainer.iter + 1 - is_final = next_iter == self.trainer.max_iter - if is_final or (self._period > 0 and next_iter % self._period == 0): - self.update_stats() - - def update_stats(self): - """ - Update the model with precise statistics. Users can manually call this method. 
- """ - if self._disabled: - return - - if self._data_iter is None: - self._data_iter = iter(self._data_loader) - - def data_loader(): - for num_iter in itertools.count(1): - if num_iter % 100 == 0: - self._logger.info( - "Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter) - ) - # This way we can reuse the same iterator - yield next(self._data_iter) - - with EventStorage(): # capture events in a new storage to discard them - self._logger.info( - "Running precise-BN for {} iterations... ".format(self._num_iter) - + "Note that this could produce different statistics every time." - ) - update_bn_stats(self._model, data_loader(), self._num_iter) - - -class TorchMemoryStats(HookBase): - """ - Writes pytorch's cuda memory statistics periodically. - """ - - def __init__(self, period=20, max_runs=10): - """ - Args: - period (int): Output stats each 'period' iterations - max_runs (int): Stop the logging after 'max_runs' - """ - - self._logger = logging.getLogger(__name__) - self._period = period - self._max_runs = max_runs - self._runs = 0 - - def after_step(self): - if self._runs > self._max_runs: - return - - if (self.trainer.iter + 1) % self._period == 0 or ( - self.trainer.iter == self.trainer.max_iter - 1 - ): - if torch.cuda.is_available(): - max_reserved_mb = torch.cuda.max_memory_reserved() / 1024.0 / 1024.0 - reserved_mb = torch.cuda.memory_reserved() / 1024.0 / 1024.0 - max_allocated_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 - allocated_mb = torch.cuda.memory_allocated() / 1024.0 / 1024.0 - - self._logger.info( - ( - " iter: {} " - " max_reserved_mem: {:.0f}MB " - " reserved_mem: {:.0f}MB " - " max_allocated_mem: {:.0f}MB " - " allocated_mem: {:.0f}MB " - ).format( - self.trainer.iter, - max_reserved_mb, - reserved_mb, - max_allocated_mb, - allocated_mb, - ) - ) - - self._runs += 1 - if self._runs == self._max_runs: - mem_summary = torch.cuda.memory_summary() - self._logger.info("\n" + mem_summary) - - torch.cuda.reset_peak_memory_stats() diff --git a/spaces/ysharma/Low-rank-Adaptation/setup.py b/spaces/ysharma/Low-rank-Adaptation/setup.py deleted file mode 100644 index 405cac7f0c9ac88e388ce0af216ee3dbe5f31fd6..0000000000000000000000000000000000000000 --- a/spaces/ysharma/Low-rank-Adaptation/setup.py +++ /dev/null @@ -1,25 +0,0 @@ -import os - -import pkg_resources -from setuptools import find_packages, setup - -setup( - name="lora_diffusion", - py_modules=["lora_diffusion"], - version="0.0.1", - description="Low Rank Adaptation for Diffusion Models. Works with Stable Diffusion out-of-the-box.", - author="Simo Ryu", - packages=find_packages(), - entry_points={ - "console_scripts": [ - "lora_add = lora_diffusion.cli_lora_add:main", - ], - }, - install_requires=[ - str(r) - for r in pkg_resources.parse_requirements( - open(os.path.join(os.path.dirname(__file__), "requirements.txt")) - ) - ], - include_package_data=True, -) diff --git a/spaces/yufiofficial/MusicGenQ/MODEL_CARD.md b/spaces/yufiofficial/MusicGenQ/MODEL_CARD.md deleted file mode 100644 index 6c2c9f883969eb905e74ad3376966d156cc5ca00..0000000000000000000000000000000000000000 --- a/spaces/yufiofficial/MusicGenQ/MODEL_CARD.md +++ /dev/null @@ -1,81 +0,0 @@ -# MusicGen Model Card - -## Model details - -**Organization developing the model:** The FAIR team of Meta AI. - -**Model date:** MusicGen was trained between April 2023 and May 2023. - -**Model version:** This is the version 1 of the model. 
- -**Model type:** MusicGen consists of an EnCodec model for audio tokenization, an auto-regressive language model based on the transformer architecture for music modeling. The model comes in different sizes: 300M, 1.5B and 3.3B parameters ; and two variants: a model trained for text-to-music generation task and a model trained for melody-guided music generation. - -**Paper or resources for more information:** More information can be found in the paper [Simple and Controllable Music Generation][arxiv]. - -**Citation details** See [our paper][arxiv] - -**License** Code is released under MIT, model weights are released under CC-BY-NC 4.0. - -**Where to send questions or comments about the model:** Questions and comments about MusicGen can be sent via the [Github repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue. - -## Intended use -**Primary intended use:** The primary use of MusicGen is research on AI-based music generation, including: - -- Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science -- Generation of music guided by text or melody to understand current abilities of generative AI models by machine learning amateurs - -**Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateur seeking to better understand those models. - -**Out-of-scope use cases** The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate music pieces that create hostile or alienating environments for people. This includes generating music that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. - -## Metrics - -**Models performance measures:** We used the following objective measure to evaluate the model on a standard music benchmark: - -- Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish) -- Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST) -- CLAP Score between audio embedding and text embedding extracted from a pre-trained CLAP model - -Additionally, we run qualitative studies with human participants, evaluating the performance of the model with the following axes: - -- Overall quality of the music samples; -- Text relevance to the provided text input; -- Adherence to the melody for melody-guided music generation. - -More details on performance measures and human studies can be found in the paper. - -**Decision thresholds:** Not applicable. - -## Evaluation datasets - -The model was evaluated on the [MusicCaps benchmark](https://www.kaggle.com/datasets/googleai/musiccaps) and on an in-domain held-out evaluation set, with no artist overlap with the training set. - -## Training datasets - -The model was trained on licensed data using the following sources: the [Meta Music Initiative Sound Collection](https://www.fb.com/sound), [Shutterstock music collection](https://www.shutterstock.com/music) and the [Pond5 music collection](https://www.pond5.com/). See the paper for more details about the training set and corresponding preprocessing. - -## Quantitative analysis - -More information can be found in the paper [Simple and Controllable Music Generation][arxiv], in the Experimental Setup section. 
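The metrics named above (Frechet Audio Distance, KL divergence on classifier label distributions, CLAP score) are listed but not defined. As a purely illustrative aid that is not part of the original model card or the authors' evaluation code, the sketch below shows one way the label-distribution KL divergence could be computed in PyTorch; the classifier, label count, and tensor shapes are assumptions made for illustration only.

```python
# Hypothetical sketch of a "KL divergence on label distributions" metric.
# In practice the logits would come from a pre-trained audio tagger (e.g. PaSST)
# run on reference and generated audio; here random tensors stand in for them.
import torch
import torch.nn.functional as F

def label_distribution_kl(reference_logits: torch.Tensor,
                          generated_logits: torch.Tensor) -> float:
    """Mean KL(reference || generated) over per-example label distributions."""
    p = F.softmax(reference_logits, dim=-1)           # reference distribution
    log_p = F.log_softmax(reference_logits, dim=-1)
    log_q = F.log_softmax(generated_logits, dim=-1)   # generated distribution (log)
    kl_per_example = torch.sum(p * (log_p - log_q), dim=-1)
    return kl_per_example.mean().item()

# Toy usage: 8 clips, 527 AudioSet-style labels (both numbers are arbitrary).
ref = torch.randn(8, 527)
gen = torch.randn(8, 527)
print(f"label KL: {label_distribution_kl(ref, gen):.4f}")
```

Lower values indicate that generated audio triggers a label distribution closer to that of the reference audio; this is a sketch of the idea only, and the actual evaluation code should be taken from the audiocraft repository.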
- -## Limitations and biases - -**Data:** The data sources used to train the model are created by music professionals and covered by legal agreements with the right holders. The model is trained on 20K hours of data, we believe that scaling the model on larger datasets can further improve the performance of the model. - -**Mitigations:** Vocals have been removed from the data source using corresponding tags, and then using using a state-of-the-art music source separation method, namely using the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs). - -**Limitations:** - -- The model is not able to generate realistic vocals. -- The model has been trained with English descriptions and will not perform as well in other languages. -- The model does not perform equally well for all music styles and cultures. -- The model sometimes generates end of songs, collapsing to silence. -- It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results. - -**Biases:** The source of data is potentially lacking diversity and all music cultures are not equally represented in the dataset. The model may not perform equally well on the wide variety of music genres that exists. The generated samples from the model will reflect the biases from the training data. Further work on this model should include methods for balanced and just representations of cultures, for example, by scaling the training data to be both diverse and inclusive. - -**Risks and harms:** Biases and limitations of the model may lead to generation of samples that may be considered as biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will allow to broaden the application to new and more representative data. - -**Use cases:** Users must be aware of the biases, limitations and risks of the model. MusicGen is a model developed for artificial intelligence research on controllable music generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks. - -[arxiv]: https://arxiv.org/abs/2306.05284 diff --git a/spaces/zeno-ml/translation-report/gpt-MT/evaluation/system-outputs/text-davinci-003/zeroshot/encs/test.en-cs.cs b/spaces/zeno-ml/translation-report/gpt-MT/evaluation/system-outputs/text-davinci-003/zeroshot/encs/test.en-cs.cs deleted file mode 100644 index 4fab7788509cea344d45eccdadf767e48cc13af8..0000000000000000000000000000000000000000 --- a/spaces/zeno-ml/translation-report/gpt-MT/evaluation/system-outputs/text-davinci-003/zeroshot/encs/test.en-cs.cs +++ /dev/null @@ -1,2037 +0,0 @@ -Pokud vás nenajdou, určitě zavolají. -Je však lepší, když jsou blízko vaší dodací adresy, můžete je kontaktovat místo toho. -Samco Sport vakuové hadice jsou čisté silikonové gumové hadice, které jsou k dispozici ve vnitřních průměrech (I.D) od 3 mm do 9 mm. -Konkrétně navrženo pro všechny válcové vákuumové trubky, sací hadice karburátoru, hadice ventilace nádrže na palivo, přebytečnou chladicí kapalinu a kontrolu emisí a může být použito pro hadice stěračů a izolaci drátů. -Pouze pro instalace s nízkým tlakem. -Samcoův vysavačový hadicí není navržen pro přepravu oleje, paliva nebo pro trvalý přenos stlačeného horké vody. -Nabízí neuvěřitelnou schopnost roztahování v průměru, což umožňuje hadici natáhnout na spoj pro dokonalé těsnění (tj. 
můžete natáhnout hadici s průměrem 3 mm na spoj s průměrem 5 mm). -Přidejte do své objednávky 5 dvojitých drátových svorek za pouhých 99p, perfektní pro upevnění hadice vysavače na místě! -S více než 12 let zkušeností s obchodováním s výkonnými silikonovými hadicemi Samco Sport po záruce jsme hrdí na to, že jsme světovým vedoucím distributorem specialistů na silikonové hadice pro motocykly. -S velkým skladem univerzálních dílů závodních vozidel se snažíme o 100% servis. -Samco Sport Vacuum hadice má celou řadu aplikací: kontrola emisí, přetečení chladiče, stěrače a je ideální pro náhradu hadice ventilu karburátoru na motokrosových a silničních aplikacích. -To je skvělý produkt a vhodný pro všechny kola, auta a komerční aplikace. -Přidejte do své objednávky 5 dvojitých drátových svorek za pouhých 99p, perfektní pro upevnění hadice vysavače na místě! -Není vhodné pro instalace vysokého tlaku vody nebo systémy teplé vody. -Tento hadicí není vhodný pro přenos oleje. -Proč vybrat silikonové hadice Samco Sport? -Ltd životní záruka, 2 roky pro aplikace pro paliva. -Jen se přihlaste do svého účtu a počkejte, až synchronizace dokončí, knihy se automaticky načtou do #PRS_ORG#. -To je vše. -Je tu něco jiného, s čím bych vám mohl pomoci? -Jsi tam? -Promiňte, z důvodu kvality musím tento chat uzavřít, pokud neobdržím odpověď do 2 minut. -Zavřu tento chat teď, protože nebyla obdržena žádná odpověď. -Rád budu nadále pomáhat prostřednictvím e-mailu nebo se můžete znovu obrátit na nás v pohodlnějším čase pro vás. -Odpojení bude vyžadovat, aby se ujistili, že jejich aplikace fungují na všech různých verzích iOS. -To není ani nutně pravda. -Jen stanovte horní hranici verzí iOS, které aplikace podporuje, a vydávejte aktualizace pouze pro zařízení s nejnovějšími kompatibilními verzemi iOS. -To je jak většina aplikací funguje teď. -Také, protože Apple může často vydávat nové verze iOS a macOS. -Není problém, že to stále není dost často? -Také to vytváří některé strašné UX. -Proč by uživatel musel pokaždé provádět *celou aktualizaci operačního systému*, i když Apple zvýšil rychlost aktualizací operačního systému kvůli opravám drobných chyb ve hrstce aplikací? -Co to vlastně znamená "vývojáři mohou být jisti, že jejich oprava/funkce bude v nové verzi vydána"? -To je v protikladu k Google. -Google musel odpojit, protože bylo ve volné přírodě mnoho verzí Androidu se velkým podílem na trhu. -Bez pochybností mohu říct, že kdyby verze Androidu na telefonech byly konzistentní jako iOS, Google by nikdy neudělal vydání OS pro tyto aplikace. -To je odvážné tvrzení, ale ať už je to jakkoli, stále to nevysvětluje, jak je seskupování aktualizací aplikací jako aktualizací operačního systému „lepší“ pro vývojáře nebo koncového uživatele. -Můžu vidět, že jste objednali z restaurace, která provádí vlastní doručení. -Oni přijali vaši objednávku, která je #PRS_ORG#. -Restaurace na vás volala a nemají položku, kterou jste objednali? -Německo říká, že čas pro dohodu o jaderném programu Íránu se krátí. -Německá zahraniční ministryně varovala v sobotu, že čas se krátí, aby se našel způsob, jak obnovit jadernou dohodu z roku 2015 mezi světovými mocnostmi a Íránem, po setkáních s jejími protějšky zemí G7. -Jednání v Rakousku se obnovila, aby se pokusila oživit jadernou dohodu, s oběma stranami se snažícími odhadnout předpoklady úspěchu po nejnovějších výměnách v přerušovaných jednáních. -Německá ministryně zahraničí Annalena Baerbock řekla novinářům ve městě Liverpool ve Velké Británii, kde se scházejí ministři zemí G7, že čas běží. 
-Ukázalo se v posledních dnech, že nemáme žádný pokrok. -Baerbock řekl, že Írán obnovil jednání s pozicí, která vrátila jednání o šest měsíců zpět. -Aktuální kolo jednání v Drážďanech následuje po přestávce pěti měsíců po volbě tvrdého protizápadníka Ebrahima Raisiho jako prezidenta Íránu. -Dříve američtí úředníci řekli, že ministr zahraničí Antony Blinken v pátek uspořádal "produktivní" schůzku se svými protějšky z Velké Británie, Německa a Francie, kde se diskutovalo o cestě vpřed pro jednání o Íránu. -Senior úředník ministerstva zahraničí řekl, že mezi zeměmi G7 proběhla „intenzivní“ konverzace, které byly jednotné ve svém postoji k jaderným jednáním. -Oficiální, který informoval novináře pod podmínkou anonymity, řekl: „Vyhlášení bude také silné v důležitosti vrácení Íránu ke stolu a že je možné dohodu uzavřít, ale čas se krátí, takže jsme v tom jednotní.“ -Oficiální představitel dále uvedl, že americký speciální vyslanec pro Írán Robert Malley se vrací do Vídně na jednání. -Íránští úředníci dříve řekli, že se drží své tvrdé pozice. -Podle původního jaderné dohody, opuštěné v roce 2018 tehdejším prezidentem Donaldem Trumpem, omezilo Írán svůj jaderný program výměnou za uvolnění amerických, evropských a OSN sankcí. -Západ se obává, že by program mohl být použit k vývoji zbraní, což Teherán popírá. -Raisi řekl v sobotu, že Teherán je vážný ve svých jaderných jednáních v Vídni, podle oficiálního zpravodajského úřadu IRNA. -Nepřímé americko-íránské rozhovory, ve kterých diplomaté z Francie, Velké Británie, Německa, Ruska a Číny přepravují mezi nimi, protože Teherán odmítá přímý kontakt s Washingtonem, mají za cíl obě strany přimět k obnovení plného dodržování dohody. -G7 setkání, které se očekává, že vyústí ve společné volání po tom, aby Írán zmírnil svůj jaderný program a využil příležitosti vídeňských jednání. -Ze které země doručujete? -Kdy je balíček s námi? -Mají mnoho díků za jejich pomoc. -Děkuji vám za to, že jste si dnes udělali čas na rozhovor se mnou a doufám, že jsem dokázal vyřešit váš dotaz. Pokud byste nevadilo, abyste hodnotili naši dnešní konverzaci na základě mých zákaznických dovedností, byl bych vám velmi vděčný. Tlačítko pro hodnocení najdete v tomto chatu. -Děkuji za informace. -Budu více než šťastný, abych vám pomohl. -Zkontroluji váš účet, prosím počkejte chvíli. -Děkuji za vaši čekací dobu, zkontroloval jsem informace do vašeho účtu. -Je mi opravdu líto, že máte s vaším elektronickým knihou takový problém, ale jsem ochoten vám pomoci. -Czech: Podělím se o pár kroků, které je třeba provést ve vašem čtečce, ano? -Užívám si články jako tento, které pomáhají rozplést složitou síť sociopatických megalomanských mužů, které můj otec vychvaloval do nevolnosti, a množství vůdců, které odsoudil. -Udeřte na to, kde Nixon a Carter padli, a jeho zlatý chlapec Reagan nemohl udělat žádné zlo. -Dlouho jsem věděl, že tento světový pohled je úplný nesmysl a ve skutečnosti nenávidím každého z těch megalomanů od Caesara přes Bonaparta, Nixona, Reagana, Bushe a Trumpa. -Můžu ocenit historický význam Cézara nebo Napoleona, ale jejich sanitované historie s téměř posvátnou povahou mě od nich později odradily. -Dodnes odmítám studovat historii Polska, protože by to jen umožnilo paranoidní konspirace mého otce vyplout na povrch. 
-Vracíme-li se k tomuto článku, miluji ty malé detaily, které vás připomínají, že ve skutečnosti existovala jedna dobrá volba (navzdory menším nedostatkům - ale většinou jednající v dobré víře) a jedna strana, která nebyla dobrá, nejednala v dobré víře a kde zlomyslnost byla a je pravděpodobnějším vysvětlením než hloupost. -To je věc. -Republikáni rádi se schovávají za hloupost místo toho, aby přiznali zločinnost, ale nebuďte naivní, pokud je ideologický základ strany pod útokem. -Pak náhody, náhody, atd. vzácně existují. -Mentální obklíčení znamená, že každá akce musí mít smysl, nebo jinak vynakládáte omezenou energii na zbytečné činy. -Republikáni rádi schovávají za naše složitější pochopení světa a snaží se vrátit různé filozofické břitvy. -Proto jsme připraveni vám pomoci s jakýmikoli otázkami nebo obavami, které máte před objednáním nebo po obdržení vaší objednávky. -Prosím, kontaktujte nás prostřednictvím zpráv eBay a člen našeho týmu se vám co nejdříve ozve. -Prosím, vezměte na vědomí: Naše otevírací doba je pondělí až pátek od 09:00 do 17:30. -Kancelář zůstává o víkendu zavřená. -Na dny, kdy je kancelář zavřená, nebudeme moci odeslat vaše objednávky. -Také všechny objednávky zaslané o víkendu budou odeslány během následujících pracovních dnů. -Naším cílem je nabídnout našim zákazníkům nejlepší možnou službu. -Proč odesíláme naše objednávky během 1 pracovního dne po úhradě. -Dopravní služby, které nabízíme, jsou standardní sledovaná pošta (2-3 pracovní dny), první třída služby a také expresní služba. -Prosím, vezměte na vědomí, že během svátků (např. Vánoce) může dojít k mírným zpožděním od kurýrní služby. -Vrácení musí být do 30 dnů od doručení ve stejném stavu, v jakém byly odeslány. -Prosím, kontaktujte nás prostřednictvím zpráv eBay ohledně vašeho vrácení. -Prosím, uveďte své uživatelské jméno na eBay a důvod pro vrácení na poznámku do balíčku, abyste urychlili proces vrácení peněz nebo výměny. -Vrátíme náklady na dopravu, pokud jsou zboží vadné, ale pro všechny ostatní vrácení toto neplatí. -To je jiný problém. -Postižení lidé v Americe prostě nejsou správně zacházeno, konec. -Nemá to nic společného s příjmem nebo žít samotným. -Služby a úvahy pro postižené (stejně jako pro chudé) nejsou ani zdaleka tam, kde by měly být. -Zacházíme se zdravotně postiženými jako s odpadem. -Zacházíme se chudými jako s odpadem. -Všichni v USA by se měli stydět. -Naše společnost potřebuje více cenit lidský život. -Pokud bychom to udělali, viděli bychom, jak tyto masové střelby klesají. -Viděli bychom méně dopravních nehod a úmrtí. -Zdravotní péče a péče o děti by byly dostupné a mnohem snazší přístup, atd. -Bohužel se americká společnost "smířila" s údaji o útrapách, smrti a dalších obětech jako s "způsobem života"...výměnou za "svobodu" nebo něco takového. -Vidím tvůj komentář o tom, že jsi postižený a nedostáváš podporu, jako další příklad toho, jak Amerika prostě nepodporuje lidi. -Je to ve stejném duchu jako bod autora, ne konflikt. -Alamo na místě v Kalifornii mi provedlo podobný podvod. -Když jsem vrátil auto, agent našel škrábance pod autem (které jsem nezpůsobil). -Musel jsem podepsat, abych potvrdil "škodu". -Také jsem měl videa a fotografie, které nezahrnovaly spodek auta. -Po návratu domů jsem po několika týdnech obdržel dopis, ve kterém byly uvedeny další účtované škody, včetně škrábanců na dveřích, které údajně vyžadovaly přetření zadní části auta několik dní po vrácení pronájmu. -Žádné z těch škod nebylo viditelné pro mě (ani pro agenta, když jsem vracel vozidlo). 
-Žádné z toho nebylo viditelné na fotografiích, které jsem pořídil, když jsem vracel auto, takže jsem namítal nárok. -Oni odmítli spory a vyžadovali okamžitou platbu za škodu. -Odjel jsem na služební cestu, takže jsem předal své fotografie právnímu oddělení. -Obdržel jsem později dopis od Alama, ve kterém uvedli, že ve zájmu spokojenosti zákazníka odpustí poplatky. -Kdybych byl sám, určitě bych skončil platit účet za škodu, o které jsem si jistý, že se nestala, zatímco auto bylo ve mé péči. -Rusko varovalo před „důsledky“, pokud by Ukrajina útočila. -Skupina sedmi varovala Rusko před masivními důsledky a vážnými náklady, pokud prezident Vladimir Putin napadne Ukrajinu, podle návrhu prohlášení. -Americká zpravodajská služba odhaduje, že Rusko by mohlo plánovat vícefrontový útok na Ukrajinu již příští rok, zahrnující až 175 000 vojáků. -Kreml popírá, že plánuje invazi a říká, že Západ je ovládnut rusofobií. -Moskva říká, že rozšíření NATO ohrožuje Rusko a porušilo záruky, které mu byly dány, když Sovětský svaz v roce 1991 klesl. -Na schůzce ve severní anglickém městě Liverpoolu řekli zástupci G7, že jsou jednotní ve svém odsouzení ruského vojenského nárůstu u Ukrajiny a vyzvali Moskvu, aby deeskalovala. -Rusko by nemělo mít žádné pochybnosti, že další vojenská agrese proti Ukrajině by měla obrovské důsledky a vážné náklady, uvedlo se ve zprávě, kterou potvrdily zdroje G7. -„Potvrzujeme neochvějnou závazek k suverenitě a územní celistvosti Ukrajiny, stejně jako právo každého suverénního státu určit si svou vlastní budoucnost,“ uvádí návrh. -Pro Moskvu je rostoucí přijetí NATO sousedního bývalého sovětského státu a to, co vidí jako noční můru možnosti aliančních střel v Ukrajině cílených proti Rusku, červenou linií, kterou nedovolí překročit. -Pan Putin požaduje právně závazné bezpečnostní záruky, že NATO se nebude dále rozšiřovat na východ nebo umísťovat své zbraně blízko ruského území; Washington opakovaně řekl, že žádná země nemůže vetovat naděje Ukrajiny na NATO. -V roce 2014 Rusko obsadilo černomořskou poloostrov Krym od Ukrajiny, co vyvolalo na Západě uvalení sankcí na Rusko. -Kreml dnes řekl, že pan Putin řekl americkému prezidentovi Joe Bidenovi, že ruské jednotky neohrožují a že Moskva je démonizována za přesun jednotek po svém vlastním území. -Kremelský mluvčí Dmitry Peskov řekl, že mezi Ruskem a Spojenými státy existují velmi vážné konceptuální rozdíly ohledně Moskvy "červených linií". -G7 se skládá z Británie, Francie, Německa, Itálie, Japonska, Kanady a Spojených států a zahrnuje zástupce Evropské unie. -G7 ve znění návrhu uvedlo: „Vyzýváme Rusko, aby uklidnilo situaci, využilo diplomatické kanály a dodržovalo své mezinárodní závazky týkající se průhlednosti vojenských aktivit.“ -"Potvrzujeme naši podporu úsilí Francie a Německa ve formátu Normandie k dosažení plného provedení dohod Minsk k řešení konfliktu na východě Ukrajiny," uvádí se ve znění návrhu. -Čína Xi a Rusko Putin dominují G7. -Papež vyzývá k „vážnému mezinárodnímu dialogu“ k uklidnění napětí v Ukrajině. -Papež František ve svých prvních komentářích k napětí mezi Západem a Ruskem kvůli Ukrajině dnes vyzval k vážnému mezinárodnímu dialogu k řešení napětí a vyzval obě strany, aby se vyhnuly ozbrojenému konfliktu. -Řekl, že se modlí za "milou Ukrajinu, pro všechny její církve a náboženské společenství a pro všechny její lidi, aby napětí tam bylo vyřešeno vážným mezinárodním dialogem a ne zbraněmi." -Zbraně nejsou cesta, kterou bychom měli jít. 
-Papež řekl tisícům lidí na náměstí Svatého Petra během svého poledního požehnání a projevu: "Ať tento Vánoce přináší mír Ukrajině." -Ukrajina je převážně pravoslavná, s katolíky latinského nebo byzantského obřadu tvořícími asi 10% populace bývalého sovětského republiky. -Separátně řekl Biden Putinovi, že Rusko by zaplatilo "strašnou cenu" a čelilo devastujícím ekonomickým důsledkům, pokud by došlo k invazi Ukrajiny. -Dovolte mi okamžik, abych pro vás hledal. -V tuto chvíli se zdá, že nemáme žádné další kusy, zkontroluji, kdy očekáváme více. -Bohužel se nezdá, že by byly nějaké budoucí plány na výrobu jednotlivých sekcí. -Můžu vám ještě nějak jinak pomoci? -Nejsem seznámený s Teleloadingem. -Pokud však chcete otevřít nedávno zakoupenou knihu od #PRS_ORG# ve vašem čtečce #PRS_ORG#, stačí synchronizovat čtečku přes WiFi a stáhnout knihu do čtečky, abyste mohli začít číst. Přenos přes počítač nebo e-mail není nutný. -Pokud kniha po synchronizaci stále nemůže být otevřena ve vašem čtečce, můžeme zkusit nějaký postup pro odstraňování potíží. -Potřeboval bych vědět, zda kniha zobrazuje chybovou zprávu, zdá se být blokována nebo dokonce nezobrazuje ve vašem účtu #PRS_ORG# ve vašem čtečce #PRS_ORG#. -Bylo to před rokem 2018. -Zůstal jsem uvězněný u US Air v Philadelphii místo toho, aby mě odvezli až do Newarku, a já a dalších dvanáct lidí se snažilo sehnat auta pozdě večer. -Lidé na pokladně byli nejhorší, jaké jsem kdy viděl. -Fronta venku a oni si dávali přestávky a mluvili o náhodných věcech, které se netýkaly práce, jako bychom ani nebyli tady. -Měl jsem potvrzenou rezervaci. -Po hodině čekání jsem jí řekl, co jsem si rezervoval, a ona na mě hlasitě vyčítala, že jí lžu, a vyčetla mi to. -Nakonec jsem to vzdal a šel jsem do Hertz, kteří mi účtovali majlant, ale okamžitě mi dali auto. -Slíbil jsem, že už nikdy nebudu používat Avis. -Nejhorší zkušenost s autem vůbec. -National & Hertz byly pro mě vždy dobré zkušenosti. -Ti dva, následovaní Enterprise. -Enterprise nebylo vůbec špatné, ale nikdy nebylo tak pohodlné jako National, kde jsem mohl jít a vybrat si auto a odjet bez čekání na přepážce navěky. -Vím, že to jsou anekdotické zkušenosti, ale budu se snažit všem říct, aby se vyhnuli Avis jako čert kříži. -Je pravda, že dobrá zákaznická služba udrží zákazníky loajálními a špatná zákaznická zkušenost odradí desetkrát více možností zákazníků. -Opravování vašeho účtu #PRS_ORG# na čtečce knih. -Jdi na svou domovskou obrazovku. -Klepněte na -Více ikon na dolní straně obrazovky. -Nastavení. -Zvolte informace o zařízení. -Vedle "Opravit váš účet #PRS_ORG#", klepněte na "Opravit". -Opravit nyní. -Proces opravy účtu začne. -Pokud máte hodně knih, může to chvíli trvat. -Pokud oprava vašeho účtu nevyřešila problém: -Odhlásit se a znovu se přihlásit do svého čtečky knih. -Jdi na svou domovskou obrazovku. -Klepněte na -Více ikon na dolní straně obrazovky. -Nastavení. -Účty. -Pod #PRS_ORG#, klepněte na Odhlásit se. -Potvrzovací obrazovka se objeví. -Tapněte Odhlásit se. -Po odhlášení postupujte podle pokynů na obrazovce pro nastavení vašeho čtečky elektronických knih. -Pak aktualizujte slovník. -Omlouvám se za to, musíme získat povolení od držitele účtu, abychom mohli diskutovat o objednávce s jinou osobou. Omlouvám se, pokud to bylo již dříve provedeno, ale bez povolení držitele účtu bych nemohl o tomto diskutovat s vámi. -Víš něco. -Rozumím, o čem Dave mluvil. -Je horší zabíjet černé lidi, než se smát trans lidem. -A samozřejmě to je pravda. -Ale Dave něco zapomněl. 
-Mnoho lidí, kteří nenávidí trans lidi, také nenávidí černé lidi. -On nikoho nepřitáhl k #blacklivesmatter. -On právě dal transofobům dalšího hrdinu a více antitrans rétoriky. -On dal důvěryhodnost transofobii. -A vzhledem k tomu, že nejzranitelnější trans lidé jsou trans ženy barvy pleti, učinil je cílem pro násilí. -On opustil svůj výstup, protože si uvědomil, že bílí lidé se smáli ZA ním, ne S ním. -Jak si neuvědomil, že udělal přesně to samé trans lidem, je velmi smutné. -Ano, to znamená, že když cvičím, vůbec mi nezáleží na tom, kolik kalorií to spálí, a nezměním si čísla nebo makroživiny kvůli tomu, kolik jsem spálil. -Snažím se držet se 1200-1300. -Ale když jsem extra hladový, ano, sním něco navíc, abych nastartoval své tělo a přijmu, že hubnutí může být o den pomalejší, nebo ne. -Pokud už držíte 500 kalorickou redukci, další kousek steaku nebo dokonce chleba po náročném tréninku vůbec nezničí váš pokrok. -Mohlo by to jen zúžit váš deficit jeden den. -Další kousek pizzy nebo miska zmrzliny? -To nejde. -Pokud vždy potřebujete jíst více kvůli cvičení, zvažte, že na začátku nebudete omezovat tolik kalorií. -Možná začít s 300 dluhem. -Doufám, že to pomůže! -Víš, co ti rozumím. -Chceme, abyste měli svůj objednávku od nás. -Jako zdvořilost k vaší první objednávce zpracujeme plnou částku kreditu na tuto objednávku, takže můžete použít tento kredit k objednání správného oddělení. -Ach, jsem tak rád, že jsi se zeptal, mám o tom hodně co říct. -Ano, byla tu docela drastická změna. -Jsem obézní/tlustý od tří let, takže žít takto je vše, co jsem kdy znal, až do 30 let. -Většina mých rodin a přátel, kteří mě znali předtím, se ke mně chová stejně a jsou SO. -Čert to vem. -Podpůrný. -Mám jen několik rodinných vztahů, které byly od počátku napjaté, a zdá se, že můj úbytek hmotnosti zhoršil existující problémy. -To může být výsledkem jejich komplexů nebo mých, protože si myslím, že jsou zvyklí na to, že se na mě mohou vykašlat, a já zase jsem o to méně ochotný jejich hovna brát. -Jedna osoba se zvláštním způsobem snažila přivlastnit si mou ztrátu hmotnosti. -Základně, naznačili, že byli hnací silou, která mě sem dostala, pod záminkou podpory, když ve skutečnosti ani nevěděli, že dělám tyto změny, dokud jsem už neprošel RNY a neztratil více než 100 liber. -Ve skutečnosti byli poslední, kteří to věděli, úmyslně, protože jsem jim prostě nechtěl dovolovat, aby se snažili ovládat a šikanovat mě, jako jsem to dřív nechávala. -Nepřekvapivě, teď se začali urážet i mé další rysy, jako říkat, že mám nos a čelo příliš velké od té doby, co jsem zhubla, a že potřebuji operaci nosu a ofinu, aby to napravili - to je typické chování od nich. -Na začátku mi tyto věci posílali soukromou zprávou, ale když jsem neodpověděl, začali o tom veřejně komentovat na sociálních médiích bez stydlivosti. -Když jsem byl větší, to by mě zničilo a já bych to poslouchal, ale teď to jen ignoruji. -Naštěstí je moje kůže teď silnější (nejen proto, že je teď nadbytečná). -Nejdivnější pro mě je pozornost od cizích lidí. -Když jsem byl větší, lidé mi vůbec nevěnovali pozornost. -Jako, téměř žádný kontakt očima. -Žádné pozdravování ani usmívání se na mě, když jsme se míjeli na ulici, pokud mě neznali. -Určitě žádné vycházení z cesty, aby mi pomohli nebo mě pochválili. -Bylo to více izolující, než jsem si uvědomil, protože to bylo to, na co jsem byl zvyklý. 
-Věděl jsem, že lidé mohou být při posuzování mé velikosti přísní - s mnoha z nich to dělají otevřeně - ale nikdy jsem si neuvědomil, dokud jsem nezhubl, mikroúroveň toho a jen jak subtilní to může být. -Nejenže jsem o tom nebyl vědom, jen protože jsem na to byl zvyklý, ale myslím si, že ani ti, kteří to podporují, nejsou aktivně vědomi toho, co dělají. -Skutečně věřím, že je to podvědomá předsudek, vychovávaný a zesílený zobrazováním a zacházením s obézními lidmi v médiích, který si mnozí lidé ani neuvědomují, že projektují. -Teď se cítím, jako by se na mě všichni všude dívali, usmívali se na mě, mluvili se mnou atd. -Oba muži i ženy se se mnou zacházejí jinak, dělají více úsilí, aby se se mnou bavili/znali - a to jen platonicky. -Romanticky se můj výběr dat rozšířil od mála, kteří byli ochotni být viděni se mnou, až po to, co se cítí jako... všichni lol. -Je to ohromující. -Předpokládala jsem, že alespoň fakt, že jsem byla morbidně obézní, zhubla jsem všechnu tu váhu a teď mám přebytečnou kůži, by odradilo některé lidi, ale navzdory tomu, že jsem dala svou ztrátu váhy a přebytečnou kůži na první místo (protože to nechci být tajemství), to nikoho z mých zkušeností nezneklidnilo/neodradilo. -Zdá se, že to udělalo přesný opak a dokonce je to více zajímalo. -Obrovské šoku pro mě. -Musím tu dát malou veřejnou výzvu mužům, kteří začali mluvit/chodit s ženou, která zhubla: komentáře jako "Jsem tak šťastný, že jsi zhubla, ještě jsi si neuvědomila, jaká jsi krásná" NEJSOU cestou, jak jít. -Slyšel jsem nějakou variaci tohoto vícekrát, než bych si přál, a všichni si mysleli, že je to kompliment. -Říkal jsem ti to... -Obchod, ve kterém jsem pracoval, procházel úplnou a kompletní reorganizací. -Chodby se měnily a všichni jsme se učili, kde je všechno. -Samozřejmě to byl chaos. -V té době, kdy se to stalo, jsme byli docela zaneprázdněni a měl jsem frontu zákazníků, ale Karen se rozhodla přeskočit frontu a zeptat se mě, kde něco je. -Nezapamatoval jsem si přesnou položku, ale bylo to něco jako papírové talíře (nebo něco, co by bylo blízko nim...plastové vidličky? -Slámky? -Od té doby, co jsem měl frontu zákazníků, nemohl jsem odejít, abych jí pomohl najít, tak jsem jí řekl: "Myslím, že jsou teď na příčce 7." -Než se stihnu ani dostat k mému walkie-talkie, abych se na něco zeptal, ona se rozutíká. -Jen aby se o pár minut později vrátili a řekli mi, že tam nejsou. -Manažer je nyní blízko, tak se ho ptám, jestli jí může pomoci, a říkám mu, že jsem si myslel, že jsou na 7, ale ona říkala, že nejsou. -On vypadá zmateně a říká: "OK, možná jsou na 8." -Pomůžu vám je najít, paní. -Oni se chystají odejít, ona se otočí ke mně a říká: "Měl bys vědět lépe, než někomu říkat, kde něco je, když to ve skutečnosti nevíš." -Dlouhý příběh krátce, vrací se k pokladně, ale jde do jiného řádku. -Jak se manažer vrátil, sklonil se ke mně a šeptal mi: "Byli na příčce 7, jak jsi jí řekl." -HA....říkal jsem ti! -Tento rok trend pro druhý vánoční strom v ložnici posílá prodeje menších smrčků vzhůru. -Máte jen jednu vánoční strom? -Pak bys mohl být pozadu. -Tento rok je trendem druhý strom do ložnice a to vedlo ke zvýšení prodeje menších smrčků. -Odborníci říkají, že více než čtvrtina britských domácností nyní má dva vánoční smrky - a může to být více než symbol statutu. -Říká se, že uklidňující zelená barva a aroma borovice jsou dobré pro duševní zdraví a spánkové cykly - zatímco i umělé stromy mohou pomoci vyvolat pocit nostalgie. 
-Jiné rodiny dostávají dva stromy, takže děti mohou jeden z nich ozdobit tak nápaditě, jak chtějí, a poskytnout tak místo pro všechny domácí díla, zatímco druhý, více ozdobený smrk, je více viditelný a imponuje sousedům. -Mezi těmi, kteří se přidávají k trendu, který začal v USA, je Carole Middleton, matka Kate Middletonové, která má ve svém domě v Bucklebury, West Berkshire, druhý strom pro vnoučata George, Louis a Charlotte. -Minulý týden napsala na Instagramu: "Letos zase plánujeme mít dva vánoční stromy: jeden pro děti na zdobení a jeden, který si zdobím sama." -Britské zahradní centra uvedla, že prodeje menších stromů se letos ve svých 58 prodejnách zvýšily o 50 %. -Ředitel Boyd Douglas-Davies, který je také prezidentem Asociace obchodu s květinářstvím, řekl: „Lidé obměňují rostlinu v ložnici a dávají tam krásně zdobené stromy.“ -Řetězec zahradnických center Squire's hlásí, že 30 % jejich zákazníků plánuje mít alespoň dva stromy - a více než desetina z nich má v úmyslu mít tři nebo více. -Předsedkyně Sarah Squire řekla: "Dávají ložnici krásnou, uklidňující vůni, která je dokonalou pomůckou pro dobrý spánek." -Prý stromy také pomáhají k lepšímu spánku, stejně jako rostliny v ložnici, které jsou známé pro svůj pozitivní vliv na psychiku a čistí vzduch. -Spánkový odborník Carl Walsh řekl: "Naše mozky shromažďují informace z našeho okolí a to se překládá do signálů, které uvolňují hormony v reakci." -V tomto případě jsou to hormony melatonin a kortizol, které řídí váš spánkový cyklus a uvedou váš organismus do spánkového stavu. -On dodal, že strom v ložnici může také přenést lidi zpět do více bezstarostného a mladistvého období. -Vánoce mohou být docela stresující čas. -Strom ve ložnici lidi vrací do jejich dětství, kdy neměli žádné zodpovědnosti a mohli zapomenout na stresující věci. -To je vždy dobré na spaní. -Mějte skvělý den! -Děkuji, že jste si dnes udělali čas na rozhovor se mnou. -Jakmile tento chat skončí, obdržíte e-mail s hodnocením chatu. -Prosím, vyplňte to, pokud máte chvíli, ale pokud nemáte čas, přeji vám krásný den a ještě jednou děkuji. -Děkuji, prosím, buďte trpěliví chvíli, zatímco se na to pro vás podívám. -Omlouvám se za to, protože držitel účtu není sám, potřebujeme, aby #NAME# nás kontaktoval, aby potvrdil své údaje. Jakmile to udělá a potvrdí, že je s námi spokojen, abychom mohli diskutovat o objednávce s vámi, můžeme se podívat na předchozí korespondenci pro vás. -A udělejte svůj první nákup na webových stránkách #PRS_ORG#. -Aktualizujte své platební informace, prosím, postupujte podle těchto kroků: -Přihlaste se do svého účtu #PRS_ORG#. -Klikněte na "Můj účet" a v menu vyberte "Nastavení účtu". -Vyberte záložku "Informace o platbě". -Pod „Informace o platbě“ vyberte typ kreditní karty a zadejte číslo karty, bezpečnostní kód (CVV), jméno na kartě a datum expirace. -Klikněte na "Uložit”. -Objednávka byla zpracována jako objednávka pro osobní odběr, což znamená, že jste si ji vybrali, abyste si ji osobně vyzvedli. -Proč tedy nemůžeme přidělit jezdce pro toto? -Jelikož objednávka již byla přijata, v tuto chvíli nemůžeme objednávku zrušit. -Je to kruhovité... -Myslím, že jídlení boxy jsou šílený návrh. -Matematika, kterou dělají, je prostě šílená. -"Ve skutečnosti ušetříme peníze, protože nemusíme jít ven a koupit celou láhev sójové omáčky, abychom vyzkoušeli asijskou kuchyni..." Šílenost. -Myslím, že v spotřebitelském prostoru je jediným důvodem, proč někdo mimo horní třídu zažil nějaký růst mezd, levnější zboží s nižšími maržemi. 
-Mzdy se skutečně nezvýšily, ale věci se staly levnější. -Problém je, že jsme prodali lidi pod námi. -Souhlasím s tebou. -Někteří z nás musí alespoň částečně vzdát pohodlí, abychom udělali společnost lepší. -I když nejsem ve výši příjmu, který by platil více daní, stále mohu kupovat méně věcí, které jsou dražší, aby je mohli vyrábět lidé, kteří si vydělávají na živobytí, a mohu být ochoten počkat několik dní, než to dostanu, aby některý gig pracovník nemusel být vyčerpán do kostí... -Prosím, stále klepáním tam, kde se zobrazují obrázky, můžete vidět obrázky a sledovat, kam klepnout? -Budu dál poskytovat obrázky. -Prosím, dejte mi vědět, jestli jste byli schopni klepnout na své zařízení tam, kde obrázky říkají. -Dallas Cowboys přinášejí lavice do Washingtonu, rivalita se zvyšuje. -Strana hostí na Washingtonu má pro Dallas Cowboys povědomý domácí vzhled. -Po obdržení zpráv od ostatních týmů, že lavičky na straně hřiště na FedEx Field potřebují výraznou modernizaci, Cowboys přinesli své vlastní pro tento soubojový zápas. -Když dorazili do stadionu v neděli, už byli oděni ve znacích a logu Cowboys. -Kowboyové slyšeli od Seahawks, kteří nedávno hráli proti Washingtonu v pondělí večer a měli stížnosti, že topení na lavičkách nefungovalo. -Předtím v týdnu komentoval běžec Dallasu Cowboys Ezekiel Elliott o výhodách hraní venku v chladných zápasech, protože vyhřívané lavičky jsou pro jeho zranění kolene prospěšné. -Kowboyové právě zajistili, aby Zeke a jeho spoluhráči dostali tu příležitost. -Toto je poslední zvrat ve vztahu Dallasu a Washingtonu, který se ještě více rozpálil tento týden, když hlavní trenér Cowboys Mike McCarthy předpověděl vítězství svého týmu, což vyvolalo nějaké ohňostroje mezi Ronem Riverou a hráči Washingtonu. -Washington porazil Cowboys ve dvou po sobě jdoucích zápasech. -Už je to více než 30 let, co porazilo Dallas ve třech po sobě jdoucích setkáních (1986-88). -Fanoušci Cowboys tvořili více než polovinu davu na FedEx field, což bylo patrné podle modrých a bílých dresů ve stadionu. -Majitel Jerry Jones předpověděl to již tento týden, když řekl na 105.3 FM v Dallasu: "Vždy jsme prodávali více klobouků, čepic a triček Dallas Cowboys." -Vždy jsme měli největší podporu fanoušků pocházející z Washingtonu, což je mimo oblast Dallasu. -Venku z oblasti Texasu je Washington to místo, kde máme nejvíce podpory ve všech věcech, které byste mohli počítat. -Jmenovaný jezdec nikdy neukázal. -Měli jsme ho nepřiřazeného a systém nyní hledá nového jezdce. -Prosím, dejte mu ještě 15 minut, aby se tam dostal. -Ve některých komunitách poskytuje církev bezpečné místo pro některé pronásledované sociální skupiny. -Není to náhoda, že hnutí za občanská práva bylo velmi úzce spjato s menšinovými kostely, mešitami a chrámy. -Ahmadův Aubreyův proces je také příkladem pozitivního dopadu. -Satanský chrám také dělá dobré věci. -Nicméně, příklady, kde je něco velmi špatného se systémem, byly vždy zřejmé. -Náboženské organizace a instituce by obecně měly být drženy na stejných standardech jako jakákoli jiná charitativní organizace. -Transparentnost je jméno hry. -Podíváním se na případy jako je Katolická církev, může být vhodné zajistit, aby prostředky získané těmito daňově osvobozenými náboženskými organizacemi neopustily zemi. -Když přemýšlím o náboženských členstvích, možná je užitečný model spolupráce; každý člen dostane jeden hlas jako akcionář. -Doufejme, že alespoň přispívají do sociálního zabezpečení. -Zkontrolováním zde znovu, mohu vidět, že jezdec omylem označil objednávku jako doručenou. 
-Momentálně nemáme přesné informace o tom, co se stalo s jezdcem, ani o vaší objednávce. -Nyní to pro vás vyšetřujeme. -Tohle je to, co mohu udělat. -Prosím, postupujte podle níže uvedených kroků pro provedení opravy synchronizace ve vašem #PRS_ORG# (před zahájením budete potřebovat připojení Wi-Fi): -Jděte na svou domovskou obrazovku. -Klepněte na ikonu Více v dolním pravém rohu obrazovky (3 vodorovné čáry). -Zobrazte informace o zařízení. -Vedle možnosti Opravit/Obnovit váš účet #PRS_ORG# klepněte na Opravit/Obnovit. -Opravit nyní/Obnovit -Až bude synchronizace dokončena, klepněte prosím znovu na Synchronizovat nyní, abyste nainstalovali dostupné aktualizace. -Hangáry pro raketoplány na Enterprise-D -**Enterprise-D** z *The Next Generation* měla **tři** hangáry pro raketoplány. -V seriálu vždy vidíme Shuttlebay 2 a 3 na palubách 12 a 13. -Tyto dva hangáry představovala studiová kulisa v plné velikosti, do které se vešly makety raketoplánů v plné velikosti. -Vždycky jsem miloval, když epizody ukazovaly dvojité hangáry na zadní straně střední sekce, krku, nebo jak to chcete nazvat. -Proč jsme nikdy neviděli hlavní hangár pro raketoplány? -Byl umístěn pod hlavním můstkem na palubách 3 a 4 a pravděpodobně by to bylo obrovské zařízení. -Místo toho, aby tam šla, by se posádka můstku projela turbovýtahem přímo kolem toho všeho až na palubu 13. -V původním *Star Treku* byla postavena miniaturní kulisa a používala se s miniaturním raketoplánem, aby hangár ožil. -Postavy byly občas vidět, jak mluví u dveří vedoucích do hangáru, přičemž miniaturní kulisa s lodí byly vkomponovány do záběru, aby lodi dodaly měřítko a život. -Nemohli to udělat v TNG? -Viděli jsme, jak Worf a Data vypustili raketoplán z hlavního hangáru ve "Nejlepším z obou světů, část II", ale start byl vidět zevnitř raketoplánu. -Prostě jen oknem raketoplánu vidíme stěnu hangáru, kterou rychle vystřídá vesmír. -Jediný případ, kdy jsme viděli hangár v plném měřítku, byl v epizodě "Příčina a následek". -Vidíme záběr z vesmíru na otevírající se válcová vrata, dekompresi hlavní hangárové paluby a krátký pohled dovnitř na několik zaparkovaných raketoplánů. -Jaké máte nápady, proč hlavní hangár nikdy nebyl vidět mimo tyto dva případy? -Odhlásili jste se a znovu přihlásili do své aplikace? -Provedli jste oba postupy? -Pokud jste provedli oba postupy a problém to nevyřešilo, mohu vám peníze vrátit na váš účet Store Credit. -Tímto způsobem můžete okamžitě koupit knihu podle vašeho výběru. -Bylo by to v pořádku? -Jste tam? -Z důvodu kvality budu muset tento chat uvolnit, pokud se v příštích 2 minutách neobjeví žádná interakce. -Děkuji vám za kontaktování #PRS_ORG#, bylo mi potěšením vám dnes pomoci. -Doufám, že máte skvělý den. -Ok, prosím, udělejte mi laskavost a postupujte podle následujících kroků. -Zapojte do zásuvky síťový adaptér (není součástí balení) a poté k němu připojte svou čtečku knih. -Stiskněte a podržte tlačítko napájení, dokud se v horní části obrazovky neobjeví slovo „Vypnuto“. -Pokračujte ve stisknutí tlačítka napájení po dobu 3-4 sekund. -Uvolněte tlačítko napájení. -Stiskněte a podržte tlačítko napájení na své čtečce knih po dobu 30 sekund. -Počkejte, až se objeví obrazovka "Obnovit". -Uvolněte tlačítko napájení. -Po resetování se vás čtečka zeptá na nastavení jazyka a sítě WiFi. -Poté se budete muset přihlásit svou e-mailovou adresou a heslem. -Pokud to nepomůže, odhlaste se prosím nyní ze své čtečky knih a znovu se přihlaste.
-Odhlásit se z #PRS_ORG# -Jděte na svou domovskou obrazovku. -Klepněte na ikonu Více v dolní části obrazovky. -Nastavení. -Účty. -Pod #PRS_ORG# klepněte na Odhlásit se. -Objeví se potvrzovací obrazovka. -I když neznáte své heslo, můžete si vytvořit nové heslo podle kroků, které jsem poslal. -Ale nebojte, mohu vám také poslat odkaz pro obnovení vašeho hesla. -Děkuji, že jste si dnes udělali čas na rozhovor se mnou. -Jakmile tento chat skončí, obdržíte e-mail s hodnocením chatu. -Prosím, vyplňte ho, pokud máte chvíli; pokud ale nemáte čas, přeji vám krásný den a ještě jednou děkuji. -Ahoj r/Military! -Jsem ze země, kde je vojenská služba povinná, a jen se ptám, jak to je v ostatních zemích. -Ahoj všichni! -Jsem z Estonska, kde jsem součástí národních obranných sil. -Zde je vojenská služba povinná pro všechny muže ve věku od 16 do 29 let. -Musíte absolvovat buď 8, nebo 11 měsíců výcviku, po kterém budete posláni do "rezervní" jednotky, dokud nedosáhnete 60 let. -Po tu dobu mají obranné síly právo požadovat, abyste se jednou nebo dvakrát ročně účastnili vojenských cvičení v délce přibližně dvou týdnů ročně. -Nicméně nejste povinni jít na zahraniční misi. -Pokud to chcete udělat, musíte se připojit k "oddílu skautů", kde budete profesionálním vojákem s platem a podobně. -Ptám se, jak to je v ostatních zemích? -Pokud se připojíte k armádě například v USA nebo ve Velké Británii, jste povinni jít bojovat do jiné země? -Co si myslíte o povinné vojenské službě? -Během výcviku, když jsem byl v Tapě v letech 2018-2019, tam byly také jednotky z Velké Británie, USA, Francie, Belgie, Dánska a Kanady. -Bohužel jsme ale neměli moc času na sociální interakci a nedostal jsem se k tomu, abych se těch kluků osobně zeptal, jaké to pro ně je sloužit v armádě jejich země. -Vím, že v tomto subredditu jsou pravděpodobně převážně členové NATO, ale bylo by zajímavé slyšet i od ostatních (nečlenských) zemí. -Promiňte mi mou špatnou gramatiku. -Angličtina je můj druhý jazyk. -Omlouváme se, že se vaše objednávka zpozdila. -Prověřil jsem to a vidím, že vaše oblast má v současné době vysoký objem objednávek, proto trvalo déle, než byl k vaší objednávce přidělen jezdec. -Ale jen pro aktualizaci: jezdec už potvrdil svůj příjezd do restaurace. -Francie reaguje na konkurenční nabídku amerických fregat Řecku -Francouzské a řecké ministerstvo obrany obě potvrdily, že konkurenční nabídka od USA nebude mít žádný vliv na již podepsanou a definitivní vícemiliardovou dohodu na nákup francouzských fregat Belharra. -Francouzské ministerstvo ozbrojených sil v sobotu uvedlo, že smlouva o obraně s Aténami byla již „před několika dny parafována“, předtím než americké ministerstvo zahraničí oznámilo své schválení potenciálního prodeje amerických fregat. -Od chvíle, kdy jsme začali jednat s Řeky, americká nabídka už není na stole. -Také jsme podepsali smlouvu s Řeky. -Řecké ministerstvo obrany také potvrdilo, že dohoda s Paříží byla "konečná", protože byla vyjednána na "nejvyšší možné úrovni" a "osobně oznámena" řeckým premiérem Kyriakem Mitsotakisem. -Údajně se očekává, že smlouvy budou brzy ratifikovány řeckým parlamentem. -Agentura pro bezpečnostní spolupráci v oblasti obrany USA ve čtvrtek oznámila, že schválila prodej čtyř bojových fregat od společnosti Lockheed Martin za 6,9 miliardy dolarů a samostatný program za 2,5 miliardy dolarů na modernizaci řeckých fregat třídy MEKO.
-Oznámení vyvolalo některé obavy ohledně dohody Atény-Paříž, zejména po dlouho existujícím "obchodu století" mezi Francií a Austrálií, který byl náhle zrušen bombastickou dohodou AUKUS v září, bez předchozího varování. -Paríž se rozhořčeně obvinil Washington a Canberra z "úderu do zad", zatímco o dva týdny později Macron vystoupil se řeckým premiérem a osobně oznámil prodej alespoň tří francouzských válečných lodí Aténám za kolem 3,5 miliardy dolarů, říkajíc, že je čas "přestat být naivní" a propagovat novou dohodu jako znamení "strategické autonomie a suverenity Evropy". -Francouzská armáda podle tohoto času oznámila, že Spojené státy "nás varovaly, že tato oznámení přijdou" a že Američané údajně neměli "žádnou touhu jít dál" s opravdovým prodejem jejich fregat. -Jenom kontroluji tyto informace pro tebe, nebudu dlouho. -Zkontrolovala jsem to a bohužel to bude bezkontaktní, takže nemohou přinést položku k vašemu majetku, omlouvám se za to. -Varování před bouřlivým počasím, když silné větry představují „nebezpečí pro život“. -Silné větry mají zasáhnout severní části Skotska, což bude mít za následek narušení dopravy, zejména lodních služeb. -Severní západ, Shetlandy a Orkney čelí v noci z neděle na pondělí nárazům větru až 85 mph. -Hebridy, západní pobřeží Highlands a části Argyll a Bute byly varovány, aby byly připraveny na létající odpadky, které představují "nebezpečí pro život" a mohou poškodit budovy. -Odborníci varují, že špatné počasí může vést k výpadkům elektrického proudu, uzavření silnic a mostů a zrušení leteckých a trajektových služeb. -Následují dvě pojmenované bouře, Arwen a Barra, které způsobily rozsáhlé narušení velkých částí země. -Více než 100 000 domů bylo bez elektřiny kvůli extrémním škodám, které napáchal Bouře Arwen 26. a 27. listopadu. -Bouře Barra narušila dodávky pro kolem 10 000 lidí jen 11 dní později 7. prosince. -STV zpravodaj o počasí Philip Petrie řekl, že to bylo velmi téměř tři v řadě. -Met Office sledovalo nízký tlakový systém, který se v noci na neděli pohyboval po severních oblastech a přinášel velmi silné větry a bouřlivé, silné přeháňky. -Met Office vydala žlutá varování před větrem, která začínají platit od 21 hodin v neděli, a to na Západních ostrovech, částech Highlands a Argyll a Bute. -Philip řekl: „V této oblasti je potenciál, že se objeví poryvy větru dosahující rychlosti 80-85 mil za hodinu, což způsobí narušení lodní dopravy a také nějaké škody a výpadky elektrického proudu.“ -Další varování vstupuje v platnost v půlnoci v neděli, které se týká Orkney a Shetlandu. -Philip řekl: "Toto varování trvá až do poledne v pondělí, jak se střed nízkého tlaku přibližuje k Severním ostrovům, opět přinášející poryvy větru o rychlosti 80-85 mph po pobřeží a místně v některých oblastech přesahující 90 mph." -Je to velmi rychlé, takže to bude pryč do pondělního odpoledne, s věcmi začínajícími uklidňovat a uklidňovat se k obědu. -Zbytek týdne bude pokračovat ve vyrovnávání před příštím víkendem. -Zlodějův kalhoty se mu sjíždějí, zatímco se pokouší utéct. -Takže si zaznamenejte, že jsem to neviděl. -Řekli mi to kolegové ve mém prvním obchodním zaměstnání. -Ti dva kluci přišli do obchodu. -Jedna z nich byla v městě docela proslulá, protože se vždycky dostávala do potíží se zákonem. -Po chvíli prohlížení, známý odcházel a vrátil se ke svému vozidlu, zatímco druhý vzal nákupní vozík a do něj vložil velkou 500 dolarovou sada klíčů. 
-Tento společník pak čekal, až budou oba pokladní u východových dveří zaneprázdněni, a pak se jen tak prošel kolem nich a ven z dveří. -Oba si toho všimli a ptali se navzájem, jestli ten kluk zaplatil. -Když bylo potvrzeno, že ne, jeden za ním běžel. -Říkali mi, že pokladní křičela na něj, aby zastavil, když ho pronásledovala, ale on začal běžet s vozíkem k únikovému vozidlu. -Nevím, jestli byl jedním z těch kluků, kteří měli rádi, že nosí kalhoty nízko, nebo neměl pás. -Ale mi bylo řečeno, že mu kalhoty začaly klesat a on se snažil je zvednout, zatímco běžel a tlačil vozík s těžkou sadou klíčů. -Poté opustil vozík, nechávaje sada zásuvek v něm, jak si táhl kalhoty nahoru, a běžel k únikovému vozidlu, skočil do něj se svým proslulým společníkem a odjeli. -Molekulární diagnostický test může detekovat variantu Omicronu během 20 minut: Zpráva -Korejští vědci vyvinuli molekulární diagnostickou technologii, která může detekovat varianty Omicronu. -Technologický vývoj byl nyní dokončen a očekává se, že bude trvat nějakou dobu, než bude komerčně využit. -POSTECH oznámil 10. dne, že výzkumný tým vedený profesorem Lee Jung-wook z Katedry chemického inženýrství vyvinul molekulární diagnostickou technologii, která může detekovat variantu Omicron během 20-30 minut a výsledky bude publikovat online. -Omicron je varianta, ve které jsou 26-32 mutace ve spike, který se používá k infikování buněk virem COVID-19. -Podle výzkumného týmu může technologie molekulární diagnostiky rozlišovat mutace na úrovni jednotlivých nukleotidů, takže může detekovat "Stealth Omicron", které jsou obtížně detekovatelné PCR testy. -V současné době Korea Centers for Disease Control and Prevention používá tři metody k detekci variant COVID-19: analýzu celého genomu, analýzu cílové DNA (mutace, jako je například spike protein) a PCR test. -V případě varianty Delta ji lze zjistit pomocí současného PCR testu, ale Omicron ne. -Nově vyvinutá technologie tentokrát není metoda sekvenování, která čte sekvence DNA nebo RNA, ale molekulární diagnostická technologie. -Technologie, která je k dispozici, zobrazuje pouze specifické oblasti viru, ale molekulární diagnostická technologie byla navržena tak, aby vyvolávala reakce pouze při přítomnosti RNA COVID-19, čímž umožňuje rychlé zjištění. -Podle profesora Lee má Omicron silný signál pro N geny v PCR testech, ale má slabý signál pro S geny. -V případě "Stealth Omicron" byly obě N a S geny potvrzeny jako pozitivní, což ztěžuje jeho odlišení od ostatních variant. -Molekulární diagnostická technologie pracuje na odlišných mechanismech od PCR, účinně detekující variantu Omicron. -Na rozdíl od běžné technologie, která obvykle zpracovává až 96 vzorků na zařízení, nová technologie dokáže zpracovat více než 125 za 30 minut (více než 250 vzorků za hodinu). -Navíc tato technologie nepotřebuje speciální vybavení, takže může vytvářet diagnostické sady jednoduše a snadno. -Metoda může vyvinout diagnostický kit během 4 dnů, takže se očekává, že bude schopna rychle reagovat i v případě, že se v budoucnu objeví nová varianta nebo virus. -"Doufám, že zveřejnění této technologie nám pomůže co nejdříve se vrátit k normálnímu každodennímu životu," řekl profesor Lee. -Budeme se snažit rychle diagnostikovat a reagovat na nové varianty, které by mohly vyjít po COVID-19. -Tato technologie je nyní před komerčním uvedením do provozu. -Nicméně, může být použito jako pomocné v současných situacích, kde nebyl vyvinut PCR test pro Omicron. 
-Profesor Lee řekl: „Myslím, že tato technologie bude blízko komercionalizaci ve druhé polovině příštího roku po klinických zkouškách.” -Důvod, proč zveřejňuji technologii, je sdílet ji s ostatními, aby vyvinuli lepší technologie pro překonání COVID-19 a umožnit také rozvojovým zemím analyzovat varianty COVID-19. -Změna adresy na objednávce není možná, nicméně to může být doručeno na novou adresu. -Můžete volat jezdce, jakmile se blíží k adresnímu místu uvedenému v tomto objednávce, pomocí funkce volání jezdce v aplikaci. -Je mi opravdu líto za nepříjemnosti, můžete mi odpovědět na můj e-mail a rád budu pokračovat ve vaší osobní asistenci, nebo můžete otevřít novou interakci s námi, jak si přejete, rádi vám pomůžeme. -Pamatujte, že naše chatovací služba je pro vás otevřena 24 hodin denně, 7 dní v týdnu. -Děkuji vám za kontaktování #PRS_ORG#, bylo mi potěšením vám dnes pomoci. -Doufám, že máte skvělý den. -Aston Villa je nejnovějším klubem Premier League, který trpí výbuchem Covidu. -Aston Villa se stala nejnovějším týmem Premier League, který utrpěl výbuch Covidu, když bylo objeveno několik pozitivních případů ve společnosti. -Trénink na Bodymoor Heath v neděli byl zrušen jako výsledek, trénink, který byl navržen pro malý počet hráčů k obnově po prohře s Liverpoolem v sobotu. -Není se zatím považováno za vážný výbuch, jak informuje The Athletic, že jen jeden hráč byl pozitivní testován, zatímco ostatní jsou zaměstnanci na tréninkovém hřišti. -Villa čelí venkovnímu zápasu proti Norwich City ve středu večer v Premier League a není žádný náznak, že by měl být zrušen, s tréninkem také očekávaným, že bude probíhat normálně v pondělí. -Identita hráče, který testoval pozitivně, nebyla potvrzena, ani to, zda to byl některý z mužů, kteří se zúčastnili proti Liverpoolu. -Manchester United také utrpěli v neděli nákazu Covidu a zdá se, že o té situaci je větší obava, s nyní údajně ohroženou cestou Red Devilů do Brentfordu v úterý. -Tottenham Hotspur se již potýká s virusem, jejich zápas proti Brightonu v neděli byl odložen poté, co osm hráčů a pět zaměstnanců obdrželo pozitivní výsledky. -West Brom a Queens Park Rangers, které bojují o mistrovský titul, také zaznamenaly výskyt nákazy a utkání QPR proti Sheffield United ve středu bylo odloženo. -Kdokoli, kdo bude pozitivní na omicron variantu Covid-19, bude muset izolovat po dobu 10 dní, stejně jako kdokoli, kdo byl identifikován jako blízký kontakt pozitivního výsledku. -Děkujeme za kontaktování #PRS_ORG#, jste přes #NAME#. -Abych vám mohl pomoci, můžete prosím poskytnout své údaje o účtu (celé jméno, e-mailovou adresu, poštovní adresu a číslo objednávky)? -Dalším krokem je odhlásit se ze svého zařízení. -Před tím, než to uděláte, chtěl bych, abyste zvážili, že všechny vaše poznámky, které jste udělali ve svých knihách, mohou být smazány, stejně jako filtry, postup čtení, stahování a další přizpůsobení. -Pokud máte e-knihy třetích stran, mohou zmizet. -Jsem v HR a v minulosti jsem pracoval s mzdami. -Pokud ke mně někdo přijde a řekne mi, že pracuje na tom, aby se dostal z finančně zneužívajícího vztahu a jeho zneužívatel se dívá na jejich výplatní pásky, -Možná bychom mohli pomoci! -V závislosti na společnosti. -Nemusel jsem udělat žádné z níže uvedených, ale musel jsem držet zaměstnance mimo naše adresáře a učit recepci, aby předstírali, že nevědí, kdo je někdo a jak identifikovat násilníka, kdyby přišel. 
-Mohl bych udělat dohodu o odečtu peněz jako pozdější daňový odpočet, dát tomuto odpočtu nesouvisející název, který by vypadal jako nějaký povinný odpočet, a poté vám tento odpočet samostatně "odeslat" zpět. -Asi bych vás samozřejmě musel požádat o podpis dohody. -Další věc, kterou bych mohl udělat: vést s tebou falešnou e-mailovou konverzaci o tom, proč se ztrácejí tvé výplatní pásky nebo proč tvé heslo nefunguje (poté, co je změníš), a jak se "snažíme to vyřešit, děkujeme za tvou trpělivost!" -Tohle u nás nemáme, ale někteří zaměstnavatelé mohou vyplácet celou výplatu nebo její část na vlastní debetní kartu, bez potřeby banky. -Také mnoho zaměstnavatelů má různé podceňované služby na podporu zaměstnanců. -Ty mohou zahrnovat odbornou pomoc, právní pojištění, slevy a kupóny. -Stojí za to se zeptat, co nabízejí, abyste mohli využít cokoli, co pomáhá. -Některé posilovny vám umožňují pronajmout si skříňku. -Není to ideální místo na schovávání věcí, protože hrozí riziko krádeže, ale je to možnost, která by pro některé mohla fungovat. -To je věc, kterou lidé nechápou. -Matematika neříká, že nemůžete být velmi nemocní, pokud jste mladí a zdraví. -Možná jsem pesimista, ale to je lepší než si myslet, že jsi nezranitelný. -To tě může stát život. -Myslím, že jsem to bral vážně, protože jsem často nemocný a nesnáším to. -Jsem obecně zdravý, ale chřipka mě vždycky zasáhne velmi silně. -Bál jsem se, že Covid bude horší. -Nebylo to tak špatné, pravděpodobně proto, že jsem se nedostal do styku s mnoha viry, ale bylo to dost špatné. -Stejně trvalo měsíce, než se mé tělo vrátilo do normálu. -Nevím, proč to ovlivňuje lidi různě, ale pro mě byly nejhorší bolesti těla a bolesti hlavy. -Objednávka má extrémní zpoždění a ukazuje se, že náš jezdec je již v restauraci. -Nicméně je to divné, protože nedochází k žádnému pokroku. -V tomto případě jsem označil vaši objednávku jako doručenou a zpracuji vám vrácení peněz. -Abyste si mohli zadat novou objednávku. -Prosím, zkuste provést tyto postupy. -Chcete-li opravit svůj účet v aplikaci #PRS_ORG#, postupujte podle níže uvedených kroků: -Z domovské obrazovky aplikace #PRS_ORG# klepněte na tlačítko Více dole na obrazovce. -Opravte svůj účet. -Pokud máte hodně položek, může oprava účtu chvíli trvat. -Jděte zpět na svou domovskou obrazovku, klepněte na knihy nebo audioknihy a zkontrolujte, zda se chybějící položka objeví. -Až skončíte, pokračujte prosím tímto postupem. -Děkuji za vaši trpělivost, zkontroloval jsem informace ve vašem účtu. -Je mi opravdu líto, že máte s vaší elektronickou knihou tento problém, ale rád vám pomohu. -Podělím se s vámi o pár kroků, které je třeba provést ve vašem zařízení, ano? -Francouzští rybáři hrozí narušením britských dovozů v rybářském sporu po brexitu. -Francouzští rybáři hrozí narušením britských dovozů, aby vyvinuli tlak na Londýn ohledně dalších licencí, jelikož napětí mezi Francií a Spojeným královstvím kvůli rybolovným právům po brexitu stoupá. -Hrozba zazněla v sobotu několik hodin poté, co Velká Británie souhlasila s vydáním dalších 23 licencí francouzským rybářům, aby se zmírnilo napětí mezi oběma sousedy, kteří se v posledních šesti měsících potýkají s rybářskou krizí. -Francie usiluje o dalších 81 schválení, aby dosáhla 104 licencí potřebných pro provoz svých lodí ve vodách Británie a Normanských ostrovů podle dohody o brexitu podepsané loni. -Evropská unie stanovila 10.
prosince jako termín pro Londýn, aby udělil licenci francouzským rybářským lodím v rámci brexitu, s Paříží hrozící evropským právním postupem v případě, že nedojde k žádnému přerušení. -Podtržením, že Francie má nárok na kolem 80 dalších britských licencí, skupina zastupující rybáře v klíčovém přístavu Boulogne-sur-Mer a další po pobřeží severního pobřeží hrozila v sobotu večer protesty. -CRPMEM rybářská průmyslová skupina pro region Hauts-de-France ve svém prohlášení uvedla, že se očekávají protesty, které budou cílit na britské dovozy. -Skupina uvedla, že její členové byli "zoufalí" zprávou o pouhých 23 nových licencích a cítili se "zrazeni" Evropskou komisí, která může proti Británii zahájit právní akci kvůli tomuto problému. -CRPMEM řekl, že protesty budou "v souladu s blokádami přístavů v Bretani, Normandii a severní Francii, které proběhly 26. listopadu." -Na tomto dni blokovaly francouzské rybářské lodě krátkodobě trajekty a další lodě v přístavech Calais, Saint-Malo a Ouistreham, zatímco vozidla byla také poslána k narušení dopravy, která se snažila použít železniční spojení Channel Tunnel. -Od té doby bylo uskutečněno několik kol jednání mezi oběma stranami, ale trvalé řešení dosud nebylo vyřešeno. -Je obrazovka šedivá a vidíte obálku knihy? -Abyste zcela vypnuli zařízení, nechte svůj prst stisknutý na tlačítku napájení po dobu 30 sekund. -Tip na čištění hardwaru Androidu -Tenké (0,3 mm - 0,5 mm) SUCHÉ mezizubní kartáčky jsou ideální pro čištění těch malých otvorů, ve kterých jsou umístěny mikrofony a reproduktory vašeho chytrého zařízení. -Jsou to levný produkt a bezpečnější než mnohé jiné metody, jako jsou alkoholy na otírání, zubní kartáčky, jehly a jehličky. -Právě jsem použil tento způsob k vyčištění mikrofonového portu na mém Samsung Galaxy Watch 4 Classic, protože při používání funkce rozpoznávání řeči neregistroval můj hlas. -Po měsících přemýšlení, budu muset zajistit náhradu záruky nebo si objednat opravu. -Po mnoha frustracích křičením na mou hodinku během telefonických hovorů, aby mě bylo slyšet a/nebo pochopeno. -Po následování rad výrobce a použití funkcí vodního zámku, resetování zařízení A obnovení továrního nastavení mého zařízení. -A po prohledávání internetu několikrát. -Zdálo se, že není žádná zaručená spokojenost. -Pak jsem měl zjevení a zkusil mezizubní kartáčky a fungovaly... -Oni pracují velmi, velmi dobře! -Po několika poklepáních a otáčeních tenkou, ale pevnou štětinou, by mělo vaše zařízení fungovat stejně jako když bylo zcela nové. -Doporučuji, aby to bylo provedeno s suchou štětkou a nebudu přijímat žádné následky, pokud se rozhodnete to použít s jakoukoli kombinací jakéhokoli čisticího prostředku. -Teplota ohřívače vody a problém s koupelnou. -Můj ohřívač vody je nastavený docela nízko. -Je to malá nádrž v skříni (žiji ve starém předválečném bytě). -Je otázka, zda se vana naplní až po okraj bez toho, aby se voda ochladila? -Je-li mi stačit jedno koupání týdně (ale sprchování každé dva dny nebo tak nějak) a mám-li dostatek vody v ohřívači na rychlé sprchy, stojí za to zvyšovat teplotu jednou týdně pro koupání? -Ušetřím více elektřiny, když ohřívám svůj hrnec na sporáku a přidávám ho do koupele, až se vyprázdní ohřívač vody? -Děkuji všem za rady! -Zvedl jsem teplotu jen trochu a to stačilo. -Komentář o tom, že vaření je neefektivní, je pravděpodobně správný, protože i když ohřívač vody běží neustále, má tolik izolace. -Je to těžké dostat se tam, takže to nechám na té teplotě a dám tomu dneska sbohem. 
-Druhá věc, kterou potřebujeme, abyste zkusili, je resetovat USB porty ve vašem počítači. -Můžete najít instrukce, jak to udělat, v následujícím odkazu: #URL# -Ujistěte se, prosím, že se pokusíte o vyzkoušení tří metod uvedených v tomto dokumentu. -Pokud po jejich vyzkoušení problém zůstane, ujistěte se, prosím, že nás znovu kontaktujete. -Děkuji za poslání fotografie. -Dejte mi prosím vědět, abych to mohl pro vás dále zkontrolovat. -Zkontrolováním toho znovu zde zdá se, že je tu pouze jeden kus pro kreveta knedlík. -Myslím, že je možné (ale nevím), že jim říkali, že není bezpečné řídit. -Když se tornáda blíží, obvykle počasí říká "Schovejte se teď!!" -Nevíte, jestli to bude za dvě minuty nebo za deset minut, nebo co. -Nevím, jakou právo mají skutečně zakázat lidem odejít, ale mohu pochopit, proč by jim říkali, aby se schovali. -Můžete si představit všechny ty lidi, kteří se snažili dostat ven z parkoviště, když to přistálo? -Všichni by byli zabiti. -Ale kdyby říkali "Pokračuj v práci!" -Vezměte si útočiště! -To je jiné. -Ví někdo, jestli stále pracovali nebo se někde schovávali? -Prosím, použijte funkci "přidat do košíku" k sestavení vaší objednávky, poté se přihlaste a zaplaťte jako obvykle. -Poté vám vrátíme přebytečné poštovné při odeslání. -Pokud byste chtěli vědět dopředu, jaká bude doprava, pošlete nám zprávu, ve které uvedete, jaké položky a velikosti chcete a do jaké země mají být zaslány. -Můžete odeslat mou objednávku na jinou adresu? -Pokud jste ve Velké Británii, nemáme problém poslat na jinou adresu ve Velké Británii, ale musíte si vybrat položku, která má jako výchozí dopravu podepsanou, nebo si vybrat možnost podepsaného doručení při objednávce. -Pro mezinárodní objednávky nemůžeme změnit adresu. -Pokud je chyba, prosím dejte nám vědět co nejdříve, abychom mohli objednávku zrušit a tak můžete znovu nakoupit s správnou adresou. -Mohu dostat míry položky? -Prosím, zkontrolujte oba popis v seznamu a obrázky v seznamu. -Vždy, když je to možné, budeme se snažit umístit velikostní tabulku. -Pokud nenajdete průvodce velikostmi, kontaktujte nás prosím. -Jak se oblečení srovnává s velikostmi ve mé zemi? -Pokud není stanoveno jinak, byly všechny položky navrženy pro trh ve Velké Británii. -Pokud jste v Severní Americe, velikosti v UK jsou trochu menší, takže možná budete muset jít o velikost výš. -Prosím, podívejte se na velikostní tabulky pro další pokyny. -Normálně jsou velikosti UK stejné jako velikosti EU a neměly by být upravovány. -Kdy přijde má objednávka? -Pro Velkou Británii odesíláme téměř všechny objednávky prostřednictvím Royal Mail 1. třídy. -Toto má odhadovanou dobu dodání 1-3 dny. -Pokud potřebujete doručení do druhého dne, nabízíme službu Royal Mail Special Delivery 1pm. -Pro Evropu trvá doručení objednávek mezi 3-5 dny a pro zbytek světa 5-7 dní. -Poté nemůžeme nabídnout odhad časů dodání, protože to závisí na poštovní službě a celním úřadům jednotlivých zemí mimo EU. -Můžu vyměnit za jinou velikost/položku? -Ano, existují dva způsoby, jak to lze udělat. -Prosím, kontaktujte nás a požádejte o adresu pro vrácení. -Když odešlete položku zpět, musíte přiložit poznámku s uvedením vašeho eBay ID a velikosti, kterou potřebujete. -Pokud jste provedli mezinárodní objednávku, polovina původního nákladu na dopravu bude znovu aplikována. -Použijte možnost eBay pro vrácení zboží. 
-Tato možnost je také vhodná, pokud byste chtěli vrácení peněz, protože jakmile obdržíme zboží zpět, vrátíme vám peníze; pokud potřebujete výměnu, prosím, zakupte správné zboží buď před nebo po vrácení, jak je požadováno. -Prosím, použijte funkci "přidat do košíku", aby vaše objednávka zůstala pohromadě. -Pokud jsou položky objednány jednotlivě, nemůžeme zaručit, že budou odeslány společně. -Jakmile jsou všechny položky ve vašem nákupním košíku, prosím, zkontrolujte a zaplaťte jako obvykle a my vám vrátíme přebytečné poštovné. -Pokud jste po označení zboží jako odeslaného ještě nedostali svůj náhradní obnos, pošlete nám prosím zprávu, abychom mohli náhradní obnos zpracovat. -Zahrnuješ účtenku? -Ne, nezahrnujeme do balíčků účtenky, pokud není požadováno. -Pokud potřebujete účtenku, pošlete nám zprávu a my vám ji můžeme poslat emailem. -Pokud potřebujete daňový doklad, kontaktujte nás a my vám ho pošleme emailem. -Čekám už dlouho na svůj objednávku a ještě nepřišla. -Je to možná ztracené? -Pro objednávky do Velké Británie, dejte svou objednávku 7 dní na příjezd, Evropa 21 dní a zbytek světa 30 dní. -Pokud váš objednávka po těchto datumech ještě nedorazila, kontaktujte nás, abychom mohli provést vyšetřování s dopravním agentem. -Prosím, vezměte na vědomí, že to může trvat až 2 týdny, ale jakmile budeme mít aktualizaci, dáme vám vědět. -Jsem mimo EU. -Musím platit nějaké clo nebo celní poplatky? -Prosím, zkontrolujte tyto informace s místními úřady. -Nepřijímáme žádnou odpovědnost za clo nebo daňové poplatky ani nebudeme platit žádné peníze k nim. -Nezměňujeme informace na celních prohlášeních, takže prosím neptejte se. -Děkujeme vám za vaši zákazníky. -Jsme malá firma sídlící na ostrově Man a pokud máte jakékoli dotazy ohledně vaší objednávky nebo jakékoli otázky, neváhejte se na nás obrátit. -Budeme se snažit vám odpovědět co nejdříve, ale může to trvat až 24 hodin. -Pokud jste po této době ještě nedostali odpověď, pošlete prosím zprávu znovu, abychom ji nezmeškali, nebo vzácně, když je problém s zprávami eBay. -Jdi na svou domovskou obrazovku. -Klepněte na ikonu Více (tři vodorovné čáry) dole na obrazovce. -Nastavení -Zobrazit informace o zařízení. -Vedle "Opravit váš účet #PRS_ORG#", klepněte na Opravit. -Opravte nyní. -Proces opravy účtu začne. -Pokud máte hodně knih, může to chvíli trvat. -Chcete-li opravit svůj účet na aplikaci #PRS_ORG#, postupujte podle níže uvedených kroků: -Z domovské obrazovky aplikace #PRS_ORG# klepněte na tlačítko Více dole na obrazovce. -Opravte svůj účet. -Pokud máte hodně položek, může to chvíli trvat, než opravíte váš účet. -Jděte zpět na svou domovskou obrazovku a klepněte na knihy nebo audioknihy a zkontrolujte, zda se objeví chybějící položka. -Prosím, vezměte na vědomí, že pokud zaplatíte za expresní dopravu, čas zpracování objednávky je stále 3 pracovní dny. -Kdykoli bude zboží odesláno, bude odesláno expresní službou, pokud jste za to zaplatili. -Obchodní dny nezahrnují soboty, neděle a státní svátky. -Mezinárodní zásilky obvykle dorazí do 11 až 22 pracovních dní, v závislosti na čase, který je potřeba strávit na celním úřadě. -Dopravní sazba - Zdarma standardní doprava, pokud je uvedeno jako zdarma v produktu. -Poznámka 1: Některé země mohou účtovat dodatečné poplatky na místním celním úřadu. -Prosím, zavolejte na vaše celní úřady, nebo to vyhledejte na Googlu pro přesné poplatky. -Poznámka 2: Všechny celní poplatky nebo daně v zemi kupujícího budou hrazeny kupujícím a my nebudeme nahrazovat žádnou částku. 
-Garantujeme Vaši spokojenost a nabízíme 30denní vrácení peněz (nebo výměnu) zpět. -Pokud z jakéhokoli důvodu nebudete spokojeni s vaším nákupem, nejprve nás kontaktujte, než zanecháte negativní/neutrální zpětnou vazbu, abychom mohli věci napravit! -Máte 30 dní na vrácení zboží od dne, kdy bylo objednáno. -Musíte nám poskytnout sledovací čísla, jakmile zboží zašlete zpět. -Pokud je produkt poškozený nebo jsou odstraněny nebo použity nebo nosíte vy, pak návrat je neplatný. -Nákupní cena musí být doplněna o clo, pokud je v zemi kupujícího vybíráno. -Bereme absolutní opatření, aby byly cenné šperky dobře zabaleny, aby nedošlo k poškození produktu. -Jsou dodány v elegantní krabici, ideální pro darování někomu zvláštnímu. -Zpětná vazba a DSR (podrobné hodnocení prodejce). -Naší prioritou je dosáhnout 100% spokojenosti zákazníka a zajistit, abyste měli skvělý nákupní zážitek. -Můžete se cítit bezpečně, že nám můžete důvěřovat a prosím, kontaktujte nás, pokud máte nějaké otázky nebo komentáře. -Berme vaši zpětnou vazbu s nejvyšší důležitostí. -Pokud z jakéhokoli důvodu není s našimi produkty nebo službami spokojeni, nejprve se na nás obraťte a dejte nám příležitost věci napravit. -Nechceme žádné negativní hodnocení zpětné vazby a tyto nemohou být po dání změněny, takže nám dejte příležitost poskytnout rychlejší řešení pro jakýkoli problém, který můžete mít. -Specializujeme se na šité na míru Solitaire Diamond prsteny, snubní prsteny, svatební pásky, diamantové náušnice, svatební náhrdelníky, přívěsky a volné diamantové Solitaire spolu s mnoha dárkovými předměty. -Také jsme zavedli šperky z diamantů ve 92,5 stříbrném. -Naše nabídka zahrnuje prsteny, náušnice, přívěsky a Mangalsutru. -Máme více než šest desetiletí zkušeností s výrobou šperků. -Také se zabýváme velkoobchodním a vývozním obchodem s ručně vyrobenými a strojově vyrobenými zlatými diamantovými šperky 14 K a 18 K. -Můžete to také resetovat odtamtud. -Nicméně doporučuji resetovat to z vašeho počítače, i když jste přihlášeni na svém počítači, to je pro vás, abyste si zapamatovali své heslo, protože tyto informace je důležité znát nazpaměť. -Jakmile je vaše heslo resetováno z vašeho počítače, zkuste prosím znovu přistupovat na čtečku naší dcery s vaším novým heslem. -Prosím, dejte mi vědět, jestli to funguje. -Kupoval jsem nové pneumatiky. -Našel jsem ty, které jsem chtěl na webových stránkách obchodu s pneumatikami. -Vytiskl jsem stránku a vzal jsem ji do mého místního obchodu. -Byla to součást řetězce. -Ten chlap v obchodě to zkontroloval a vyšlo najevo, že současná cena pneumatik byla vyšší než můj tisk. -Nevím, odkud si vzal vyšší cenu. -Naštěstí byl ten chlap poctivý a místo toho, aby se pokusil prodat pneumatiky za vyšší cenu, mi je prodali za cenu, kterou jsem měl na svém výtisku. -Řekl, že protože jsem měl výtisk, musel mi prodávat pneumatiky za cenu výtisku. -On byl o tom také velmi laskavý. -Kupuji od nich pneumatiky už od té doby. -Děkuji - takže tato dotaz je s skladem, jak bylo uvedeno v chatu včera, musíme čekat na odpověď na vyšetřování. -Jakmile se nám ozve zpět, to je kdy vám bude odeslán email. -Zkusil jsem volat restauraci a také jezdce, ale nebyli schopni odpovědět, omlouvám se. -Můžu vědět, jestli stále ještě očekáváte objednávku? -Pes, který neustále štěkal a jak jsem ho dostal, aby přestal. -Mí sousedé si pořídili psa před třemi lety. -Tyto sousedi a já sdílíme plot. -Odděluje naše zahrady. -No, tento pes štěká a vrčí a snaží se mě kousnout skrz plot celou dobu, kdy jsem venku na zahradě. 
-Zkusil jsem to ignorovat, mluvit tiše atd. -Ale tento pes je šílený. -Údržbáři se toho bojí. -Takže jsem šel a udělal to sousedské věc a zeptal se jich, jak mi pomohou zjistit, jak dostat tohoto psa, aby se uklidnil. -Nemůžu ani využít můj dvůr v tuto chvíli. -Ten pes celý den venku štěká a štěká bez přestání. -Zeptal jsem se, jestli můžu dát zdravé pamlsky skrz plot. -Majitel říká ne. -Zeptal jsem se, jestli bychom mohli jít na polovinu na nešokový obojek na psa. -Majitel říká ne. -Zeptal jsem se souseda alespoň třikrát, aby jí pomohl s jejím psem. -Frustrovaný, ale ještě neochotný zavolat na úřad pro ochranu zvířat, vymyslel jsem plán. -Koupil jsem velmi pěkný přenosný reproduktor, který je *hlasitý*. -Jako, uložil jsem a investoval do toho. -Teď, každý čas, když jdu do mé zahrady, nosím svůj reproduktor. -Zde neexistuje žádné předpisy o denní hlučnosti, zkontroloval jsem to. -Když pes začne štěkat a štěkat na plot, moji sousedé (všichni) si mohou užít trochu Lamb of God nebo Rotting Christ nebo nějakou jinou skvělou hudbu naplno. -Můj reproduktor otřásá stolkem. -Sousedi to neměli dlouho, než si dali dvě a dvě dohromady. -Pes je nyní držen převážně uvnitř a když ven přijde, je to rychlé nebo majitel s ním jde ven. -Sousedství je nyní krásně tiché. -Opravit některé gramatické chyby -PSA: NEDÁVEJTE PSŮM ŽÁDNÉ LÉKY URČENÉ PRO LIDI, JAKO JSOU LAKTIVY NEBO NYQUIL. -To může vážně poškodit a dokonce i zabít zvíře. -Také je reproduktorový systém JYX, pokud by někdo měl zájem. -Bylo to pod 200 dolarů, ale jsem chudý, takže jsem musel trochu šetřit. -Zní to skvěle za ty peníze, i tak. -Jsem ohromen a stejně tak i moji sousedé. -Lavina na lyžařském středisku ve Washingtonu zabila 1 osobu a uvěznila 5. -Lavina se v sobotu prohnala částí lyžařského střediska ve Washingtonu, které se používá k přístupu do zázemí pro lyžování, a zabila 60letého muže a dočasně uvěznila pět dalších lidí. -Hlášení o lavíně bylo kolem 10:50 ráno v oblasti Silver Basin na Crystal Mountain, která se nachází asi 85 mil (137 kilometrů) jihovýchodně od Seattle, řekl Darren Moss, poručík oddělení šerifa Pierce County. -Identita muže, který zemřel, nebyla zveřejněna, ale úřady říkají, že po vytažení z sněhu již nebyl při vědomí a přestože se jiný lyžař snažil resuscitovat, nepodařilo se mu ho zachránit. -Ostatní lyžaři ve skupině se zachránili s pomocí dvou svědků, kteří je viděli, jak jsou unášeni sněhem. -Všichni měli na sobě lavinové vysílačky. -Zatímco všichni ti, kteří byli chyceni v lavíně, byli zkušení lyžaři v horském terénu, bylo vydáno varování proti lyžování v této oblasti, která byla právě uvnitř hranic Crystal Mountain Resortu. -Soukromé lyžařské středisko určuje podmínky, ale nic nebrání lyžařům, aby tam šli, protože pozemek sousedí s veřejnými pozemky v Národním lesním území Mount Baker-Snoqualmie. -Frank DeBerry, prezident a generální ředitel resortu, řekl, že všech šest mužů mělo pasy pro výstup na lyžařský areál, což znamená, že byli registrováni u lyžařské hlídky, účastnili se orientace, jak a kde přistupovat k lyžování v terénu na pozemcích resortu a byli povinni zkontrolovat sněhové podmínky před svou výpravou. -Lyžaři mohou cestovat kdekoli chtějí ve státním lese. -Oni vyšli do lesa, ale nakonec se vrátili do hranic (rezortu), kde se stala tato událost," řekl DeBerry. -Kromě uzavření oblasti, kde došlo k sesuvu, resort dříve v ten den uzavřel Mt. Rainier Gondola kvůli větru dosahujícímu rychlosti 100 mil za hodinu (161 kilometrů za hodinu). -Lavina přišla uprostřed prvního významného sněžení sezóny. 
-Národní meteorologická služba uvádí, že pro oblast platí varování před zimní bouří až do nedělního rána, s možností 12 až 15 palců (38 centimetrů) sněhu v oblastech nad 2 000 stop (610 metrů). -Měli jsme pozdní start sezóny a teď jsme se dostali od téměř žádného sněhu k obrovské sněhové bouři. -"Lidé byli natěšení," řekl DeBerry. -Všichni si musíme pamatovat, že je to sport, který s sebou nese riziko. -Crystal Mountain je největší lyžařské středisko ve Washingtonu, které zahrnuje 2 600 akrů (1 052 hektarů). -Oriflame Optimals Hydra Radiance Hydratační denní krém + Hydra Radiance Hydratační noční krém - Normální/Smíšená pleť -Vyrobeno se švédskou přírodní směsí červené řasy, hnědé řasy a vodních minerálů s vitamínem C a aktivní látkou proti znečištění. -Aqua Minerals udržuje pokožku hydratovanou a pružnou. -Hydratační denní a noční krém, který zanechává žíznivou pokožku jemnou, pružnou a svěží. -Vyrobeno se švédskou přírodní směsí červené řasy, hnědé řasy a vodních minerálů s vitamínem C a aktivní látkou proti znečištění. -Hlavní platební karty a online bankovní převody jsou vítány. -Po vítězném příhozu v aukci je vyžadována okamžitá platba. -Položka bude odeslána stejný den nebo následující den po obdržení platby v plné výši. -Doba dodání je přibližně 10-21 pracovních dní (špatné počasí může způsobit zpoždění dodání a zásilka může dorazit až za více než měsíc). -Za dodatečný poplatek můžeme dohodnout expresní přepravu kurýrem za 5-11 pracovních dní (India post parcel). -Nabízíme slevu na kombinované poštovné při nákupu dvou nebo více položek z našeho obchodu. -Jen se nás zeptejte kliknutím na "Zeptat se". -Mezinárodní zákazníci jsou zodpovědní za clo a daně ve své zemi. -Kupující je zodpovědný za náklady na zpáteční dopravu. -Refundaci lze provést pouze v případě, že není k dispozici žádná náhrada. -Všechny poplatky za dopravu, manipulaci a pojištění nejsou vratné. -Náklady na dopravu neproplácíme. -Naší prioritou je 100% spokojenost zákazníka. -Našich zákazníků si vážíme a poskytujeme zákaznický servis nejvyšší kvality. -Etika a integrita jsou nejlepší částí našeho podnikání a věříme v poskytování produktů a služeb nejlepší kvality za nejlepší ceny. -Navíc je jedním z našich hlavních cílů odpovídat na otázky co nejrychleji a co nejdříve. -Naší prioritou je 100% spokojenost zákazníka. -Cílem je poskytovat výhradně 5hvězdičkovou službu ve všech kategoriích. -Udržujeme 100% spokojenost zákazníků! -Vaše zpětná vazba je pro nás velmi důležitá. -Až obdržíte položku, zanechte nám prosím pozitivní zpětnou vazbu. -Pozitivní zpětné vazby si velmi ceníme a my vám také zanecháme pozitivní zpětnou vazbu. -Pokud jste z jakéhokoli důvodu nespokojeni, nezanechávejte prosím neutrální nebo negativní zpětnou vazbu. -Dejte nám šanci a my do toho dáme všechno. -Rádi se o problém rychle postaráme a dáme vám uspokojivou odpověď. -Zkontroloval jsem to tady a zdá se, že jezdec tam šel. -Zkontrolovali jste svou předsíň nebo recepci? -Mohl to tam nechat. -Omluvte nám prosím způsobené potíže. -Omluvte prosím tyto nepříjemnosti. -Je tu ještě něco jiného, s čím bych vám mohl pomoci? -To je poprvé a doufám, že naposledy. -Mějte krásný zbytek dne a šťastný nový rok! -Už jsem to udělal několikrát, ale nefunguje to. -Vystavil jsem vám náhradu za knihu. -S řešením jsem velmi nespokojený; co mám dělat, když se problém objeví znovu u další knihy? -Je jen malá pravděpodobnost, že se to stane. -Promiňte, z důvodu kvality musím tento chat uzavřít, pokud neobdržím odpověď do 2 minut.
-Zavřu teď tento chat, protože nebyla obdržena žádná odpověď. -Ještě jsem neviděl žádné komentáře od Australanů, takže bych mohl říct pár slov. -Je obtížné najít vybavení, které je jedinečné nebo mimo hlavní proud. -Většina desek jsou desky pro masový trh, jako Sector 9 nebo Loaded Tan Tien... Mám obě a nemůžu si stěžovat. -Pokud chci něco neobvyklého, kupuji přímo od výrobce nebo přes Muir. -Doprava je vždy problém a vždy je drahá. -Opravdu jsem chtěl Tortugu, ale když bylo všechno zaplaceno, bylo to více než 500 australských dolarů (včetně poštovného a kurzu). -Samotná doprava stála přibližně 100 USD. -Rozumím, že tohle není něco, nad čím máte kontrolu... Jen jsem chtěl ilustrovat úvahy a kompromisy, které se dělají na této straně světa. -Nakonec: milujte své desky! -Moc si toho vážím. -Bezplatné školení CompTIA A+ | Pokryje celý kurz -Momentálně poskytuji zdarma školení ke kurzu CompTIA A+. -Kurz se skládá z 18 modulů a ke každému modulu natočím samostatné video. -Některá z těchto videí mohou být trochu delší, protože každé video pokryje celý modul, takže prosím využívejte časová razítka v popisech, pokud hledáte pouze konkrétní témata nebo si chcete jen osvěžit určitá témata. -Časová razítka jsou tam, aby vám usnadnila život, takže je to vaše vlastní vina, pokud se nakonec budete prohánět moduly sem a tam jako šílenec hledající své ztracené zuby. -Pro tento kurz udělám 20 videí: první bude jen 4minutové úvodní video vysvětlující kurz, poslední bude video s tipy ke zkoušce a 18 videí mezi tím budou samozřejmě vaše moduly. -Školení by mělo stačit k tomu, abyste prošli oběma mezinárodními zkouškami A+, a ostatní kurzy, které poskytuji, by také měly stačit k tomu, abyste prošli příslušnými zkouškami, pokud je s daným kurzem nějaká zkouška spojena. -Pokud máte otázku ohledně konkrétního tématu v modulu nebo o kurzu obecně, kterou byste chtěli více vysvětlit, neváhejte se zeptat a já se vám pokusím pomoci, pokud budu online. -Zde je úvod do kurzu. -Úvod do kurzu CompTIA A+ -Nabízíme devět typů plakátů: -Prosím, vyberte požadovaný formát plakátu z rozevírací nabídky. -Plakáty jsou odesílány v pevné kartonové obálce A5. -Když je 6x4" (10x15 cm) příliš malé, -Plakáty jsou odesílány v pevné kartonové obálce A5. -Vysoce kvalitní fotografický tisk s lesklou povrchovou úpravou. -Vysoký lesk dodává tisku život, takže barvy působí živě a ostře. -Plakáty jsou odesílány v pevné kartonové obálce A5. -Tištěno na super prémiovém polomatném fotografickém papíře, poskytuje vysokou barevnou definici s omezeným odrazem přímého světla. -Plakáty A3 jsou odesílány v kartonové trubici na plakáty. -Tištěno na vysoce kvalitním super prémiovém polomatném fotopapíru o hmotnosti 280 g, poskytuje vysokou definici barev s omezeným odrazem přímého světla. -Plakáty A2 jsou odesílány v kartonové trubici na plakáty. -Naše laminované plakáty A4 a A3 jsou pokryty plastem a mají na každé straně přibližně 2 mm tenký průhledný plastový rámeček. -Rám není součástí dodávky. -Rám A4 lze pověsit, nebo může stát volně. -Obrazy A4 s rámečkem se dodávají s černým dřevěným rámečkem se skleněnou přední stranou. -Obrázky jsou v pevné kartonové obálce uvnitř krabice s rámem. -Pokud potřebujete tisk s okraji nebo bez nich, zanechte nám prosím zprávu. -Různé počítačové obrazovky, operační systémy a dokonce i různé webové prohlížeče mají různé barevné charakteristiky, takže je téměř nemožné, aby daná barva vypadala stejně na každé obrazovce. -Pokud barvy plakátů neodpovídají vašim očekáváním, pošlete nám prosím zprávu.
-Ve většině případů to můžeme změnit, aby to vyhovovalo vašim potřebám. -Je to běžná funkce, kterou zařízení má; pokud chcete ušetřit více energie, můžete provést tyto kroky: -Jděte na svou domovskou obrazovku. -Klepněte na ikonu Více dole na obrazovce. -Nastavení. -Úspora energie a soukromí. -Klepněte na seznam vedle „Automaticky usnout po“ a vyberte čas, po kterém váš #PRS_ORG# eReader usne. -Čím kratší doba, tím delší výdrž baterie vaší čtečky knih. -Klepněte na seznam vedle „Automaticky vypnout po“ a vyberte čas, po kterém se váš #PRS_ORG# eReader vypne. -Čím kratší doba, tím delší výdrž baterie vaší čtečky knih. -Vidím zde, že tento jezdec dorazil na vaši lokalitu ve 12:39. Snažil se tuto objednávku doručit do 12:52. -Jezdec se pokusil nechat objednávku na bezpečném místě, ale to nebylo akceptováno. -Proto si jezdec objednávku vzal s sebou, když odjížděl. -Vámi zakoupená položka bude odeslána prostřednictvím Royal Mail nebo národní kurýrní společnosti. -Snažíme se odeslat zboží stejný nebo následující pracovní den po obdržení platby, v závislosti na čase nákupu. -Uzávěrka je ve 12.00 v poledne. -Během státních svátků a víkendů objednávky nezpracováváme ani neodesíláme. -Všechny objednávky odesíláme podle toho, ale v některých případech se může stát, že vámi zakoupená položka bude vyprodaná. -V tomto případě vás budeme informovat, ať už o tom, kdy bude zboží znovu skladem a připravené k odeslání, nebo abychom vám nabídli alternativní možnost. -Máte právo objednávku zrušit, pokud si to přejete. -eBay poskytuje odhadované datum doručení, které nezahrnuje žádné předpokládané zpoždění pošty/kurýra. -To může zahrnovat špatné počasí, poruchu systému, stávky zaměstnanců atd. -Tyto problémy nejsou zcela pod naší kontrolou, takže to prosím mějte na paměti. -Odesíláme zboží s očekáváním, že kurýři poskytnou službu, ale někdy mohou zklamat, a to není naše vina. -Pokud kurýr nemůže doručit, měla by příslušná doručovací společnost zanechat kartu, která vysvětlí, jak domluvit opětovné doručení nebo kde je balík uložen k vyzvednutí. -Pokud byla zásilka vrácena do depa kurýra, poskytnou vám určitou dobu na její vyzvednutí. -Pokud nebude v této době vyzvednuta, zásilka bude vrácena zpět k nám. -Potom bychom od vás požadovali, abyste nám uhradili náklady na opětovné odeslání zásilky zpět k vám. -Pokud položku již nepotřebujete, bude vrácena částka snížená o poštovné. -Pokud z jakéhokoli důvodu nebudete s nákupem spokojeni, můžete položku do 30 dnů vrátit a dostat peníze zpět. -Vrácení přijímáme pouze v případě, že položka je v původním prodejním stavu, což znamená, že položky nesmí být použité, nošené ani označené, nesmí mít žádný zápach ani zvířecí chlupy a nesmí být ve stavu, v němž je nelze znovu prodat. -Věci musí být vráceny v původním balení se všemi připevněnými produktovými štítky. -Prosím, buďte při zkoušení oblečení opatrní, abyste na sobě neměli make-up, vlasové produkty, parfémy, deodoranty nebo jiné krémy či látky, které by mohly produkt poškodit nebo na něm zanechat stopy. -To by pouze vedlo k tomu, že váš vrácený produkt nepřijmeme k vrácení peněz. -Abychom vám položku poslali zpět, budeme od vás požadovat platbu za poštovné. -Na platbu poštovného za zboží budeme čekat maximálně 30 dní a po této době bude zboží zlikvidováno. -Položky musí být vráceny do 30 dnů od přijetí. -Pokud je položka doručena vadná nebo jsme odeslali špatnou položku, zaplatíme vrácení položky my. -Nejjednodušší cestou by bylo otevřít žádost o vrácení prostřednictvím eBay.
-Jakmile bude přijato, prohlédneme si položku a vrátíme vám peníze na účet. -Věci, které se po nošení poškodí, budou podrobeny kontrole při návratu k nám. -Pokud se závada považuje za skutečnou výrobní vadu, budete vám vráceny peníze. -Pokud se nejedná o výrobní vadu, bude vám položka po zaplacení nákladů na její vrácení zpět zaslána. -Znovu budeme čekat 30 dní na provedení této platby, po které bude položka zlikvidována. -Nejjednodušší cestou by bylo vrátit zboží pro vrácení peněz prostřednictvím vrácení Ebay a poté jednoduše zakoupit požadovanou velikost nebo barvu zpět od nás. -Všechny vrácení jsou zodpovědností odesílatele, dokud nedorazí k nám. -Prosím, získejte Potvrzení o zaslání od pokladního pošty. -Pozitivní zpětná vazba je vždy vítána, ale pokud z jakéhokoli důvodu nastane problém s vaším nákupem, dejte nám prosím šanci tento problém vyřešit. -Doufáme, že naše zákaznická služba bude pro vás velmi uspokojivá. -Děkuji za poskytnuté informace, doufám, že se máte dobře. -Prosím, nechte mě ověřit váš účet #PRS_ORG#. -Budu rád, že vám pomůžu. -Prosím, dejte mi chvíli. -Děkuji, že jste čekali, omlouvám se, že vaše matka nedostala dárkovou kartu, prosím, potvrďte email, který byl odeslán. -Bojím se, že nebudu schopný potvrdit cenu postele, dokud nebude znovu dostupná na webu, protože byla snížena v naší zimní slevě, je pravděpodobné, že to nebude cena článku, když bude znovu přidána na web. -Prosím, počkejte na objednávku a můžete nám nahlásit, pokud někdy jídlo není horké, takže vám můžeme pomoci. -Vím, že mít kredit nezlepší vaši zkušenost s jídlem, ale dovolte mi, abych kompenzoval zpoždění. -Makešup Munster porazil Wasps v zápase Champions Cupu -Munster převálcoval Wasps v napínavě chaotickém zápase Heineken Champions Cup, který se odehrával mezi náhradními týmy obklopenými problémy s Covidem a zraněními. -První poločas, který byl jako horská dráha, skončil s Munster vedením 13-7 poté, co kapitán Wasps Brad Shields byl kontroverzně vyloučen za nebezpečný zákrok na Dave Kilcoyne. -A s Danem Frostem, který byl vyloučen na půli hrací doby, bylo jejich 13 mužů na kolenou a Munster reagoval tím, že vybudoval přesné pokusy pro debutanta Patricka Campbella a Andrewa Conwaya. -Alfie Barbearyho všestranný výkon uzavřený úžasným pokusem dal Wasps naději, ale byl neočekávaně vystřídán ve druhé půli a od okamžiku, kdy Campbell ukázal svou třídu a skóroval ve 43. minutě, to byla jednosměrná ulice. -Chybí 17 hráčů kvůli zranění, Wasps se také museli vypořádat s dalšími čtyřmi ztrátami kvůli Covidu ráno, což vedlo k rychlému překonfigurování týmu. -Munster mezitím postrádali 34 členů týmu kvůli karanténě po jejich nedávné nešťastné cestě do Jihoafrické republiky na United Rugby Championship, což vytvořilo pět debutantů v základní sestavě a dalších sedm na lavičce. -Kriticky však byli přítomni hvězdy Irska jako Tadhg Beirne, Peter O'Mahony, Conor Murray a Keith Earls, aby posílili jejich řady. -Pro všechny, kteří chyběli, to byl příjemný zážitek, kde se zdálo, že se může stát cokoli, alespoň dokud Munster neukázal neuvěřitelnou hloubku svých herních zdrojů, aby se odpoutal. -Covid-ovým ovlivněným začátkem dne včel bylo ještě zhoršeno, když se hra rozběhla, když Thomas Young byl odmítnut jistým pokusem O'Mahonym excelentním pokrytím. -Joey Carbery poslal trestný kop mezi tyče, aby vyrovnal skóre pro Munster, ale Jimmy Gopperth trefil břevno a tak pokračovala smůla dvakrát vítězného týmu v jeho nešťastném úvodu. 
-Ale jejich skrum byl poskytován jako opěrný bod ve hře a vyžadovalo se horečnatou obranu, aby se jejich maul držel na uzdě, dokud nevyprodukovali první z dvou rychlých obratů. -Munster bojoval o každý míč v odporu svým absentérům a jejich vítězné naděje obdržely dramatický zisk, když byla Shieldsovi udělena jeho pochybná červená karta, s rozhodčím Romainem Poitem říkajícím, že jeho rameno se dotklo krku Kilcoyneho. -Carbery vystřelil jednoduchou penaltu na tyč a i když byl na cíl brzy poté, následovala dramatická změna, když Wasps vykradli úžasný try skrze Barbearyho. -Ukončilo to vzrušující období end-to-end rugby, ve kterém soupeři střídali útoky z hlubokého pole a Barbeary, který byl u srdce domácích útoků, zasadil rozhodující úder. -Murrayův odraz poskytl Earlsovi nejjednodušší pokus, když Munster zaútočil zpět, a pak Frost odešel do sin-binu, když domácí strana byla snížena na 13. -Nezabralo to dlouho, než se projevila výhoda v personálu, když Beirne zahájil útok, který skončil skvělým zakončením zadního hráče Campbella. -A Munster byli mimo zrak v 49. minutě, když se volný pas během slibného protiútoku dostal k Conwayovi, aby ho zvedl a dokončil jednoduchý běh. -Hooker Scott Buckley, muž zápasu při svém debutu v Munsteru, byl dalším z lineoutu a to byla hra vyhrána, i když Michael Le Bourgeois vybral kvalitní linku, aby zlepšil konečné skóre Wasps. -Velká kapsa a 1 kapsa na zip. -5 palce široký a 58 palců dlouhý nastavitelný kůží ramenní popruh -Taška vyrobená v prostředí bez zvířat a kouře. -Přirozeně opálené pouze se slunečnicovým olejem, bez použití barviv nebo chemikálií. -Unikátní vlastnosti ručně vyráběných kůží tašek - -Taška je vyrobena z pravého kůže z kozího plného zrna, zpracovaného a tmavěného pouze slunečnicovým olejem. -Každá vintage kůže taška je úplně přírodní a ručně vyrobený produkt, proto se barvy a dokončení mohou lišit od jednoho kusu k druhému. -Každá taška má jedinečný antikní kůže / lehce poškozený vintage kůže vzhled. -Části z několika kůží mohou být použity k výrobě jednoho kůží kabelky. -Takže může být nějaká variace ve vzoru a textuře různých částí tašky, což vytvoří úžasně jedinečný efekt. -Kvůli různým řemeslníkům a mohou být malé rozdíly ve stylu, konstrukce tašek je zobrazena na webových stránkách. -Podšívka může být světlejší nebo tmavší barvy než je ukázáno na obrázcích. -Prosím, napište nám, abyste zjistili aktuální barvu skladu. -Pravá kůže může mít velmi malé řezy / jizvy / značky. -To neznamená, že je poškozené. -Může tam být také viditelné záhyby v kůži. -Tyto funkce ukazují pravý původ našich kůží tašek a messengerů, tvoří součást tašky a neovlivňují její odolnost. -Čistá kůže může trochu vonět, když je čerstvá, ale vůně zmizí s používáním. -Prosím, nechte to na slunci a čerstvém vzduchu po několik dní. -Může existovat místní zvyky / clo, které jsou nám neznámé a mimo naši kontrolu. -Kupující jsou zodpovědní za CLOVEKOVÉ ZÁSOBY na místě určení. -Vidím, můžu mít vaši verzi softwaru čtečky? -Najděte verzi softwaru pro svůj eReader: -Jdi na svou domovskou obrazovku. -Klepněte na ikonu Více v dolním pravém rohu obrazovky. -Nastavení. -Klikněte na Informace o zařízení. -Vedle "verze softwaru" uvidíte číslo verze vašeho e-čtečky. -Wendy Rogers volá novozélandskému premiérovi "Leninem s vlasy" a tvrdí, že ve Spojených státech jsou "satanskí komunisté". -Republikánská arizonská senátorka Wendy Rogers v neděli nazvala novozélandskou premiérku Jacindu Ardern "Leninem s vlasy" a varovala před komunismem ve Spojených státech. 
-Rogers se zdálo, že kritizuje reakci Ardernové na COVID, když se ve svém Tweetu odvolala na sovětského vůdce Vladimira Lenina a přiložila krátký záběr premiérky.
-Rogers ve svém Tweetu nevysvětlila svou kritiku Ardernové dále.
-V klipu Ardern mluvila o dezinformacích ohledně COVIDu a o úsilí Nového Zélandu o to, aby lidé byli informováni o pandemii.
-"Potřebujeme více odvážných křesťanů ve vládě, aby se postavili satanským komunistům ve všech stranách," napsala arizonská senátorka v dalším tweetu v neděli.
-Její tweet byl přijat s posměchem od různých uživatelů sociálních médií, s jednou osobou, která tweetovala zpět: "Prosím, nabídněte své definice komunismu a křesťanství, protože si myslím, že ani jednomu nerozumíte."
-"Vidím, že Wendy dnes jde naplno, snaží se soutěžit s nejšílenějšími ze šílených," napsal další člověk.
-Rogers se ke svému postoji proti komunismu na sociálních médiích hlasitě vyjadřovala už dříve.
-V září nazvala Den práce "komunistickým svátkem" bez dalšího vysvětlení.
-Její tweet byl vysmíván mezi uživateli sociálních médií, včetně The Arizona House Democrats, kteří odpověděli: „Říká srdce a duše arizonské republikánské strany (dokažte nám, že se mýlíme).“
-"Víš, že když budeš nadále falešně označovat všechno dobré za komunistické, jenom to komunismu dodá na atraktivitě?", zeptal se další uživatel sociálních médií.
-Republikánská senátorka Wendy Rogers varovala před komunisty v Americe a vyzvala k tomu, aby ve vládě bylo více "odvážných křesťanů".
-Spisovatel Shiv Ramdas také odsoudil tweet, parafrázováním jejích vlastních slov: "'pracovat je komunismus.'"
-Rogers opakovaně naznačovala, že Donald Trump vyhrál prezidentské volby v roce 2020, a samostatně vyzývala k novým volbám.
-Volám po tom, aby byli Bidenovi volitelé v Arizoně odvoláni a aby byly provedeny nové volby.
-"Arizonští volitelé nesmí být uděleni na základě podvodu..." tweetovala senátorka v červenci.
-V červenci Rogers vedla kampaň za zneplatnění voleb a dříve spustila petici, která podle ní získala 663 000 podpisů.
-Věci se opravdu rozjíždí!
-Pojďme na to, ať co nejdříve dosáhneme 1 milionu.
-Výsledky auditu se brzy dostaví, více států se připojí, napsala v září na Twitteru.
-Trumpova podporovatelka také prosazovala odmítnutá tvrzení o podvodu s voliči v Arizoně.
-Newsweek kontaktoval kancelář senátorky Rogers s žádostí o komentář.
-Jdi na svou domovskou obrazovku.
-Klepněte na menu (3 vodorovné čáry) na ikonu Více dole na obrazovce.
-Nastavení.
-Účty.
-Pod #PRS_ORG#, klepněte na Odhlásit se.
-Objeví se potvrzovací obrazovka.
-Odhlásit se.
-Další věc, kterou můžete zkusit, je provést tovární resetování vašeho zařízení a poté zkontrolovat, zda jej váš počítač detekuje.
-Pro provedení tohoto postupu prosím postupujte podle následujících instrukcí:
-Pokud je to možné, zálohujte knihy nebo dokumenty, které jste přidali do své čtečky knih pomocí #PRS_ORG#, nebo které jste ručně přidali pomocí počítače.
-Nemusíte zálohovat žádné knihy, které jste si koupili od #PRS_ORG#.
-Jakékoli knihy, které jste zakoupili od #PRS_ORG#, můžete po provedení továrního resetu znovu stáhnout z #PRS_ORG# #PRS_ORG#.
-Jdi na svou domovskou obrazovku.
-Klepněte na Domů v horní části obrazovky.
-Nastavení.
-Zobrazit informace o zařízení.
-Klepněte na Tovární obnovení v sekci Pokročilé.
-Stiskněte Nyní Resetovat.
-Stále jsem s tebou.
-Dlouhé fronty na trička od Banksyho podporující protestující, kteří strhávají sochy.
-Zoufalé davy snažící se koupit trička navržená tajemným pouličním umělcem Banksym byly k vidění v Bristolu ve Velké Británii.
-Byli propuštěni, aby podpořili protestující, kteří byli souzeni za svržení sochy otrokáře během pochodu Black Lives Matter. -Banksy navrhl limitovanou edici "suvenýrů triček" k označení soudu čtyř lidí obviněných z poškození kontroverzní sochy v Bristolu minulý rok. -Umělec napsal na Instagramu: „Všechny výtěžky půjdou obžalovaným, aby si mohli dát pivo.“ -Prodává se za 25 liber ($33) s DPH a omezeno na jeden kus na osobu ve více obchodech, tričko bylo tak žádané, že lidé čekali v řadě kolem bloků, aby si ho mohli koupit. -Skoro dvouminutové video zveřejněné na Twitteru ukazuje nekonečnou řadu zákazníků. -Česká média informovala, že "tisíce" byly nadšené získat peníze pro protestující tím, že koupí šedou tričko, které zobrazuje prázdný piedestal s nápisem "Bristol" nad ním. -Odkazuje to na svržený bronzový pomník 17. století obchodníka Edwarda Colstona, který se podílel na transatlantickém otroctví. -Aktivisté, známí jako "Colston Čtyři", čelí soudnímu procesu na Bristolském korunním soudu příští týden, obviněni z páchání trestného poškození památníku patřícího městské radě. -Muži - kteří všichni vznesli nevinu - jsou obviněni z potopení sochy "bez zákonné omluvy". -Někteří ho oslavovali za to, že po své smrti odkázal peníze na různé charitativní účely, ale v červnu 2020, když se ve městě konala protestní akce podporující hnutí Black Lives Matter (BLM), byla jeho socha napadena. -Poškozený podstavec a pomalovaná socha byla později zachráněna městskou radou z Bristolského přístavu, kde byla během nepokojů hodena, a znovu se objevila jako místní muzejní exponát, spolu s vybranou kolekcí plakátů BLM z pochodu. -Socha BLM protestujícího byla postavena na prázdném piedestalu, který dříve zaujímal Colston. -Nemohu udělat žádné změny, jakmile je objednávka vyřízena, ale když jezdec opustí restauraci, budete moci s ním kontaktovat prostřednictvím aplikace. -Můžete také sledovat svého jezdce prostřednictvím aplikace a zavolat jim, jakmile jsou blízko. -Pro budoucí objednávky můžete svému řidiči přidat instrukce tím, že upravíte své uložené adresy v aplikaci. -Bohužel ceny položek jsou takové, jak jsou zobrazeny online, nemůžeme je pro vás změnit nebo snížit. -Časové prodlevy jsou uvedeny na webových stránkách. -Nemáme skladem žádné zboží, proto jsou všechny položky vyrobeny na objednávku, odeslané na nás sem na #URL# a poté odeslané na vás. -Prosim, dovolte tyto časové limity. -Časový odstup ukazuje, kdy přijde další dávka. -Rodina uctívá "energetického" osmnáctiletého, který byl v Birminghamu bodnut k smrti. -Rodina teenagera, který byl v Birminghamu bodnut k smrti, ho popsala jako "mladého, energického 18letého" který snil o tom, že bude specialistou na digitální marketing. -Yahya Sharif byl nalezen vážně zraněný na Coventry Road v Small Heathu, těsně před 17.30 hodinou v pátek, uvedla West Midlands Police. -Policie byla na místo přivolána záchrannou službou. -Přestože se záchranáři snažili sebevíc, bylo potvrzeno, že Yahya z Nechells zemřel na místě. -Postmortální vyšetření odhalilo, že zemřel na bodnou ránu do hrudníku. -Vydané prohlášení rodiny říká: "Nemůžeme uvěřit, že Yahya zmizel z našich očí." -Stále ještě nevíme, proč byl zabit. -Mladý, energický 18letý, jeho sen byl být specialistou na digitální marketing. -Celá komunita je šokována. -Ať je s rodinou, kterou opustil, zejména s jeho rodiči, Bůh. -Detektivové shromažďují záznamy z kamer a další důkazy, aby se snažili sestavit, co se stalo, a identifikovat a vypátrat, kdo bodl teenagera. 
-Detektivní inspektor Hannah Whitehouse z oddělení vražd řekla: "Yahya bylo jen 18 let a měl před sebou celý život." -To bylo teď odebráno v nejtragičtějších okolnostech. -Neexistuje žádný jasný motiv pro útok a my pracujeme na plný úvazek, abychom identifikovali a vypátrali, kdo byl za to zodpovědný. -Mluvili jsme s několika svědky, ale stále potřebujeme slyšet od každého, kdo má informace, které nám mohou pomoci. -Žádám ty, kteří tam byli v tu dobu, aby udělali správnou věc, přišli a mluvili s námi a řekli nám přesně, co se stalo a proč. -To je nejméně, co rodina Yahyovi zaslouží. -Kdokoli s jakoukoli informací by měl zavolat na číslo 101, uvádějící referenční číslo 3643 10/12/21. -Rozumím, ale můj koleg vysvětlil včera, že musíme být v kontaktu s skladem, to bylo uděláno pro vás - takže čekáme na odpověď. -Jakmile budeme mít informace, můžeme vám pak sdělit, kde se nachází vaše objednávka. -Položka měla být odeslána 18.12. -Změňte nastavení písma pomocí nabídky dole. -Změňte styl písma: Klepněte na rozevírací nabídku vedle „Písmo“, abyste vybrali ze seznamu dostupných písem. -Česky: Upravte velikost písma: Přetáhněte kruhovou ikonu vedle "Velikost písma" pro změnu velikosti textu. -Česky: Upravte mezery mezi řádky: Přetáhněte kruhovou ikonu vedle "Mezery mezi řádky" pro zvýšení nebo snížení mezery mezi řádky písma. -Upravit okraje: Přetáhněte posuvník vedle Okrajů, abyste okraje zvětšili nebo zmenšili. -Nastavte zarovnání textu: Vedle "Zarovnání" vyberte svou volbu zarovnání. -Když změníte způsob, jakým vypadá text, váš čtečka knih si zapamatuje vaši oblíbenou velikost a styl a aplikuje je na ostatní knihy, které čtete. -Pokud čtete PDF, nemůžete změnit velikost ani styl textu. -Zmeškali jste Shiba Inu? -EverGrow může být další velkou kryptoměnou, která exploduje v roce 2022. -Shiba Inu je nejnovější meme-krypto, které se stalo virálním, a přestože jeho hodnota klesla o téměř 60 % od svého historického maxima, jeho tržní kapitalizace stále činí ohromujících 20 miliard dolarů, čímž se stává 12. největší kryptoměnou na světě podle hodnoty. -Investice ve výši 100 dolarů při spuštění by dnes byla v hodnotě více než 2 miliony dolarů! -Mnoho lidí jistě lituje, že propásli takové zisky, ale realita je taková, že sázka na Shiba Inu byla čistou hrou. -Shibaův běh byl kombinací velmi chytrého marketingu a velkého množství hype, které vedly houfy investorů s FOMO (strachem z toho, že něco přijdou o) k nákupu memové mince. -I sama název, který byl hold Elona Muska podporovanému Dogecoinu, byl součástí designu. -Ve skutečnosti Shiba Inu nenabízí žádnou hmatatelnou užitečnost ani hodnotu a zdá se, že se o to v budoucnu moc nesnaží. -Být na blockchainu Ethereum, bylo by tu spousta příležitostí pro vývoj, kdyby tým za Shiba Inu byl motivován to udělat. -Existují však některé kryptoměny, které se snaží vyniknout a podpořit svou oblíbenost skutečnou užitečností a základní hodnotou. -Před pouhými 10 týdny byla spuštěna měna EverGrow Coin ($EGC) týmem zkušených odborníků z oblasti financí, blockchainu a marketingu. -Jedním z převratných prvků jejich projektu je skutečnost, že token platí držitelům stabilní měnu. -Od spuštění se držitelé EverGrow Coin dočkali více než 30 milionů dolarů odměn v binance-pegged USD - stabilní, regulovanou měnu, která je 1-k-1 s USD. -Projekt podle BSCScanu aktuálně má 110 000 držitelů. 
-Se svou revoluční smlouvou se EverGrow Coin rychle zvýšil na více než 1 miliardu dolarů v tržní kapitalizaci, ale pak se na CoinMarketCap objevila velká chyba dat, jen týdny po spuštění, což způsobilo masovou paniku mezi investory.
-S takovým novým projektem může trvat dlouho, než se vybuduje důvěra, a tento panický stav byl využit řadou článků, údajně placených rivaly projektu, které používaly nesprávná data k tomu, aby odradily investory EverGrow od projektu.
-Během příštího měsíce zůstaly chyby neopraveny a hodnota společnosti EverGrow klesla pod 300 milionů dolarů.
-Včera CoinMarketCap umístil na stránku EverGrow upozornění potvrzující, že chyba v datech byla opravena.
-Cena se nyní stabilizovala a jako znamení návratu důvěry vzrostla o 22 % od nedávných minim.
-Nicméně EverGrow stále zůstává pod vysokými hodnotami dosaženými před touto chybou.
-EverGrow je velmi odlišný od Shiba Inu.
-Kromě zřejmých výhod odměn USD tým za projektem již spustil SWAP dApp na své webové stránce, nedávno odhalil nadcházející vydání Crypto Peněženky, která slibuje překonat funkce nabízené Trust Wallet nebo Safemoon Wallet a má celou řadu nástrojů, od platformy pro tvorbu obsahu po NFT Market Place & Lending, které mají přinášet trvalou hodnotu investorům.
-Je EverGrow Coin další Shiba Inu?
-S Shiba Inu, která nabízí velmi málo nebo žádnou užitečnost a je hodnocena kolem 66krát výše než EverGrow Coin, je tu jasný argument pro to, aby inovativní a převratný projekt jako EverGrow zaznamenal vážný růst ze své současné nízké tržní kapitalizace.
-Pokud tým bude nadále imponovat krypto komunitě svou inovací a transparentností a dokáže odstranit strach mezi investory způsobený chybami CoinMarketCap, je dobrá šance, že EverGrow Coin bude jednou z nejlepších kryptoměn, do kterých se v roce 2022 vyplatí investovat.
-Írán hlásí nejnižší počet denních případů COVID-19 za více než jeden rok.
-Íránské ministerstvo zdravotnictví zaregistrovalo 1 686 nových denních případů COVID-19, což je nejnižší počet za posledních 460 dní, což představuje výrazný pokles případů, jak se pátá vlna pandemie uklidňuje.
-Podle Press TV oznámilo ministerstvo v sobotu, že 58 Íránců zemřelo na nemoc, a uvedlo, že z nově zjištěných případů bylo 286 pacientů hospitalizováno během posledních 24 hodin.
-Uvedlo, že v zemi se nakazilo COVID-19 celkem 6 152 524 lidí a 5 963 373 z nakažených se uzdravilo a bylo propuštěno z nemocnic.
-Podle ministerstva je 3 126 pacientů s COVID-19 v jednotkách intenzivní péče (ICU) a dosud bylo v Íránu provedeno 39 951 481 diagnostických testů.
-Počty případů koronaviru klesají od chvíle, kdy vláda zahájila masovou vakcinaci.
-Dosud obdrželo první dávku vakcíny proti COVID-19 58 595 066 lidí, 49 157 835 obdrželo druhou dávku a 2 237 841 obdrželo posilující dávky.
-Celkový počet vakcín podaných v zemi dosáhl 109 990 742 dávek.
-Za posledních 24 hodin 19 provincií hlásilo téměř žádný případ úmrtí nebo pouze jedno úmrtí.
-Podle nejnovějších údajů je osm měst v oranžových zónách, 119 ve žlutých kategoriích a 321 měst v modrých zónách.
-Ve vysoce rizikovém červeném pásmu není žádné město.
-Íránský první viceprezident Mohammad Mokhber řekl ve středu, že země je plně připravena na rozšíření očkování proti koronaviru.
-Mokhber dále dodal, že dnes neexistuje žádná obava ani nedostatek v dodávkách vakcín a že je vše připraveno pro třetí a čtvrtou dávku očkování.
-Čteš knihy na čtečce #PRS_ORG#, že?
-Na stejné čtečce klepněte prosím na opravu účtu.
-Jdi na svou domovskou obrazovku.
-Klepněte na ikonu Menu v horní části obrazovky. -Nastavení. -Zobrazit informace o zařízení. -Kromě opravy vašeho účtu #PRS_ORG#, klepněte na Opravit. -Opravte nyní. -Budu více než šťastný, abych vám pomohl. -Prosím, dejte mi pár okamžiků na ověření vašich informací. -V tomto případě můžete zkusit připojit zařízení pomocí různých USB kabelů. -Jakýkoli obecný mikro-USB kabel by měl fungovat. -Také prosím zkuste použít různé USB porty ve vašem počítači. -Děkuji, chvilku prosím. -Objednal jsem vám náhradní položku, která bude odeslána 19. února. -Teď ti jenom připravím štítek na vrácení. -Tohle vybledne, když je vystaveno jakémukoli kritickému myšlení. -Nepřípustím, že existuje obrovské množství lidí, kteří neprovádějí kritické myšlení, ale ať už je to jakkoli, dokázat, že je to špatné, není žádnou zárukou, že to zmizí. -Po všem, už jsme měli forenzní audit a ruční přepočítání těchto hlasů a to nepomohlo. -Měli bychom jim prostě dál dovolit "auditovat" hlasy, dokud nedosáhnou výsledků, které chtějí? -Toto umožňuje Uri Gellerovi zkoušet své triky na Jamesi Randim. -Tady je to, kde příběh končí a lež jde zemřít. -Ne, není. -To je Uri Geller, který se snaží uplatnit svou hru na Jamese Randiho, nelíbí se mu výsledky a najímá společnost, jejíž ředitel prohlásil, že věří v Gellerovy schopnosti, aby "studovali" jeho schopnosti a zkoumali, zda Randi není komunista, který se snaží Gellera zničit. -Pokud zde nebudou žádné výsledky, požádají o další audit. -Nebo budou tvrdit, že rozřezané hlasovací lístky byly podávány kuřatům, která byla poté spálena. -Na nějakém bodě musíte udělat skutečnou práci zkoumáním reality a porovnáváním toho, co si myslí, a ukázat, kde se mýlí. -Už jsme to udělali. -Dvakrát. -Nezastavilo je to. -A není to jako by to bylo neškodné. -Jsou již obvinění, že tito lidé porušují federální zákon tím, že nezabezpečují hlasovací lístky správně. -Také zmíněno v tom článku: Tato společnost plánuje fyzicky prozkoumat části Maricopa County, aby se zeptala lidí, zda jejich hlasy odpovídají. -Jak byste se cítil/a, kdyby někdo přišel a zeptal se vás, koho jste volil/a v posledních volbách, vědět, že pokud jim to neřeknete, vaše hlasování může být označeno a zahrnuto? -Jak jste si jisti, že tato společnost bude data uchovávat v tajnosti a neumožní ostatním ve vaší komunitě, aby se o nich dozvěděli? -A byste byli stejně pravděpodobní hlasovat, kdybyste věděli, že to je možnost každou chvíli? -Vím o spoustě lidí, kteří by to nedělali. -Všechny naše komiksy jsou jako standardní odesílány v sáčcích. -Navíc se obvykle také zařadí starší položky. -Novější položky jsou zabaleny pouze. -Kromě výše uvedeného zboží máme skladem více než 250 000 komiksů, včetně starších i nových položek. -Všechny naše komiksy jsou dodávány z našeho skutečného obchodu, což nám umožňuje nabídnout obrovský sortiment komiksů prostřednictvím aukce. -Měli bychom to, co hledáte! -Pokud objednáváte více položek, požádejte prosím o fakturu s přesnou částkou PŘED zaplacením. -Tato položka je původní americký komiks a je v angličtině! -Prosím, vezměte na vědomí, že všechny naše komiksy ve všech stupních (I V NEJHORŠÍM STAVU) budou kompletní, pokud není v seznamu uvedeno jinak! -Prosím, vezměte si čas na prohlédnutí obou přiložených skenů obálky a podrobného popisu stupně kvality nahoře, abyste se ujistili, že tento konkrétní komiks je ve stavu, který požadujete. -Většina našich nabídek nabízí slevy za nákup více kusů. -Obvykle začíná od 3 nebo více položek, aby získali slevu. 
-Položky mohou být LIBOVOLNOU kombinací LIBOVOLNÝCH POLOŽEK zahrnutych do Multi-Buy. -Nemusí to být více kopií stejného předmětu. -Jen vyberte požadovanou celkovou množství a automaticky dostanete slevu na všechny! -Některé z našich položek zahrnují možnost umístit nejlepší nabídku. -Pokud je možnost Nejlepší nabídky, budeme zvážit jakoukoli rozumnou nabídku. -Neuvádíme žádné komiksy jako stav Mint. -Podle našeho názoru tato známka neexistuje. -Komiksy jsou masově vyráběné papírové položky, které jsou často zacházeny s malou péčí, než dokonce dorazí do obchodu nebo novinového stánku, aby byly nabídnuty k prodeji. -Každý komiks, dokonce i nový, bude mít nějakou formu drobného defektu, pokud se podíváte pořádně přes lupu. -Pokud jste odhodláni najít dokonalost komiksu nebo výsledky garantované CGC, nejlepší bude, když si komiksy před licitováním osobně prohlédnete ve našem obchodě! -Velikost úložiště dokumentů a dat Apple Music -Nedávno jsem přešel z iPhone 12 Pro na 13 Pro Max a na obou iPhoních jsem si všiml chyby, která spotřebovává můj interní úložiště. -Apple Music Documents a Data používají asi 35 GB vnitřního úložiště. -Zkusil jsem to opravit smazáním aplikace, ale protože se jedná o výchozí aplikaci, dokumenty a data nikdy z iPhoneu úplně nejsou smazány. -Myslel jsem si, že když se přesunu na nový iPhone 13 Pro, chyba zmizí, ale to nebyl případ. -Po instalaci z iCloud zálohy jsem zkontroloval aplikaci Apple Music a stále používala více než 30 GB pro dokumenty a data. -Po kontaktování dvou specialistů na podporu Apple, jeden navrhl, abych vyčistil můj iPhone a začal znovu, zatímco druhý nenabídl žádné skutečné návrhy, protože problém přesahuje to, co mohou udělat. -Také jsem zkontroloval můj iPad a zdá se, že AM používá pouze 15 GB pro dokumenty a data na něm, ale to stále není přijatelné. -Teď se obracím na komunitu, abych zjistil, jak rozšířený je tento problém, a možná získal pozornost Applu k tomuto problému. -Zkusil jsi to taky? -Můžete prosím odpojit svůj eReader od počítače a zkusit tovární reset? -Toto smaže informace na vašem čtečce knih, ale můžete si je zálohovat a později je znovu přenést. -Můžete následovat tyto kroky: -Chcete-li provést tovární reset vašeho #PRS_ORG#, postupujte podle následujících kroků: -Jdi na svou domovskou obrazovku. -Klepněte na Domů v horní části obrazovky. -Nastavení. -Zobrazit informace o zařízení. -Čeština: Klepněte na Tovární obnovení v sekci Pokročilé. -Stiskněte Nyní Resetovat. -Tento sadu makro rozšířených trubek může transformovat váš objektiv na makro objektiv. -Sada se skládá ze tří trubek různých délek, které lze použít v jakékoli kombinaci nebo samostatně k získání různých zvětšení. -Tři samostatné prsteny lze použít zvlášť s tělem fotoaparátu a adaptérem objektivu a samozřejmě bude odlišný poměr zvětšení. -Máte 8 řad různých kombinací. -Trubky prodloužení jsou kovové trubky s objektivovým závěrem na jednom konci a tělovým závěrem na druhém konci. -Sada rozšíření trubice nemá vliv na kvalitu obrazu, protože uvnitř není žádná optika. -Žádný elektronický kontakt a automatické zaostření nemůže být provedeno. -Expozice a zaostření musí být nastaveny ručně. -Nastavte fotoaparát a objektiv do manuálního režimu, vypněte a odpojte objektiv. -Připojte prodlužovací trubici mezi fotoaparát a objektiv. -Umístěte předmět blízko objektivu a použijte hodně světla. -Když jsou trubky připojeny, musíte všechno provést ručně. -A je důležité, abyste používali hodně externího světla. 
-Pokud to neuděláte ve světlém prostředí, můžete mít potíže vidět objekt skrz zrcátko. -Proto můžeme zboží odeslat okamžitě a co nejdříve po jeho nákupu. -Musíte platit prostřednictvím systému Paypal. -Všechny bankovní karty uvedené níže jsou akceptovány. -Pro zákaznickou pohodlnost a rychlejší doručení jsou k dispozici tyto možnosti: -Královská pošta 1. třídy s podpisem (1 pracovní den) pro velké a drahé zboží -Královská pošta sledovaná 24 (1 pracovní den) pro velké a drahé zboží -Královská pošta mezinárodní sledovaná pro velké a drahé zboží -Královská pošta mezinárodní podepsaná pro velké a drahé zboží -Ujistěte se, že vaše objednávka obsahuje správnou adresu pro doručení. -Přijímáme vrácení zboží do 60 dnů od data, kdy jste obdrželi nákup. -Zákaznická spokojenost je pro nás velmi důležitá. -Pokud máte s vaší objednávkou nějaký problém, kontaktujte nás a uděláme vše pro to, abychom vás uspokojili. -Prosím, nezanechávejte negativní zpětnou vazbu. -Garantujeme, že váš problém bude rychle vyřešen. -Pokud jste spokojeni se svým nákupem, prosím, zanechte nám pozitivní zpětnou vazbu. -Váš zpětný odkaz je pro nás velmi důležitý. -Budeme vám dávat pozitivní zpětnou vazbu. -Pokud máte jakékoli otázky, neváhejte nás kontaktovat prostřednictvím systému e-mailové komunikace eBay. -Budeme se snažit odpovědět co nejdříve během 24 hodin. -Doufáme, že nám dáte šanci zlepšit naši službu a vyřešit jakékoli problémy, které byste mohli mít. -Vidím to pořád ve svém oboru práce. -A nemusí to být ani otázka života nebo smrti, aby to bylo frustrující. -Měli jsme nedávného pacienta, který potřeboval velmi specifický zákrok na koleno, aby mohl normálně chodit a zlepšit kvalitu života. -Peer to peer selhal. -Pojišťovna říká, že to není lékařsky nutné. -Voláme o pomoc. -Říkají znovu ne. -Jdeme se odvolat k třetí straně. -Podáváme všechny relevantní medicínské výzkumy podporující potřebu postupu. -Dokonce jsme zahrnuli i druhý názor jiného chirurga mimo našeho programu — ano, doporučuje postup. -O 24 hodin později nám odpověděli definitivním "Ne". -Nezbytné z hlediska lékařského. -Můj chirurg se rozčílí a říká: "DOBŘE!" -Ale ty mi budeš říkat, který postup bys doporučil, protože neznám žádný jiný, který by pomohl tomuto chudákovi dítěti. -Samozřejmě, oni ne. -A tento kluk je v průšvihu. -Žádná jiná možnost. -Jak se ukázalo, tento postup je obecně nenáviděn pojišťovnami, protože je poměrně drahý. -Vždycky musíme bojovat o to, ale obvykle souhlasí po odvolání. -Tentokrát ne. -Systém je tak rozbitý. -Na vaší webové stránce nebylo nic o tak dlouhé době dodání. -Při objednávce je to do uvedeného časového limitu. -Časové prodlevy jsou uvedeny na webových stránkách. -Nemáme skladem žádné zboží, proto jsou všechny položky vyrobeny na objednávku, odeslané na nás sem na #URL# a poté odeslané na vás. -Přesuňte přívěs! -Před lety jsem pracoval ve dřevěné dílně. -Šel jsem na instalaci s majitelem a když jsme se vrátili, zaparkoval prázdnou přívěs blízko popelnice. -Žádný zvláštní důvod, tam bylo jen místo, takže to je tam, kde to nechal. -Ráno přišel do práce a Jerry (ne jeho skutečné jméno) přišel ke mně, vypadající naštvaně kvůli něčemu. -Nic nového, vždycky byl trochu zamračený starý chlap. -Rozmluva probíhala něco podobného níže (před 18 lety, takže si to přesně nepamatuji). -Jerry: Parkoval jsi tu přívěs u popelnice? -Ne, včera jel majitel. -Jerry: Nemůžeš tam zaparkovat tu přívěs, pak se nemůžu dostat k popelnici! -Já to tam nezaparkoval, majitel ano, ale můžu to přemístit. -Jerry: Nevím, proč bys tam ten přívěs parkoval. 
-Víte, že potřebujeme přístup k popelnici. -Ale já to tam nezaparkoval. -Proč se s majitelem o tom nerozhlížíš? -Jerry: blah blah blah tvá vina, čert vem ty děti, žádný respekt, blah blah blah -Já to nebyl. -Rozhovor pokračoval v tomto směru několik minut, s ním mě kritizoval za to, že jsem nechal přívěs v tom způsobu, ve kterém jsem ho nechal. -Od toho dne až do doby, než jsem odešel z toho dílny o několik let později, kdykoli jsem pracoval pozdě (což bylo častěji než obvykle) a 5x8 přívěs byl v dílně, vzal jsem jazyk a přitáhl ho až k popelnici, aby ho Jerry našel ráno. -Prosím, navštivte následující odkaz a postupujte podle kroků k vytvoření nového hesla. -Dejte mi vědět, jestli jste byli schopni vytvořit své nové heslo a přihlásit se s ním. -Rozumím, mohl byste prosím zkontrolovat, jestli se ebook dá otevřít? -Dokázal/a jsi najít elektronickou knihu? -Kvůli nereagování a z důvodu kvality musím ukončit tento chat, neváhejte nás kontaktovat ohledně jakéhokoli dotazu nebo požadavku. Rádi vám s tím pomůžeme. -Mějte krásný den, Na shledanou! -V neděli nejlepší: Vstupte do New Yorku 1880s v HBO "The Gilded Age". -Varování: klobouk a slunečník! -Seriál "Zlatá éra", který vytvořil Julian Fellowes ("Downton Abbey") a napsal Fellowes a Sonja Warfield, má premiéru příští měsíc na HBO. -Nastaveno v New Yorku 1880s, sleduje Marian Brook (Louisa Jacobson, vlevo nahoře) a nadějnou spisovatelku Peggy Scott (Denée Benton, vpravo) jak se nově setkávají s starými penězi společností. -Také ve obsazení jsou Christine Baranski, Cynthia Nixon, Carrie Coon a Morgan Spector, mezi mnoha dalšími. -Dobrá zábava na zimu, že? -Kostýmy, které vypadají, že jsou opravdu bohaté, jsou navrženy Kasií Walickou-Maimone, jejíž předchozí práce zahrnovala "The Goldfinch", "A Quiet Place" a "Moonrise Kingdom". -"Gilded Age" začíná streamovat na HBO Max 24. ledna. -Jižní Afrika vzdává hold poslednímu apartheidnímu vůdci De Klerkovi. -Jižní Afrika v neděli vyjádřila oficiální uznání FW de Klerkovi, poslednímu prezidentovi bílé vlády, který osvobodil Nelsona Mandelu z vězení a vedl zemi z apartheidu do demokracie. -De Klerk zemřel 11. listopadu ve věku 85 let po boji s rakovinou. -Bylo vyhlášeno čtyři dny národního smutku ve jeho čest. -Sloužil jako prezident od roku 1989 do roku 1994 a je nejvíce zapamatován pro vedení přechodu Jižní Afriky od bílé většinové vlády k prvním víceročním volbám v roce 1994. -De Klerk také sdílel Nobelovu cenu míru s Mandelou v roce 1993 po jeho osvobození z vězení v roce 1990. -Mandela se poté stal prvním černým prezidentem Jižní Afriky po vítězství jeho strany Africké národní shromáždění v roce 1994 ve volbách. -Prezident Cyril Ramaphosa navštívil v neděli ráno protestantskou Groote Kerk v Kapském Městě - jednu z nejstarších jihoafrických církví -, aby vzdal hold De Klerkovi ve formě eulogie. -Elita Georgiadis, vdova po De Klerkovi, řekla přibližně 200 účastníkům: „Často byl nesprávně pochopen kvůli jeho přehnané správnosti.“ -Nikdy nezapomenu na tohoto muže, který mě uchvátil, který mě donutil chtít mu pomoci dosáhnout této obrovské úlohy před ním. -Soukromá mše a národní hymna předcházely slavnosti, které zahrnovaly portrét De Klerka mezi dvěma svíčkami a sbor ozdobený bílými květinami. -Přestože měl De Klerk pozitivní pověst v zahraničí, v Jižní Africe rozdělil názory a jeho smrt vyvolala smíšené reakce. -Kritici říkají, že zůstal nerozlučně spojen s trestnými činy z období apartheidu a mohl by za ně být odpovědný, kdyby déle žil. 
-De Klerk zastupoval Národní stranu, která v roce 1948 formálně zavedla rasovou segregaci a odepření volebního práva většině ne-bílých obyvatel Jižní Afriky. -Venku před kostelem držela malá skupina protestujících cedule s nápisy "Spravedlnost odmítnuta" a "Spravedlnost pro oběti apartheidu" a byli rychle odvedeni policií. -Okolí bylo uzavřeno pro dopravu a pod silnou ochranou. -Komentáře v jeho posledních letech také poškodily obraz De Klerka v důsledku kritiky za jeho selhání omluvit se oficiálně za zločiny apartheidu. -V roce 2020 popřel, že apartheid je zločin proti lidskosti, než své prohlášení stáhl a omluvil se. -De Klerkovy nadace vydala pohřební video, ve kterém se omlouvá "za bolest, zranění, ponížení a škody, které apartheid způsobil" ne-bílým obyvatelům Jižní Afriky. -Pro Vaši informaci, pošlu Vám přepis naší konverzace. -Pokud budete mít další otázky nebo obavy, můžete vždy odpovědět na tento e-mail a my vám budeme moci dále pomoci. -Naše soustředěná kombinace oddanosti a odbornosti přináší našim zákazníkům výhody. -Norton předčil konkurenci ve většině renomovaných srovnávacích testech a pouze Norton získal PC Magazine Editors’ Choice Award 34krát, včetně 11 let po sobě – více než jakákoli jiná bezpečnostní společnost. -Co to pro tebe znamená? -Když si koupíte Norton Security, dostanete jeden z nejlepších bezpečnostních produktů na trhu dnes. -Zahrnujeme záruku ochrany, kterou může poskytnout pouze Norton. -Jsme tak sebevědomí ve své schopnosti vás udržet v bezpečí, že nabízíme záruku vrácení peněz: Pokud se na vašem počítači nebo Macu objeví virus, který naši odborníci Norton nemohou odstranit, vrátíme vám peníze*. -S Norton Security Deluxe můžete rychle a snadno zabezpečit své zařízení. -Norton Security Deluxe poskytuje jednoduchý pohled, který podrobně popisuje stav ochrany vašeho zařízení. -Z jednoho ovládacího panelu můžete sledovat nastavení zabezpečení a ochrany identity a dokonce si prohlédnout historii skenovaných souborů a analyzovaných stahování. -Norton Security Deluxe zahrnuje přístup k odborné pomoci od certifikovaných techniků Nortonu online. -Pokud budete potřebovat pomoc kdykoli, naši podpůrní zástupci jsou připraveni vám pomoci 24 hodin denně, sedm dní v týdnu. -Aktivujte se, zaregistrujte se online a uložte si své fakturační údaje do svého účtu Norton. -Automaticky se obnovuje každý rok, pokud není obnovení před dnem, kdy budete účtováni v my.norton.com nebo kontaktováním podpory Nortonu, zrušeno. -Obnovení předplatného je účtováno za cenu obnovení nalezenou na norton.com/pricing. -Cena je podmíněna změnou, ale před fakturací je odesláno upozornění e-mailem. -Podle politiky zrušení a vrácení peněz společnosti NortonLifeLock po aktivaci můžete smlouvu zrušit a požádat o plnou náhradu do 60 dnů od nákupu a pro každé roční obnovení do 60 dnů od účtování. -Předplatné začíná po online aktivaci. -Pro spuštění služby stáhněte/nainstalujte na každé zařízení a/nebo dokončete nastavení. -Aktualizace a funkce mohou být přidány, upraveny nebo odstraněny v souladu s licenční smlouvou a služební smlouvou. -Sběr dat, ukládání a používání k účelům správy a obnovení předplatného podléhajících Globálnímu prohlášení o ochraně soukromí společnosti NortonLifeLock. -Vrhněte se do hlubokého příběhu uvězněného v rozsáhlém světě Black Desert, který čeká na to, aby byl objeven. -Společně s Černým duchem, společníkem, jehož osud je propletený s jejich vlastním, hráči odhalí tajemství Černých kamenů a historii jejich korumpujícího účinku. 
-Hráči si užijí dechberoucí grafiku s neuvěřitelnou úrovní personalizace postav ve 19 třídách postav. -Každá třída nabízí intuitivní dovednostní boj, vybavený sadou unikátních dovedností, které lze volně kombinovat do vzrušujících a účinných kombinací, které vás vždy drží na nohou. -Černé pouštní Prestige Edition je živý svět MMORPG s hodnotou $140 bonusového obsahu. -Zažijte rychlé akční boje, lovte monstra a obří bosse, bojujte s přáteli ve gildě o ovládnutí uzlů a hradů a trénujte různé životní dovednosti, jako je rybaření, obchodování, tvoření, vaření, plachtění a mnohem více! -Robustní nástroje pro tvorbu postav - Vytvořte si postavu, kterou chcete hrát. -Bezproblémový pohyb po celém světě - Žádné časové prodlevy nejsou nutné, jak se prozkoumáváte. -Kombinovaný, nezaměřený boj - Účastněte se rychlého a akčního boje s dovednostmi, které lze spojovat do komb. -Unikátní počasí a klima - Počasí a klima budou mít různé účinky na různé zóny, na které hráči mohou přizpůsobit. -Cyklus dne/noci - Spolu s unikátními změnami počasí a klimatu se hra točí kolem cyklu dne/noci, který mění chování NPC a spouští různé události v závislosti na čase dne. -Instancované hráčské bydlení - Od stanů až po paláce a všechno mezi tím, hráči mohou zařídit a přizpůsobit si své vlastní domovy a mohou najmout NPC aby udržovali jejich místo čisté nebo nakupovat věci na trhu. -Boj na koni - Využijte své důvěryhodné jezdce na bojišti a využijte jejich pohyblivosti a účinnosti v boji. -Pamatujte si však, že koně budou potřebovat péči, ubytování a ochranu, protože mohou zemřít v boji. -Boss Hunts - Skup se se svými přáteli nebo ostatními hráči a lovte pole bossy a světové bossy, abyste získali tu vzácnou loot. -Obležení - Masivní volné pro všechny bitvy gildy! -Připojte se k gildě a účastněte se denních node wars nebo týdenních conquest wars proti mnoha dalším soutěžícím gildám. -Vyhrajte uzel nebo hrad a reklamujte jej po dobu jednoho týdne, abyste mohli sbírat daně a zvýšit fondy vaší gildy. -Vyrobte si loď a vyplujte na rozsáhlé oceány, abyste rybařili, lovili mořské monstra a bosse, prozkoumávali pod vodou a sbírali, plnili úkoly, obchodovali a mnohem více. -Těžení a chov - Chytit a ochočit divoká koně a slony ve volné přírodě, aby se stalo vaším hřebcem. -Můžete také chovat koně pro lepší potomky s vylepšenými statistikami a dovednostmi jízdy. -Řemesla - Užívejte si všechny aspekty řemesel v Black Desertu, od nástrojů, zbraní, brnění, šperků, lodí, kostýmů, oblečení a dalšího. -Všechno skoro lze vyrobit ve světě Black Desert. -Profese - Vezměte si účast a rozvíjejte svou postavu do profese, která může pomoci vašemu příjmu. -S profesemi jako sběr, zpracování, vaření, alchymie, trénink, rybaření, lov, obchodování, zemědělství a plavba si můžete vybrat, jak chcete hrát Black Desert Online. -Budu odstraňovat a znovu přidávat knihu a poté budete moci vyřešit problém s vaší aplikací #PRS_ORG# pomocí dvou postupů, abyste zjistili, zda to problém vyřeší. -Prosím, dva minuty. -Je to hotovo. -Teď prosím zkuste provést tento postup ve vaší aplikaci: -Chcete-li opravit svůj účet v aplikaci Android, postupujte podle níže uvedených kroků: -Klepněte na ikonu #PRS_ORG# v horní části obrazovky. -Jdi na domovskou obrazovku. -Klepněte na ikonu Menu v horní části obrazovky. -Nastavení. -Posuňte se dolů a klepněte na Opravit váš účet. -Oprava tlačítka. -Když dokončíte, pokračujte prosím tímto postupem: -Klikněte na tlačítko "Odhlásit se" ve své aplikaci #PRS_ORG#. -Klepněte na ikonu Více dole na obrazovce. -Nastavení. 
-Odhlásit se z #PRS_ORG#. -A poté se, prosím, znovu přihlaste, aby se účet aktualizoval. -Jak to šlo? -Vidím zde, že k vaší objednávce ještě nebyl přidělen žádný jezdec. -Nicméně to zaznamenám do záznamů. -Můžete také použít aplikaci k volání nebo chatování s nimi, jakmile budou blízko místa, budete mít možnost kontaktovat jezdce. -Ano, otevírám účet. -Prosím, následujte další proces. -Chcete-li opravit svůj účet v aplikaci Android, postupujte podle níže uvedených kroků: -Klepněte na ikonu #PRS_ORG# v horní části obrazovky. -Jdi na domovskou obrazovku. -Klepněte na ikonu Menu v horní části obrazovky. -Nastavení. -Posuňte se dolů a klepněte na Opravit váš účet. -Oprava tlačítka. -VP−730 je 9−vstupní škálovač/přepínač pro analogové video, digitální video, vyvážené stereo a S/PDIF audio signály. -Přepočítává kompozitní, s−Video (Y/C), komponentní video (YUV), HDMI, počítačové grafické video a JPEG soubory na vybrané rozlišení počítačové grafiky nebo HDTV na stejných výstupních portech − jeden HDMI a dva 15−pin HD. -Zahrnuje zesilovač pro napájení reproduktorů. -Jednotka poskytuje bezproblémové přepínání mezi zdroji prostřednictvím technologie FTBTM (fade-thru-black). -HQV® Video zpracování - HQV (Hollywoodská kvalita videa) zpracování představuje nejmodernější technologii zpracování videa, s nejvyšší kvalitou de-interlacingu (s 3:2 a 2:2 pull down), redukcí šumu a škálováním pro standardní i vysokorozlišovací signály. -Fade-Thru-Black (FTBTM) přepínání - Video se stínověním do černé a poté nový vstup se stínověním z černé pro hladké, bezchybné přepínání. -Výstupní signál poskytuje stálou synchronizaci, takže obrazovka nikdy nezamrzne. -K-IIT XLTM technologie vložení obrazu do obrazu - Ultra stabilní schopnost obrazu v obraze, obrazu a obrazu a rozdělení obrazovky. -Jakýkoli zdroj videa může být vložen do nebo umístěn vedle zdroje počítačové grafiky nebo naopak s ovládáním pozicování a velikosti okna. -Vstupy videa - 2 univerzální video každé na 3 BNC (kompozitní, s−Video, komponentní), 4 počítačové grafiky/komponentní video (15−pin HD), 2 HDMI a 1 USB (pro data JPEG). -HDCP kompatibilní - Licenční dohoda HDCP (ochrana vysokého rozlišení obsahu) umožňuje přenos chráněných dat na HDMI vstupu pouze na HDMI výstup. -Více možností pro výběr poměru stran - 4x3 nebo 16x9, anamorfní, letterbox a uživatelem definovaná nastavení. -Společný AFV (Audio-Follow-Video) - Pro každý analogový video vstup podporuje vložený zvuk na 2 HDMI vstupech a výstupech. -Audio vstupy - 6 vyvážených nebo S/PDIF audio (každý vybíratelný) na terminálových bloků, jeden pro každý z 2 univerzálních videí a 4 počítačových grafických vstupů. -Vestavěný ProcAmp - Barva, odstín, ostrost, kontrast a jas jsou nastaveny individuálně pro každý vstup. -Jednotka byla plně otestována ve všech vstupech a výstupech. -Jednotka bude vyžadovat konektor pro výstup reproduktoru. -Úžasné. -Ale dobře ti tak. -Ano, když mi bylo 16, aplikoval jsem a dostal nabídku na práci v restauraci. -Myčka nádobí. -První směnu mi měli zavřít. -Sobota. -Pracovali jsme až do pozdních 1 hodin ráno. -Ukončil jsem to druhý den. -Nejlepší způsob, jak ztratit nového mladého zaměstnance, je ho šokovat tím. -Stejné se stalo mému příteli poté, co jsem pracoval pro Pizza Hut několik let (nezavřeli mě až po několika měsících, kdy jsem se začal učit), dostal jsem mu tam práci na místě. -Oni pokračovali v tom, že ho dali na dvě šaty po sobě. -On to skončil. 
-Pokud neoznámíte pracovní pozici jako noční směnu, očekávejte, že pokud jim to oznámíte příliš brzy, ztratíte své zaměstnance. -Poté, prosím, smažte svou autorizaci. -Deaktivujte svůj Ereader. -Zapněte svůj eReader. -Připojte svůj eReader k počítači pomocí Micro USB kabelu. -Na vašem čtečce knih: Klepněte na tlačítko Připojit. -Na vašem počítači: Otevřete #PRS_ORG#. -Pod položkou "Zařízení" klepněte pravým tlačítkem myši na čtečku #PRS_ORG#. -Klikněte na "Odstranit autorizaci zařízení". -Klikněte na tlačítko OK na potvrzovací obrazovce. -Zrušit autorizaci #PRS_ORG# -Pro odstranění autorizace #PRS_ORG#, klikněte na Nápověda > Odstranit autorizaci. -V otevřeném vyskakovacím okně zadejte heslo k účtu, který jste použili k autorizaci #PRS_ORG#. -Klikněte na "Smazat autorizaci" -Byla tyto kroky užitečné? -Bohužel jsem neobdržel odpověď déle než dvě minuty. -Pro účely kvality bude tento chatovací rozhovor uzavřen, nezapomeňte, že se můžete vždy vrátit a my budeme rádi, že budeme pokračovat ve vaší asistenci. -Boris Johnson se nachází na hranici přízně u toryských poslanců. -Boris Johnson je dlouho považován za krále návratů. -Někteří konzervativní poslanci doufají, že on bude pokračovat v této sérii tím, že se zachrání před klesajícími průzkumy veřejného mínění v důsledku řady stran v Downing Street v rozporu s covidovými zákony. -Premiér se zamotal do sebe samého, když opakovaně popíral, že byla porušena nějaká pravidla, než se objevily další zprávy a důkazy, které naznačovaly opak. -Nejprve byl video No 10 poradců smějících se, zatímco diskutovali o vánočním setkání 18. prosince minulého roku. -Pak Dominic Cummings, bývalý nejbližší poradce Johnsona, slíbil, že byly pořízeny fotografie oslav, a tak s napětím čekali kritici vlády, až se objeví. -Když byla v neděli zveřejněna fotografie, na které Johnson na obrazovce vedl vánoční kvíz pro zaměstnance, kteří se připojili z No 10 a z domova, nebylo to úplně to, co někteří mysleli, že bude konečně znamenat jeho pád, nicméně. -Obrázek Sunday Mirror ukazuje Johnsona se dvěma poradci, kteří byli oblečeni do stříbrných ozdob a s čepicí Santa - nebyli od sebe sociálně distancováni a jasně se účastnili společenské události, zatímco míchali domácnosti. -Ale mohlo to být horší. -Existovaly další stranické akce v No 10 a v hlavním sídle Konzervativní strany, na kterých lidé popíjeli hojné množství alkoholu, hráli stranické hry, vyměňovali si dárky pod stromečkem a sociálně se bavili až do pozdních hodin, podle zdrojů, které informovaly média včetně Guardianu, Mirroru, BBC a Times. -Ministři budou potichu vzdychat úlevou, že zatím nebyly unikly žádné fotografie těchto scén. -Zatímco Johnsonovo účast na kvízu porušila pravidla, podle Keira Starmera, vůdce Labouristů a bývalého ředitele veřejného žalobce, si myslí poslanci Toryů, že lidé se na fotografii podívají a posoudí, že ve skutečnosti pořádá virtuální kvíz - běžný pohled během pandemie. -Personál, volající z jiných místností v č. 10, zatímco pijí a nesdílejí prostor, nemůže být viděn. -V neděli Nadhim Zahawi trval na tom, že obrázek byl jen příkladem Johnsonova "poděkování svým zaměstnancům" a použil ho k potlačení stranického skandálu jako "hype". 
-Řekl LBC: „Na té titulní stránce si myslím, že vaši posluchači na to budou koukat a uvidí premiéra ve své kanceláři, se dvěma blízkými lidmi, bez alkoholu, trávícího 10 až 15 minut, aby poděkoval a motivoval svůj personál, který přichází, protože nemůžou pracovat z domova.” -Už bylo učiněno mnoho škod, s vzbouřenými poslanci rozzuřenými nad tím, že premiér umožnil, aby se ujal „jedno pravidlo pro ně“, od Cummingsa po Matta Hancocka a nedávno i Owena Patersona. -Johnson se nachází na hraně přízně u svých vlastních poslanců; pokud se objeví další fotografie, mohou ho přinutit překročit tu hranici. -Můžete se kdykoliv vrátit, protože naše okno chatovací služby je otevřené 24 hodin denně, 7 dní v týdnu. -Upřímně doufám, že se vám podaří najít řešení. -Děkuji vám za kontaktování #PRS_ORG#, bylo mi potěšením vám dnes pomoci. -Doufám, že máte skvělý večer. -Měli jsme vypnutí proudu několikrát. -Krok 1: Okamžitě někoho na dveře. -Oni jsou teď bezpečností. -Nenechte nikoho vejít a všímejte si lidí, když odcházejí (zejména dětských rukou). -Krok 2: Pokud tam nejsou, zavolejte manažerovi obchodu. -Krok 3: Ti, kteří jsou u pokladen a kdokoli jiný, mohou počkat několik minut, abychom zjistili, zda nám mohou pomoci náhradní generátory. -Krok 4: Projděte se po obchodě a vyžádejte si odchod všech nezaměstnanců. -Každý vůz přivezený na přední stranu. -Krok 5: Projděte se košíky a hledejte všechno, co je studené a produkty. -Krok 6: Vraťte uvedené studené/ovoce. -Krok 7: Pokryjte všechny neotevřené studené potraviny, jako je sýr/maso/zelenina atd. -Krok 8: Podepsat naše jména na list papíru, když jsme odešli, abychom se odhlásili. -Někteří byli povoleni odejít dříve, zejména pokud se necítili pohodlně ve tmě nebo neměli ještě 6 hodin do konce. -Je to opravdu tmavé, i dopředu. -Nemůžeme nechat zákazníky jen tak viset. -Nejsem si jistý, proč některé pokladny stále měly nějakou energii, zatímco ostatní ne. -Nemyslím si, že bychom měli nějaký způsob, jak je mohli zaplatit. -Myslím, že by se věci mohly skenovat, ale nikdy bychom nedůvěřovali zákazníkům, aby platili později. -Jednou to trvalo jen asi 3 hodiny, než se elektřina zase zapnula. -Měli jsme pár z nás, aby zůstali, takže kdyby to, jak jim řekla elektrárenská společnost, mělo udělat, mohli bychom znovu otevřít. -Nemám nic proti tomu, pokud máme možnost zůstat nebo odejít, pomáhat při zachování produktu co nejlépe. -Nemít na výběr a ohrožovat zákazníky, to je můj limit. -Pro Vaši informaci, pošlu Vám přepis naší konverzace. -Pokud budete mít další otázky nebo obavy, můžete vždy odpovědět na tento e-mail a my vám budeme moci dále pomoci. -Děkuji vám za kontaktování #PRS_ORG#, bylo mi potěšením vám dnes pomoci. -Doufám, že máte skvělý den. -Snažím se zavolat jezdci, ale on mě nerozumí. -Prosím, zavolejte jezdci, jakmile se blíží k místu adresy uvedenému v objednávce pomocí aplikace. -Děkuji za informace. -Budu více než šťastný, abych vám pomohl. -Prosím, dejte mi chvíli na ověření účtu. -Děkuji za čekání. -Omlouvám se, že zažíváte tento problém, udělám vše pro to, abych vám pomohl. -Prosím, dejte mi vědět, jaký je váš #PRS_ORG# model. -Vím, že je to v čínštině, nemusíte používat vnitřní funkci vašeho zařízení ani správný jazyk k provedení těchto posledních kroků, které byly odeslány. -Prosím, udělejte mi laskavost a přečtěte si je nejdříve a poté je provádějte. -Instrukce jsou ručně resetovat váš zařízení. -Není potřeba správný jazyk. -Nicméně, pokud si přejete vyžádat vrácení, mohu vám také pomoci. -Moc se omlouvám za nepříjemnosti, které jste zažili. 
-Stále hledáme způsoby, jak zlepšit naše služby a toto bude zaznamenáno jako zpětná vazba jednomu z našich ceněných zákazníků.
-OK, můžete zkusit provést tovární reset vašeho zařízení, abyste zjistili, zda to tento problém vyřeší.
-Rozumím, že jste to už zkusili vypnout a znovu zapnout bez úspěchu, že?
-Bohužel to momentálně není skladem, jen se podívám, jestli se to vrací.
-Prosím, buďte se mnou chvíli trpěliví.
-Toto bylo zrušeno, takže se nebude vracet do skladu, omlouváme se.
-Planeta Jupiter konečně opustila sluneční soustavu mé kanceláře.
-Před několika lety jsem napsal příspěvek o mé kancelářské nepřítelkyni, ženě, které ve svých příbězích říkám PlanetJupiter.
-Není tu moc co říct.
-Naposledy jsem ji viděl před Koronou, zhubla a zdálo se, že se při obědě snaží dodržovat skupiny potravin, i když stále používala svůj elektrický invalidní vozík a byla trochu smradlavá.
-Zeptal jsem se jí, jak se má, jako se ptám všech svých spolupracovníků, když je vidím.
-„Není to tak dobré, OP, zjistila jsem, že mám cukrovku, takže musím jíst méně sacharidů.“
-K jejímu malému uznání měl oběd květákovou rýži místo běžné.
-Jsem ze Středozápadu a vždy jsem byl k PJ milý, takže jsem jí řekl, že je mi to moc líto, což bylo hrozné. A co tento projekt, na kterém jsme oba?
-Bude také pracovat pozdě, aby to stihla do soudního termínu?
-Ano, OP.
-Šetřím peníze na přestěhování.
-To je opravdu vzácné.
-Můj stát má nejnižší míru vystěhování ze všech států vůbec.
-Kam se stěhuje?
-Do dalšího středozápadního města, které hodně pracuje v masném průmyslu.
-Doufám, že ji nepopletli s býkem!
-Ukázalo se, že to, jak jsme já i ostatní dokumentovali její pomalou/špatnou práci, usínání u stolu, obtěžování ostatních a zápach, způsobilo, že přišla o pozice u všech firem, které často najímají mě, ji a ostatní na dočasnou práci, kromě jedné.
-Takže se tak nějak musí přestěhovat tam, kde má ve městě rodinu.
-Půjde zkazit jiné pracoviště, ale alespoň ne moje.
-To už ale nezáleží, protože jsem dostal mnohem lepší pracovní pozici na dálku.
-Nemůžete zadat datum schůzky, musíte objednat a my pak můžeme položky držet pro vás, můžeme je držet nejdříve po dobu tří měsíců.
-Je tu ještě něco, s čím bych vám dnes odpoledne mohl pomoci?
-Děkuji vám za to, že jste si dnes udělali čas na rozhovor se mnou a doufám, že jsem dokázal vyřešit váš dotaz. Pokud by vám nevadilo ohodnotit naši dnešní konverzaci na základě mých zákaznických dovedností, byl bych vám velmi vděčný. Tlačítko pro hodnocení najdete v tomto chatu.
-Doufám, že máte skvělý den a prosím, vraťte se k nám, pokud budete potřebovat další pomoc.
-Připojte svůj eReader ke zdroji napájení jedním z následujících způsobů:
-Zapněte počítač a nejprve připojte přiložený USB napájecí kabel k počítači a poté k vaší čtečce knih.
-Zasuňte zástrčku ze zdroje napájení (není součástí dodávky) do zásuvky a poté připojte svůj eReader ke zdroji napájení.
-Stiskněte a podržte tlačítko napájení, dokud světelný indikátor v horním pravém rohu vaší čtečky knih nezhasne.
-Uvidíte obrazovku "Vypnuto", když je vaše čtečka e-knih vypnutá.
-Uvolněte tlačítko napájení.
-Stiskněte a podržte tlačítko napájení na vaší čtečce knih po dobu 30 sekund.
-Čekejte, až se objeví obrazovka Obnovení.
-Uvolni tlačítko napájení.
-Obrazovka vaší čtečky se ztmaví a začne proces obnovení.
-Je tu něco jiného, s čím bych vám mohl pomoci?
-Libye: plán na prezidentské volby 24. prosince se blíží k zhroucení
-Šance, že Libye uspořádá své první prezidentské volby v dlouho plánovaném termínu 24.
prosince, se v neděli zdály být blízko zhroucení, protože orgán dohlížející nad hlasováním nebyl schopen oznámit schválené kandidáty kvůli stále přetrvávajícím právním pochybnostem. -S volbami méně než dva týdny pryč a prakticky žádný čas na kampaně, odložení by představovalo hořkou ránu pro naděje mezinárodního společenství na sjednocení hluboce rozdělené země. -Cizí mocnosti se také obávají, že celkový momentum směrem k demokracii může vyprchávat. -V krátkodobém horizontu se musí dohodnout, zda bude pokračovat přechodná vláda, aby se vyplnila politická prázdnota a zabránilo se návratu k občanské válce. -Série soudních rozhodnutí zrušila rozhodnutí libyjské volební komise zablokovat významné osobnosti, včetně Saifa al-Islama Kaddáfího, syna bývalého diktátora, aby kandidovali na prezidenta. -Interimní premiér Abdul Hamid Dbeibah a válečník Khalifa Haftar, hlavou samozvané Libyjské národní armády, byli schváleni komisí, ale následně napadli jiné strany. -Ve stanovisku v sobotu uvedlo, že nemůže oznámit jména schválených kandidátů z téměř 100, kteří se přihlásili, protože je „odhodláno vyčerpat všechny prostředky řízení, aby bylo zajištěno, že jeho rozhodnutí odpovídají vydaným rozsudkům“. -Rivalní frakce si navzájem vyčítají, že vyhrožovaly nebo kupovaly soudní úředníky, aby obnovili své kandidáty, a komise se snaží zjistit, zda byla rozhodnutí platná. -V případě Dbeibah se zavázal jako podmínka stát se dočasným premiérem, že nebude kandidovat ve volbách, ale od té doby se ve soudním řízení argumentovalo, že to byl morální závazek bez právní síly. -Saif Gaddafi byl v roce 2015 odsouzen v nepřítomnosti za válečné zločiny za jeho účast na boji proti revoluci, která svržela jeho otce Muammara Gaddafiho. -On popírá špatné jednání. -Přítomnost desítek tisíc cizích bojovníků, najatých vojáků a domorodých milicí činí zemi jako hořlavinu a existují obavy, že volby provedené s spornými kandidáty by pouze vedly k výsledku, který nebude uznán. -V znamení napětí kolem cizích sil Francie tlačí EU, aby se ve středu dohodla na uvalení sankcí na soukromou ruskou vojenskou společnost Wagner Group, která podle ní působí v Libyi a Sahelu. -Moskva popírá, že Wagner je spojen s ruským státem a řekla, že odvetí proti sankcím EU uvaleným na její občany. -Mezinárodní společenství schopnost vyžadovat, aby libyjská politická třída dodržela datum voleb 24. prosince, které bylo poprvé dohodnuto v únoru, bylo omezeno jmenováním speciálního vyslance OSN Jána Kubiše, který rezignoval tři týdny před volbami po méně než roce ve funkci. -UN generální tajemník António Guterres od té doby jmenoval Stephanie Williams, bývalou důraznou zástupkyni zvláštního vyslance, aby působila jako jeho zvláštní poradce. -Rusko vetovalo její jmenování plným vyslancem, ale ona má hluboké znalosti Libye a loni projevila ochotu čelit těm v politické třídě, kteří jsou proti volbám. -Misie OSN vydala prohlášení, v němž vyzývá všechny strany, aby nezvrátily dosavadní úspěchy, a ukazuje na registraci téměř 3 milionů voličů, úspěšnou distribuci volebních karet a velký počet kandidátů na prezidentské a parlamentní volby jako na známky hlubokého lidového podpory pro volby. -Americký velvyslanec v Libyi Richard Norland řekl: "Odmítnutí jít k volbám a mobilizace k blokování bude mít za následek, že osud a budoucnost země bude v rukou těch uvnitř Libye a jejich zahraničních podporovatelů, kteří upřednostňují sílu střelných zbraní před sílou hlasování." -Promiňte, ale nevidím, že byste se přihlásili do svého účtu, pokud nemáte jiný účet. 
-Pokud je toto případ, dejte mi prosím vědět, na jaký email jste se již přihlásili na čtečce knih. -Děkuji za informace. -Budu více než šťastný, abych vám pomohl. -Rád tě poznávám. -Doufám, že máte skvělý den! -Omlouvám se, ale nemohu najít účet pod zadanou e-mailovou adresou. -Zákazník se na mě naštval, protože jsem nevěděl, že potřebuje pomoc. -Pracuji v obchodě se zbožím a nakupuji objednávky pro vyzvednutí/doručení. -Často mám zákazníky, kteří se ptají, kde je nějaká věc, a ptají se ve formě pozdravu + otázky, nebo jen otázky. -Také mám zákazníky, kteří jen říkají Ahoj/Dobré ráno/atd. -Prošel jsem kolem zákazníka, který pozdravil, a já jsem mu pozdravil zpět, pak jsem čekal několik sekund, abych viděl, jestli má nějakou otázku. -On nic jiného neřekl, takže jsem pokračoval a pokračoval v nákupu. -Potom znovu řekl "ahoj?" s neomaleným tónem a naštvaně se mě zeptal, jestli tady pracuji. -Řekl jsem, že ano, a on se znovu zeptal na položku ve zlomyslném tónu. -Ukázal jsem, kde jsem si myslel, že to bude, a řekl jsem, že si myslím, že by to tam mělo být, ale vypadá to, že jsme to neměli. -Potom to jen řekl "zapomeň na to" naštvaně a odešel. -Jak jsem měl vědět, že potřebuje pomoc? -On právě řekl "ahoj", což říká hodně zákazníků zdvořile. -Tento je jediný zákazník, který se mi jen pozdravil, aniž by se zeptal na něco, a pak očekával, že mu budu vědět, že potřebuje pomoc. -On nic neřekl neslušného, ale jeho tón hlasu byl celou dobu extrémně naštvaný, i když jsem se snažil mu pomoci. -Děkuji za čekání. -Špatný pořad byl vybrán dříve, to je důvod, proč jsem se dříve zmatil. -Myslel jsem, že to už bylo doručeno. -Zkontroloval jsem správné pořadí a můžu vidět, že jezdec se právě snaží to vyzvednout. -On bude tam za 10-15 minut. -Liz Truss slibuje dalších 75 milionů liber v pomoci Afghánistánu na schůzce G7. -Česky: Liz Truss oznámila, že Velká Británie poskytne Afghánistánu dalších 75 milionů liber v humanitární pomoci, aby pomohla řešit jeho se zhoršující se humanitární situaci. -Ministr zahraničí řekl, že závazek pomůže zachránit životy a „podpoří stabilitu v oblasti“. -Po diskusích mezi ministry zahraničí G7 v Liverpoolu v sobotu o tom, jaké koordinované akce lze podniknout v Afghánistánu a jak se zapojit do vlády Talibanu, následuje. -Militární skupina se v srpnu vrhla do útoku na Kábul v bleskovém postupu, když 20 let okupace středoasijské země bylo ukončeno spěšným spojeneckým odchodem. -Paní Trussová řekla: „Velká Británie poskytuje v Afghánistánu v této zimě zásadní humanitární pomoc.“ -Fondy oznámené dnes budou zachraňovat životy, chránit ženy a dívky a podporovat stabilitu v oblasti. -Jsme odhodláni udělat vše, co můžeme, pro lidi v Afghánistánu. -Doplňková finanční podpora přinese Velké Británii letos závazek ve výši 286 milionů liber vůči Afghánistánu. -Bude to použito k poskytování podpory obětem násilí založeného na pohlaví a k financování základních služeb ochrany dětí. -Organizace Spojených národů a humanitární agentury budou prioritně pomáhat těm, kteří jsou nejvíce ohroženi, včetně domácností vedených ženami a osobami se zdravotním postižením, uvedlo Ministerstvo zahraničních věcí, společného království a rozvoje (FCDO). -Úředníci řekli, že žádné financování nepůjde přímo skrze Taliban, místo toho bude směřovat skrze Fond humanitární pomoci pro Afghánistán, Program pro potravinovou pomoc OSN (WFP) a další organizace. -WFP obdrží 34 milionů liber z financování oznámeného v neděli. 
-David Beasley, ředitel organizace, řekl, že dar „nám pomůže zachránit mnoho životů.“ -"Co vidíme na zemi je srdcervoucí - 23 milionů lidí čelí vážnému hladu v zemi zničené suchotou, konfliktem a ekonomickou krizí," řekl. -Ženy a děti nesou největší břímě tohoto utrpení a jak se blíží tvrdá zima, stále více lidí se dostává do stavu podvýživy a hladomoru každý den. -Tento týden varoval hlavní humanitární představitel OSN, že ekonomický pád Afghánistánu se "odehrává před našima očima" a vyzval mezinárodní společenství, aby podniklo kroky k zastavení "volného pádu" předtím, než dojde k dalším úmrtím. -Martin Griffiths řekl: „Každým týdnem to je stále horší a horší.“ -Oznámení o financování přichází po tom, co ministři tento týden čelili nezvyklým otázkám ohledně úsilí o odchod z Afghánistánu po svědectví vyšetřovatele poslancům. -Raphael Marshall, který pracoval pro Ministerstvo zahraničí během operace Pitting, tvrdil, že pouze 5 % afghánských občanů, kteří se pod jedním britským schématem ucházeli o útěk, obdrželo pomoc v důsledku „dysfunkčního“ a „chaotického“ zacházení se situací. -Pan Marshall řekl Poslanecké sněmovně zahraničních věcí, že někteří z těch, kteří doufali v útěk, byli po zanechání v Kábulu zavražděni. -On také tvrdil, že Boris Johnson požádal o to, aby byla k dispozici "značná kapacita" pro evakuaci zvířat ze útulku, který provozuje bývalý královský námořník Paul "Pen" Farthing, čímž ohrozil životy vojáků, aby jim pomohl opustit soukromě financovaný letoun. -Předseda vlády označil tyto tvrzení za "úplný nesmysl". -V neděli v Muzeu Liverpoolu bude paní Trussová mít diskuse s ministry zemí Asociace národů jihovýchodní Asie, kteří se účastní schůzky G7 poprvé - většinou virtuálně. -Ministr zahraničí zdůrazní důležitost spolupráce s "ekonomikami budoucnosti" jihovýchodní Asie k řešení současných výzev, kterým čelí Západ, uvedlo FCDO. -Pozvání asijským ministrům přišlo po zveřejnění Velké Británie v březnu integrované zahraniční politiky, která se zaměřuje na „naklonění“ směrem k Indo-Pacifiku, což je pohyb vnímaný jako snaha omezit rostoucí vliv Číny v této oblasti. -Scholz a polský premiér diskutují o migraci, energetice a EU. -Německý nový kancléř Olaf Scholz přijel v neděli do Varšavy na jednání s polským premiérem Mateuszem Morawieckim o migraci, energetice, záležitostech Evropské unie a napětí na východě hranic bloku. -Byl uvítán Morawieckim se vojenskými poctami před kanceláří polského premiéra. -Byl to jeden z brzkých návštěv Scholze po jeho přísaze do koaliční vlády ve středu. -Polsko je hlasitým odpůrcem potrubí Nord Stream 2, které bude přenášet ruský plyn přímo do Německa, říkajíc, že to činí Evropu závislou na dodávkách Ruska a vystavuje ji tlaku ze strany Moskvy. -Německý regulátor pozastavil schvalovací postup pro dokončenou ropovodní trasu kvůli právním problémům. -Vláda ve Varšavě je také zapojena do stále se zostřujícího sporu s Evropskou komisí, výkonnou mocí EU, která odmítá poskytnout Polsku fondy na obnovu po pandemii s odůvodněním, že politiky vlády oslabují tamní soudní nezávislost. -Scholz a Morawiecki se také mají bavit o složitých vzájemných vztazích pod novou vládou Německa. -Dobré sousedské vztahy jsou stále přehlíženy druhou světovou válkou, zejména pod současnou pravicovou vládou Polska, která tvrdí, že Německo Polsku dluží náhradu za škody způsobené během války. 
-Agnieszka Lada-Konefal, zástupce ředitele Německého institutu pro polské záležitosti v Darmstadtu v Německu, očekává, že vláda Scholze bude pokračovat v dialogu a kontaktu s Polskem, které je důležitým členem na východním příkopu EU a pátým největším obchodním partnerem Německa. -Návštěva přichází 30 let po tom, co oba parlamenty ratifikovaly smlouvu o dobrých sousedských vztazích a přátelské spolupráci. -Německá nová zahraniční ministryně Annalena Baerbocková byla ve Varšavě v pátek. -Ona vyjádřila německou podporu Polsku, které uzavřelo svou východní hranici pro migranty, kteří údajně dostávají podporu od belaruské vlády, aby hledali nelegální cestu. -Ona také vyzvala k humanitárnímu zacházení s migranty uvízlými na hranici. -Polsko a EU říkají, že vláda běloruského prezidenta Alexandra Lukašenka se snaží destabilizovat blok tím, že podněcuje migraci do jeho zemí. -V pátek se Scholz setkal s francouzským prezidentem Emmanuel Macronem v Paříži a později s úředníky EU a NATO v Bruselu. -Scholz, politik středolevicového směru, se stal devátým německým kancléřem po druhé světové válce, otevírajícím novou éru pro největší ekonomiku a nejlidnatější zemi EU po 16letém působení Angely Merkelové. -Jeho vláda se skládá z koalice jeho středolevicových sociálních demokratů, ekologických Zelených a pro-business Svobodných demokratů. -Můžeme zkusit ruční reset. -Připojte svůj eReader k zdroji napájení jedním z následujících způsobů: -Zapněte počítač a nejprve připojte přiložený USB napájecí kabel k počítači a poté k vašemu čtečce knih. -Zasuňte zástrčku ze zdroje napájení (není součástí balení) do zásuvky a poté připojte svůj eReader k zdroji napájení. -Stiskněte a podržte tlačítko napájení, dokud se světelný indikátor na horním pravém rohu vašeho čtečky knih nezhasne. -Uvidíte obrazovku "Vypnuto", když je váš čtečka eknih vypnutá. -Uvolněte tlačítko napájení. -Stiskněte a podržte tlačítko napájení na vašem čtečce knih po dobu 30 sekund. -Čekejte, až se objeví obrazovka Obnovení. -Uvolni tlačítko napájení. -Váš obrazovka čtečky se ztmaví a začne proces obnovení. -Deluxe ruční / bateriově poháněná vakuová pumpa na penis, vyrobená společností VVI Ltd. Anglie, vám umožní zvládnout vaši erektilní dysfunkci, obecně známou jako ED. -Erektilní dysfunkce může být emocionálně a finančně náročná, proto Encore poskytuje jeden z nejdostupnějších penisových pump na trhu. -Tento vícepohonný vysavač má speciální úchopovou rukojeť zabudovanou do hlavy čerpadla, která uživateli poskytuje vynikající kontrolu nad procesem čerpání a vysávání. -Vacuum terapie byla prokázána jako účinná při léčbě erektilní dysfunkce u více než 95% mužů bez vážných vedlejších účinků nebo léků. -Čerpadlo hlavy a válec jsou oba kryty životní zárukou výrobce, což znamená, že Encore nahradí buď část v případě poškození nebo selhání. -Po malém cvičení se terapie vakuem s tímto systémem stává snadnou a pohodlnou. -Navíc VVI zahrnuje několik dalších položek do tohoto sady, které činí proces rychlým a uživatelsky přívětivým. -Patentovaný výstřelovací prstenec, násypka a mazivo obsažené v sadě pomáhají aplikovat napěťové pásky po čerpání. -Napínací pásky, také známé jako penisové kroužky, pomáhají udržet erekci, jakmile byla dosažena pomocí pumpy. -Tento sada obsahuje různé pásky napětí ve všech nejpopulárnějších velikostech, aby uživatel mohl najít nejefektivnější úroveň napětí. -Aby to dokončilo, celá sada se vejde do elegantního a diskrétního přenosného pouzdra, které se vejde téměř kamkoli. 
-VVI Medical chápe, že mnoho jednotlivců chce udržet svůj sexuální život v soukromí, proto budeme při zasílání tohoto produktu dbát nejvyšší diskrétnosti. -Obdržíte svůj zásilku Encore Deluxe Manuální/Bateriově poháněné Vakuové Erekční Penisové Pumpy v obyčejné krabici. -Žádný lékařský předpis není potřeba k nákupu této pumpy. -Můžete prosím zkusit uskutečnit nákup na počítači na webové stránce? -Platforma může mít nějaké problémy. -Byl jsi schopen zkusit nákup na počítači na webové stránce? -Kvůli nereagování a z důvodu kvality musím ukončit tento chat, neváhejte nás kontaktovat ohledně jakéhokoli dotazu nebo požadavku. Rádi vám poskytneme pomoc. -Mějte krásný den, Na shledanou! -Prosím, podívejte se na kartu Platby a poštovné pro naše aktuální sazby. -Naše standardní služba je zaslání poštou. -Premium podepsané a kurýrní služby jsou k dispozici. -Pokud nejsou uvedeny náklady pro váš stát, kontaktujte nás pro cenovou nabídku. -Dodání menších fotografií až do velikosti 16x12" do Evropy obvykle trvá 5 - 15 pracovních dní od odeslání a do zbytku světa 7 - 20 pracovních dní, prostřednictvím letecké pošty. -Dodání velkých fotografií 20x16" a 24x20" se obvykle doručí do 7 - 20 pracovních dnů do Evropy a zbytku světa. -Kombinujeme dopravu na objednávky pro stejného zákazníka. -Prosím, vyberte si všechny fotografie, které byste chtěli, a po dokončení se pouze jednou zaregistrujte, abyste automaticky obdrželi slevu na poštovné. -Mezinárodní kupující prosím všimněte si: naše velké fotografie jsou zasílány v poštovních trubkách. -Prosím, vezměte na vědomí, že ve některých zemích místní poštovní služby nedoručují poštovní trubky spolu s dopisy a malými balíčky. -Pro tento důvod se uváděná dodací lhůta týká širokého rozsahu. -Poštovní společnosti umožňují až 25 pracovních dní pro doručení zásilek standardním leteckým způsobem. -Prosím, proto umožněte kolem 25 pracovních dní od odeslání, než se na nás obrátíte ohledně podezřelého problému s doručením. -Nabízíme služby Premium Airmail s prioritním zpracováním a sledováním. -Obecně je doručení rychlejší prostřednictvím těchto služeb, ale buďte si vědomi, že nejde o časově omezené nebo zaručené služby a poštovní společnosti aplikují stejnou úroveň služeb až do 25 pracovních dnů pro doručení. -Pokud potřebujete svou objednávku naléhavě, vyberte si možnost expresního kurýrního poštovného (pokud není pro váš stát zobrazena, kontaktujte nás pro vyčíslení). -Váš objednávka bude doručena FedExem během několika dnů. -Pokud byste potřebovali poradit ohledně doporučeného způsobu zaslání do vaší země, kontaktujte nás - máme roky zkušeností a jsme více než šťastní, abychom vám poradili. -Organizace ve střehu, zatímco technici bojují o opravu chyby ve softwaru. -Kritická zranitelnost ve široce používaném softwarovém nástroji - rychle využitá v online hře Minecraft - se rychle vyvíjí jako vážné ohrožení pro organizace po celém světě. -Adam Meyers, senior vice president of intelligence ve společnosti pro bezpečnostní služby Crowdstrike, řekl: „Internet je teď v plamenech.“ -On řekl: "Lidé se snaží opravit a všichni se snaží toho využít." -Řekl v pátek ráno, že během 12 hodin od zveřejnění existence chyby byla "úplně zbraňována", což znamená, že zločinci vyvinuli a rozšířili nástroje pro její využití. -Závažnost chyby může být nejhorší počítačovou zranitelností objevenou za poslední roky. -Bylo to odhaleno ve všudypřítomném nástroji pro cloudové servery a podnikový software, který se používá v celém průmyslu a ve vládních institucích. 
-Pokud není opraveno, umožňuje to zločincům, špionům a programátorským začátečníkům snadný přístup k vnitřním sítím, kde mohou krást cenná data, instalovat malware, mazat důležité informace a mnohem více. -Cyberové útoky jsou nyní považovány za největší hrozbu pro finanční stabilitu. -„Myslím si, že je těžké najít společnost, která není v ohrožení,“ řekl Joe Sullivan, hlavní bezpečnostní důstojník společnosti Cloudflare, jejíž online infrastruktura chrání webové stránky před škodlivými útočníky. -Nečíslné miliony serverů mají nainstalováno a odborníci říkají, že dopady nebudou známy několik dní. -Amit Yoran, generální ředitel bezpečnostní společnosti Tenable, to nazval "největším a nejkritičtějším zranitelností poslední dekády" - a možná největší v historii moderního počítačového věku. -Zranitelnost, označená jako "Log4Shell", byla hodnocena 10 na stupnici od jedné do deseti Apache Software Foundation, který řídí vývoj tohoto softwaru. -Kdokoli s exploitem může získat plný přístup k neopravenému počítači, který používá software. -Odborníci řekli, že extrémní snadnost, s jakou zranitelnost umožňuje útočníkovi přístup k webovému serveru - bez požadavku na heslo - je to, co ji činí tak nebezpečnou. -Novozélandský tým pro nouzovou reakci na počítačové hrozby byl mezi prvními, kteří oznámili, že chyba je "aktivně využívána ve volné přírodě" jen hodiny po tom, co byla veřejně oznámena ve čtvrtek a oprava byla vydána. -Zranitelnost, která se nachází v open-source softwaru Apache, který se používá k provozování webových stránek a dalších webových služeb, byla 24. listopadu nahlášena Nadačnímu fondu společností Alibaba, uvedla. -Trvalo to dva týdny, než se vyvinula a uvolnila oprava. -Aktualizujte své platební informace, prosím, postupujte podle těchto kroků: -Přihlaste se do svého účtu #PRS_ORG#. -Klikněte na "Můj účet" a v menu vyberte "Nastavení účtu". -Vyberte záložku "Informace o platbě". -Pod „Informace o platbě“ vyberte typ kreditní karty a zadejte číslo karty, bezpečnostní kód (CVV), jméno na kartě a datum expirace. -Klikněte na "Uložit”. -Zkusil jsi tyto kroky? -Váš účet je anjahoehn. -Ve vašem účtu je uvedeno, že jedinou možností, jak se přihlásit do vašeho účtu #PRS_ORG#, je #PRS_ORG#. -Vaše uživatelské jméno je anjahoehne. Poslal jsem odkaz pro obnovení vašeho hesla. -Prosím, zkontrolujte prosím svou poštu. -Čekám tady na tebe -Jak to šlo? -Dostal jsi odkaz na obnovení hesla? -Jsi tam? -Poslal jsem další odkaz pro obnovení vašeho hesla. -Prosím, zkontrolujte svou poštu. -Pro kvalitativní účely budu muset uvolnit tento chat, pokud se v příštích 2 minutách neobjeví žádná interakce. -Děkuji vám za kontaktování #PRS_ORG#, bylo mi potěšením vám dnes pomoci. -Doufám, že máte skvělý den. -Jaký byl rozloučení s Ethou? -První rychlé upozornění: Nejsem uživatel účtu, ale její manželka. -Mám povolení používat tento účet, protože jsem prostě špatný v technice a trochu mi to pomáhá s problémy se zdravím duševním (a vidím ironii technofoba se ptát na Redstone Titan :P). -Druhé a mnohem důležitější výhrada: Nechci vyvolávat dramata ani podezřívat, že něco nebylo v pořádku. -Podle mého názoru to byla jen změna větru, která nevyhovovala všem. -Jsem jen starý fanoušek, který uspokojuje nostalgickou vlnu. -S tím vyřešeným >.<... -Býval jsem obrovským fanouškem Mindcracku už ve starých časech. -Nikdy jsem nezmeškal vydání od GuudeBoulderfist, miloval kolaborace atd. -Zatímco jsem sledoval náhodný YouTube kanál, narazil jsem na video, které popisuje historii kanálu Etho. 
-Na konci se dotklo Mindcracku, že se stává komerční. -Jak to neviděl v pozitivním světle a nevyhnutelné odmítnutí podepsat související smlouvy. -Znovu, ani jít 'pro' ani chtít to udržet na úrovni není špatné rozhodnutí a vím, že lidé jdou různými směry a tak. -Rychlé Googlení mě dovedlo k tomuto starému vláknu, které ukazuje jeho stranu věci. -Většinou věci, které víme, je třeba říct opatrně, ale co nejvíce zaujme na první pohled, je to, že jste viděli věci v jiném světle, ale celkově jste zůstali v dobrých vztazích. -Celý tento příběh se odehrál poté, co jsem se sám přesunul k jiným věcem, takže pro mě to je všechno trochu nové. -Co hledám je druhá strana obrázku? -Řekl jsem, že má duševní zdraví není nejlepší a vidět starou skupinu, ke které jsem byl jako fanoušek připojen, rozcházet se bez nějakých hloupých explozí, které jsou příliš běžné ve polarizované veřejné diskusi, by mohlo být příjemné. -Jaká byla reakce od "staré partičky"? -Dělali jste stále věci společně? -Odcizili jste se pomalu? -Stále si povídat nebo pozvat jeden druhého na akce? -Znovu neočekávám nic dramatického ani nevidím lidi na krku jeden druhému. -Naopak. -Myslím, že je to nějakým způsobem nenápadné uzavření něčeho malého ve mém životě, aby to odráželo trochu pozitivního vlivu na mou psychicky zmatenou osobu. -P.S. Nemohl jsem si nevšimnout charitativní sbírky, kterou jsi měl a obrovské množství, které jsi vybral. -To je úžasné jako čert! -Vysvětlení dvojitého výpadku elektrického proudu v Gabbě a proč by se to mohlo stát znovu -Rizika vyplývající z výpadku proudu v rámci kompozice vysílání Gabba se pravděpodobně nezlepší před další sérií Ashes, protože orgány kriketu čekají na podrobnosti plánů na rozsáhlou modernizaci stadionu pro pořádání Olympijských her v roce 2032. -Zdroje řekly The Age a The Sydney Morning Herald, že Gabba je jediným velkým stadionem v australském cricketu, kde hlavní napájecí zdroj na stadionu není dostatečný pro spuštění obrovského množství přenosových vozů a zařízení potřebných pro odesílání obrazu po celém světě. -Primární a záložní generátory napájející globální vysílání Gabba Testu se na čtvrtý den na asi 25 minut vypnuly. -Protože základní výkon v obvodu je potřeba dodat světelné věže Gabba - jedna z nich slavnostně zhasla během zápasu Big Bash League v roce 2019 - a samotné hřiště. -V důsledku toho vysílači čerpají svůj hlavní zdroj energie z obrovského, naftou poháněného generátoru najatého pro Test match, s nouzovým zdrojem energie, který má být získán z nouzového generátoru. -Na čtvrtý den Testovacího zápasu selhala primární generátor spolu s záložním generátorem, což způsobilo jejich současné vypnutí a vedlo k úplnému nebo částečnému ztrátě obrazu a DRS po dobu téměř 30 minut. -Společnost NEP, která poskytuje venkovní vysílací vozy a další zařízení Foxu a Seven, požádala o vysvětlení od společnosti, která poskytla generátory. -Všechny ostatní stadiony, které budou použity pro Ashes - Adelaide Oval, MCG, SCG a Bellerive Oval v Hobartu - poskytnou hlavní napájení pro vysílání, zatímco dieselový generátor bude záložním zdrojem. -Tento rozdíl, který v minulosti způsobil významnou úzkost u pořadatele Fox Cricket, byl zesílen během Ashes Testu omezeným počtem produkčního a technického personálu, který mohl sledovat mnoho metaforických míčů, které byly během zápasu ve vzduchu. 
-Cricket Australia bylo varováno Foxem po několik měsíců, že z technického hlediska by bylo bezpečnější hrát zápas jinde, ale pokud by zůstal na Gabbě, existovaly by "velká rizika" spojená s kostrou posádky povolenou do Queenslandu. -Nerezová ocel vyrobená rovně, údržba břitvy usnadněna s vyměnitelnými čepelemi! -Tento holící strojek je blízkým příbuzným holícího strojku Straight/Cut Throat, dává vám tu starou barberskou vintage atmosféru za zlomek ceny a prakticky žádnou údržbu! -Používáním nahraditelných standardních dvouhranných břitů, stejně jako u klasického bezpečnostního holícího strojku - což znamená, že se nemusíte starat o broušení a nabroušení a přesto si užívat blízkost holení přímočarým břitem! -Perfektní pro začátečníky, kteří chtějí vyzkoušet umění holení pomocí pravých břitví. -Tří- nebo pětilisté holicí strojky podráždí pokožku mnohem více a musíte je tlačit silně proti pokožce, abyste je mohli použít. -Proto je tento holící produkt tak skvělý a často se používá pro lepší péči o pleť než běžný holič. -Tvá tvář ti později poděkuje. -Připraveno k použití s jedním balením břitv -Přichází v dárkové krabici Haryali London Design -Obrázky jsou skutečnými položkami, takže si můžete být jisti, že to, co vidíte, je to, co dostanete. -Haryali London nástroje mají životní záruku proti vadám materiálu a zpracování. -Jakýkoli výrobek, který se ukáže jako vadný, bude opraven nebo vyměněn bezplatně. -Garantujeme proti rozbití, selhání spojů a korozi při běžném používání. -Záruka se nevztahuje na běžné opotřebení a používání přístrojů za jejich limity. -Toto také vylučuje nesprávné použití nástroje tak, jak byl tento nástroj navržen a měl být používán. -Navíc jsou vyloučeny z této záruky i nástroje poškozené zneužitím nebo náhodnou událostí. -PayPal – Je to jediná forma platby, kterou akceptujeme. -Pokud zákazníci nejsou s naším produktem plně spokojeni, jednoduše nám vrátí položku v nepoužitém stavu a my zpracujeme vrácení peněz, jakmile položka bude přijata. -Pokud máte nějaké otázky, kontaktujte nás prosím přes kartu „Zeptat se“, která se nachází na spodní straně stránky s nabídkou. -Naše spokojenost zákazníka je na prvním místě našich priorit. -Cílem je poskytnout našim zákazníkům příjemný nákupní zážitek. -Pokud máte jakékoli otázky nebo problémy, kontaktujte nás prosím prostřednictvím zprávy eBay a my se budeme snažit odpovědět na všechny dotazy do 24 hodin. -Pokud z nějakého důvodu nebudete s vaším nákupem úplně spokojeni, než zanecháte negativní zpětnou vazbu, prosím, kontaktujte nás, protože vyřešíme problém pro vás. -Pokud máte zájem o další produkty, podívejte se prosím do našeho obchodu na eBay. -Sen o udržení všech dětí v bezpečí o Vánocích. -Její bratr (skoro dva) musel být přesvědčen, aby neodešel s malým Ježíškem. -Tam byl obvyklý jemný chaos, který doprovází každé shromáždění dětí ve věku do tří let. -Všichni byli tak potěšeni, že se to podařilo, když tolik dalších vánočních akcí bylo zrušeno, když se objevila další varianta Covidu. -Moje vnučka má čtyři roky, což znamená, že polovina jejího života - polovina jejího života! - byla poznamenána pandemií. -Od té doby, co se správně probudila, neví nic jiného než nošení roušek, posedlost mytím rukou a držení se odstupu. -Na několika příležitostech (skrze různá uzamčení) když jsem ji viděl, nevěděl jsem, jestli bych ji měl políbit nebo ne. -Jaký druh zprávy to posílá do přijímajícího, velmi pozorného mozku malého dítěte? -Bojím se přemýšlet. -Říkám to ne jako někdo, kdo je proti uzavření nebo odstupu. 
-Za všechnu kritiku naší vlády se nikterá země nedopálila přesně. -Od začátku roku 2020 to bylo dva kroky vpřed a jeden zpět (a někdy naopak). -A vědělo se - i když mnozí z nás si během těch prvních slunných měsíců užívali luxusu, že nemusí ven - že po celé Británii jsou lidé, pro které být doma je peklo, ne nebe. -Děti jako Arthur Labinjo-Hughes, který se stával neviditelným bez školního personálu, aby přemýšlel, proč je tak hubený a nemocný, bez sousedů, bez procházejících, bez nic. -Jsi na stránce knihy? -Můžete upravit velikost písma, fonty, řádkování a zarovnání, aby čtení bylo pro vaše oči příjemnější. -Při čtení stiskněte prostředek stránky, aby se zobrazilo čtení menu. -Klepněte na ikonu Text. -Bylo to naším jezdcem nechtěně rozlito. -Pro opětovné doručení vám nebudeme účtovat dvakrát. -Posíláme vám pouze novou objednávku. -Vaše opětovné doručení je nyní připravováno restaurací. -Prosím o vaši trpělivost a počkejte, až váš objednávka bude doručena do #NUMBER# minut. -Ano...my všichni máme zbraně. -I i děti. -Všichni kolem sebe chodíme jako bychom byli ve Divokém západu. -Ani nevím, kde začít. -Myslíte opravdu, že by narkoman měl drahou zbraň a pak si našetřil dost peněz na náboje? -Crackový hlava není "profesionální lupič". -Pokud neslyšíte, že lidé jsou bodáni, tak co? -Bodnutí nedostávají stejnou pozornost od médií jako střelby. -Jen proto, že tisk to nezdůrazňuje, neznamená to, že se to neděje. -Co to má společného s rasou? -Podpora svalové struktury a aktivity s přídavkem Muscle Maintenance. -Společná pomoc pro psy je vysoce specifickým doplňkem pro klouby a svaly s glukosaminem pro psy, navrženým pro podporu pohyblivosti. -Společná pomoc pro psy může být podávána všem psům jakéhokoli věku na úrovni „Obecné podpory“, aby se udržela volnost pohybu a stav svalů po celý jejich život. -Pro starší a pracující psy nebo ty, které mají sníženou svalovou hmotu, se doporučuje krmit Joint Aid for Dogs na úrovni „plné podpory“. -Jaké jsou klíčové výhody používání Joint Aid pro psy? -Udržuje pružnost pohybu u všech pracovních a domácích psů bez ohledu na věk, velikost a úroveň cvičení. -Podporuje tvorbu chrupavky, šlach, vazů, synoviální tekutiny a svalů. -Pomáhá udržovat přirozené protizánětlivé akce metabolismu psa. -Poskytuje jedinečnou kombinaci 22 aktivních nutraceutik. -Obsahuje unikátní systém Oatinol™ Delivery pro udržení vysoké rychlosti absorpce živin. -Obsahuje vysoké úrovně Omega 3 pro podporu optimálního zdraví a výkonu. -Vyrábí se jako chutné a snadno krmitelné 2mm kuličky. -Může být podáváno všem psům bez ohledu na věk, velikost nebo úroveň cvičení. -Pro pokračující podporu se doporučuje krmnou směs Joint Aid podávat denně. -Měřítko je součástí balení. -Nutraceuticals jsou nutriční látky, které poskytují další zdravotní přínosy. -Přes přidání následujících nutraceutik Joint Aid poskytuje doplňkovou podporu pro všechny psy. -Vysoké hladiny 5 konkrétních stravovacích aminokyselin, nezbytných pro produkci svalové tkáně. -Chondroitin je nezbytný pro odolnost chrupavky. -Udržuje normální enzymatickou aktivitu a schopnost držet vodu, aby poskytla zdravou odolnost proti stlačení. -Kolagen má velkou pružnou sílu a poskytuje rámec, který dává tkáním jejich pevnost a odolnost. -Můžu vidět, že detaily odpovídají. -Omlouvám se, ale zdá se, že vaše původní objednávka byla omylem rozlitá, proto musel můj kolega udělat novou objednávku. -Nová objednávka je číslo #NUMBER# a bude tam za pouhých 20 minut. -Jezdec to vyzvedává a doručí co nejdříve. 
-Tento arabský stát plánuje zvýšit obchod s Ruskem. -Spojené arabské emiráty plánují zvýšit svůj obchodní obrat s Ruskem na 20 miliard dolarů během příštích pěti let, oznámil ministr zahraničního obchodu Thani bin Ahmed Al Zeyoudi. -"Spolupracujeme s ruskou stranou na zvýšení obchodního obratu na 20 miliard dolarů během příštích pěti let a na pokračování investic do dalších oblastí ekonomické spolupráce," řekl Al Zeyoudi v sobotu během plenárního zasedání mezinárodního fóra Expo-2020 v Spojených arabských emirátech, které bylo kvůli pandemii Covid-19 odloženo. -Podle úředníka jsou "vztahy mezi Abú Zabí a Moskvou strategické." -On poznamenal, že až 90 % všech ruských investic do arabského světa jsou provedeny v Spojených arabských emirátech. -Spojené arabské emiráty také významně investují do Ruska, což tvoří asi 80 % všech arabských investic do ekonomiky Ruska. -Al Zeyoudi uvedl, že počet ruských společností v Spojených arabských emirátech dosáhl téměř 4 000. -Podle ministra již Spojené arabské emiráty investují do několika ruských sektorů, včetně petrochemického průmyslu, ropy a plynu, automobilového průmyslu a přístavů, a plánují rozšířit tento seznam. -V roce 2020 dosáhl obchodní obrat mezi oběma státy 3,3 miliardy dolarů a v prvních 10 měsících roku 2021 jeho objem překročil 4 miliardy dolarů a dosáhl nového rekordu, uvedl minulý týden ruský premiér Michail Michustin. -Podle Ministerstva ekonomiky hlavně Rusko letos vyváželo do Spojených arabských emirátů minerální produkty, drahé kameny a kovy, zatímco ruské dovozy z arabské země zahrnovaly stroje, zařízení a vozidla. -Jak dlouho trvá, než se malware infikuje do vašeho nového počítače? -Pokud používáte bezplatný nebo jiný nekvalitní bezpečnostní software, možná to nebude trvat dlouho. -Kyberzločinci jsou sofistikovanější než kdy dříve a používají rozmanitou paletu nástrojů k získání přístupu k vašim informacím. -Jiné bezpečnostní řešení prostě nemají zdroje, aby mohly držet krok s novými hrozbami, jak se objevují. -Čím jsou hrozby horší, my se jenom zlepšujeme. -Naše týmy bezpečnostních odborníků neustále analyzují nové hrozby a vymýšlejí nové způsoby, jak chránit vaše zařízení před nimi. -Soustředíme se výhradně na bezpečnost a jsme v tom nejlepší. -Naše soustředěná kombinace oddanosti a odbornosti přináší našim zákazníkům výhody. -Norton předčil konkurenci ve většině renomovaných srovnávacích testech a pouze Norton získal PC Magazine Editors’ Choice Award 34krát, včetně 11 let po sobě – více než jakákoli jiná bezpečnostní společnost. -Co to pro tebe znamená? -Když si koupíte Norton Security, dostanete jeden z nejlepších bezpečnostních produktů na trhu dnes. -Zahrnujeme záruku ochrany, kterou může poskytnout pouze Norton. -Jsme tak sebevědomí ve své schopnosti udržet vás bezpečné, nabízíme záruku vrácení peněz: -Pokud naši odborníci Norton nemohou odstranit virus, který se objevil na vašem počítači nebo Macu, vrátíme vám peníze* -S Norton Security Deluxe můžete rychle a snadno zabezpečit své zařízení. -Norton Security Deluxe poskytuje jednoduchý pohled, který podrobně popisuje stav ochrany vašeho zařízení. -Z jednoho ovládacího panelu můžete sledovat nastavení zabezpečení a ochrany identity a dokonce si prohlédnout historii skenovaných souborů a analyzovaných stahování. -Zatímco se snažíme zajistit, aby informace o produktech na našich webových stránkách byly správné, občas mohou výrobci měnit své seznamy ingrediencí. 
-Skutečné balení produktu a materiály mohou obsahovat více a/nebo odlišné informace než ty, které jsou zobrazeny na našich webových stránkách. -Všechny informace o produktech na našich webových stránkách jsou poskytovány pouze pro informační účely. -Doporučujeme, abyste se nepřímo spoléhali na informace prezentované na našich webových stránkách. -Prosím, vždy si přečtěte štítky, varování a pokyny poskytované s produktem před jeho použitím nebo konzumací. -V případě jakýchkoli bezpečnostních obav nebo pro jakékoli další informace o produktu, prosím pečlivě přečtěte si pokyny uvedené na etiketě nebo obalu a kontaktujte výrobce. -Obsah na této stránce není určen jako náhrada za radu poskytnutou lékařem, lékárníkem nebo jiným licencovaným zdravotnickým pracovníkem. -Kontaktujte okamžitě svého zdravotního poskytovatele, pokud podezříváte, že máte zdravotní problém. -Informace a prohlášení o produktech nejsou určeny k diagnostice, léčbě, léčení nebo prevenci jakéhokoli onemocnění nebo zdravotního stavu. -Organicsbeauty nepřijímá žádnou odpovědnost za nepřesnosti nebo nepravdivé informace o produktech od výrobců nebo jiných třetích stran. -Toto nemá vliv na vaše zákonná práva. -Všechny objednané položky budou odeslány do 3-5 pracovních dnů po obdržení potvrzení platby prostřednictvím PayPalu. -Používáme renomované kurýry k odeslání našich zásilek, jako je FedEx, DHL, TNT nebo EMS. -Číslo sledování bude poskytnuto po odeslání balíků. -Normální doba dodání je 6-8 pracovních dní od okamžiku odeslání zboží. -Prosím, vezměte na vědomí, že čas dodání může být delší v některých odlišných přepravních podmínkách, jako je proclení celního režimu, nedostatek správné adresy, změna adresy nebo nějaké jiné důvody. -Pokud máte jakékoli dotazy nebo problémy, neváhejte nás kontaktovat prostřednictvím systému zpráv eBay nebo klikněte na kartu "Zeptat se prodejce" pod každým výpisem. -Odpovíme do 24 hodin. -Prosím, vezměte na vědomí, že clo, daň z přidané hodnoty, karanténní poplatky, poplatky za změnu adresy nebo jakékoli jiné daně nejsou zahrnuty v ceně zboží nebo v poštovném. -Tyto poplatky jsou na zodpovědnosti kupujícího. -Prosím, zkontrolujte u celního úřadu ve vaší zemi, jaké jsou tyto další náklady nebo daně atd., předtím, než budete dražit / kupovat tyto položky. -Nemáme žádnou kontrolu nad celními poplatky nebo časem pro celní proces, ani nad jinými poplatky; proto je čas doručení pouze orientační. -Prodávající nejsou zodpovědní za časy přepravy služby dopravy. -Čas přepravy se může lišit zejména během špičkových období. -Celní poplatky obvykle účtují dopravní společnosti nebo se sbírají při doručení balíků. -Pokud máte s produktem nějaký problém, prosím okamžitě nás kontaktujte, protože zajišťujeme rychlé a nejlepší řešení pro jakýkoli problém s našimi produkty. -Omlouváme se, ale nemůžeme změnit adresu, jakmile byla již umístěna. -V tomto případě navrhuji, abyste zavolali jezdci, až bude blízko, abyste mohli upravit adresu. -Pro to udělejte, jednoduše přejděte na stránku s objednávkou, klepněte na „Nápověda a podpora“ a vyberte možnost „Zavolat jezdci“. -Česky: Protesty proti Assamu CAA oblečení vzdávají hold lidem, kteří zemřeli během protestů. -Pět agitátorů bylo zabito během proti-CAA bouří v Assamu před dvěma lety. -Několik organizací v Assamu v neděli vzdalo hold pěti agitátorům, kteří byli před dvěma lety zabiti během proti-CAA protestů, a rozhodlo se obnovit hnutí proti tomuto zákonu. 
-Na rezidenci Sama Stafforda, jednoho z agitátorů, kteří zemřeli, byly uspořádány památné schůze a na hřišti v Guwahati se účastníci rozhodli znovu zintenzivnit protesty proti Zákonu o občanství (novela). -Krishak Mukti Sangram Samiti (KMSS), které bylo mezi prvními skupinami, které organizovaly protesty proti CAA po jeho schválení ve Sněmovně, vzdalo hold agitátorům v Samově domě v Hatigaonu. -Sibsagar MLA Akhil Gogoi, který byl během protestů vůdcem Krishak Mukti Sangram Samiti a byl za svou účast ve vzpourách uvězněn, při kladení věnců u fotografií těch, kteří zemřeli, řekl, že politické strany a "nacionalistické organizace" by měly vést obnovení hnutí. -Komentující artistickou frakci, která se stala středem pozornosti v roce 2019, řekl: "Nemůžeme očekávat, že budou organizovat protesty." -Jejich pomoc je klíčová, ale neměli by být obviňováni z toho, že neobnovili hnutí. -Všechny Studentské Unie Assamu (AASU), další klíčový hráč ve vzpourě, uspořádala památník na hřišti Střední školy Hatigaon. -Na této příležitosti řekl hlavní poradce AASU Samujjal Kumar Bhattacharya: "Je špatné říkat, že proti-CAA hnutí už zemřelo." -Ztratilo svou intenzitu kvůli zahájení zkoušek (v lednu 2020) a poté pandemii a uzavřením. -Obnovíme agitaci s plnou intenzitou znovu. -"Nenecháme naše oběti marně," řekl. -Pan Bhattacharya řekl, že proti-CAA protesty budou opět pan-Northeast jako v roce 2019. -Zpěvák-skladatel hudby Zubeen Garg, který sehrál vedoucí roli ve protestech v roce 2019, také vyjádřil svou úctu na programu pořádaném AASU. -Nemůžeme přijmout CAA a to je jisté. -Vláda se snaží nás zmást, ale my jim nedovolíme, aby nás donutili to přijmout," řekl. -Několik organizací, včetně AASU, North East Students' Organisation (NESO) a Assam Jatiya Parishad (AJP), si připomnělo "černý den" 11. prosince k označení dvou let od schválení CAA ve Sněmovně. -Dobré odpoledne, děkujeme, že jste se dnes spojili s námi, jste přes #NAME#. -Mohl byste potvrdit číslo objednávky, jméno na účtu, e-mailovou adresu a dodací adresu, prosím? -Jednu chvíli, nech mě opravit soubory pro tebe. -Ujistěte se, že na vašem čtečce knih provádíte následující kroky. -Jdi na svou domovskou obrazovku. -Klepněte na ikonu Více dole na obrazovce. -Nastavení. -Zobrazit informace o zařízení. -Vedle "Opravit váš účet #PRS_ORG#", klepněte na Opravit. -Opravte nyní. -Které tituly ti chybí? -Prosím, postupujte podle níže uvedených kroků pro provedení opravy synchronizace ve vašem #PRS_ORG# (před zahájením budete potřebovat připojení Wi-Fi): -Jdi na svou domovskou obrazovku. -Klepněte na ikonu Více v dolním pravém rohu obrazovky (3 vodorovné čáry). -Nastavení. -Zobrazit informace o zařízení. -Kromě opravy/obnovení vašeho účtu #PRS_ORG#, klepněte na Oprava/Obnovení. -Opravit nyní/Obnovit -Až bude synchronizace dokončena, prosím, klepněte znovu na Synchronizovat nyní, abyste nainstalovali dostupné aktualizace. -Prosím, dejte mi vědět, jestli můžete stáhnout a otevřít svou knihu teď. -Ahoj, jsi pořád tam? -Neslyšeli jsme od tebe. -Můžeme být odpojeni. -Ukončím tuto chatovací relaci. -Pokud hledáte, jak se opět spojit s podporou zákazníků, můžete nás kontaktovat na #URL# a člen našeho týmu bude rád, že vám pomůže. -Proč byli Skyler a Walt Jr. tak naštvaní, že Walt pracoval na domě v druhé sezóně? -Konkrétně 2.10 "Přes" -Walt nahradí ohřívač vody a poté nahradí desky, které zřejmě nebyly nutně hnijící. -Proč se Skyler zdá tak naštvaný kvůli tomu? -Už úplně znechucená se ptá: "Dnes vůbec půjdeš do práce?" 
-Před týdnem nebo dvěma byla o něm nadšená, že bude celou dobu odpočívat a uzdravovat se. -Rozumím, že je v tomto vztahu nešťastná, ale Walter Jr. se zdá být nejasně rozhořčený a úplně zmatený Waltovými rekonstrukcemi. -Jsem si také vědoma, že Skyler otevřeně flirtuje s Tedem v naději, že se někdo bude konečně chovat k ní jako k prioritě, zatímco nosí dítě, zatímco Walt udělal všechno jen o sobě od svých padesátých narozenin. -Stále mi přijde divné, když to znovu sleduji, že Sky a Jr. jsou tak neuvěřitelně naštvaní, že Walt dělá něco produktivního doma, než lže, nebo zabíjí lidi, nebo dělá drogy. -Jen opravuji dům jako by to měl dělat majitel a mám k tomu jen volný čas. -Rozumím také, že to je jen další forma zoufalství, aby se pokusil udržet svou roli manžela a rodinného muže, přestože den nebo dva předtím donutil svého teenage syna k pití tequily. -Je jasné, že se snaží získat zpět jejich přízeň tím, že vychvaluje problém, který není okamžitou prioritou, aby to vypadalo, jako by udělal skvělou práci a je skvělá osoba! -On jasně neumí řídit škody. -Bez ohledu na to, reakce jeho ženy a syna mě stále dráždila a cítil jsem se v této situaci nucen zdůraznit Waltův zoufalý pokus napravit ošklivé chyby. -Iowa Santa se po padesáti letech stáhne do důchodu. -Český Santa z Iowa, který udělal dětem radost už po padesát let, říká, že je připravený odložit červený kabát a užít si klidnější Vánoce. -Dave Stoufer odhalil, že zdravotní problémy a související s věkem vedly k jeho rozhodnutí odejít ze "nejdelší práce", kterou kdy měl. -Jeho žena Rachel Nicola řekla, že ačkoli je velmi hrdá na práci, kterou její manžel udělal, aby přinesl radost tolika lidem, těší se na to, že bude mít více času na oslavu Vánoc s ním. -Těžké sněžení způsobuje zkázu v Srbsku a většině Balkánu. -Těžké sněžení způsobilo v neděli většinou balkánských zemí chaos, který narušil veřejnou dopravu. -Letecké spojení bylo v neděli zrušeno na hlavním letišti v Bělehradu a mnoho oblastí hlásilo výpadky elektrického proudu a poškození budov. -Mnoho západní Srbska bylo bez elektřiny, jak varovaly úřady před nezbytnou cestou a vyzvaly lidi v Srbsku, aby šetřili energii. -Do Belgrade byly poškozeny auta a budovy kvůli sněhu. -Několik letů z a do hlavního letiště v Bělehradě bylo zrušeno kvůli počasí a krátkému výpadku proudu do hlavního terminálu, uvedla bělehradská média. -Dálnice vedoucí k letišti byla uzavřena na několik hodin kvůli dopravní zácpě způsobené sněžením. -Cestující na místní vlak do Bělehradu byli uvězněni ve sněhu po dobu sedmi hodin, než jim byla poskytnuta autobusová doprava do hlavního města. -Záchranné služby pomáhají orgánům při čištění, zatímco byl vydán další varování před sněhem a ledem. -Mezitím, v Bulharsku, silné deště a velké povodně zasáhly jižní části země během víkendu, což donutilo tamní úřady vyhlásit stav nouze. -Nejhůře postižené oblasti byly v oblasti Smolyan, blízko hranice s Řeckem, kde řeky prorazily své břehy a způsobily přetečení silnic a zaplavení domů. -Několik kamionů bylo uvězněno v sesuvu půdy na meziměstské silnici. -Silné větry narušily dodávky elektrické energie ve stovkách vesnic, uvedly úřady. -Ve středním Albánii se orgány mobilizovaly, aby zvládly záplavy po třech dnech neustálého deště a sněžení. -Vjosa řeka na jihu zaplavila mnoho oblastí. -Starší pár, který přespal na střeše svého domu na jihu Albánie, byl ráno zachráněn policií. -Mnoho silnic bylo dočasně uzavřeno sesuvy půdy na jihu. 
-V jiných částech severovýchodu a jihovýchodu země silné sněžení ztížilo nebo dočasně uzavřelo dopravu. -Skvělé!! -Jsem rád, že jsi nyní přistoupil k svému e-knize!! -Pro vaši referenci, pošlu vám transkript naší konverzace. -Pokud budete mít jakékoli další otázky nebo obavy, můžete vždy odpovědět na tento e-mail a my vám budeme moci dále pomoci. -Je tu něco jiného, s čím bych vám dnes mohl pomoci? -Když jde o nákup kvalitního vybavení, spacák by měl být na prvním místě. -Můžete šetřit na všech druzích vybavení, ale ne na spacáku. -Mnoho času, když jste venku na táboření nebo na expedicích, bude stráveno ve vašem spánku a s Snugpak máte zajištěnou kvalitu. -Tento britský vyrobený spací pytel spojuje malou velikost balení s vážným výkonem a je oblíbenou volbou. -Mnoho lidí vnímá Softie 12 Osprey jako nejlepší čtyřsezónní syntetickou výplň spacího pytle, která je k dispozici. -Od roku 1987 nastavuje standard pro výkon velikosti zimního batohu, který ostatní mají následovat. -Ti, kteří vědí o Softie 12 Osprey, buď ji použili, nebo si přáli, aby ji měli. -Používá se od výšin skotských hor až po dno vaší sněžné jámy. -Softie 12 Osprey, stejně jako mnoho dalších spacích pytlů z naší Softie Original Series, bylo přiděleno NATO Stock Number. -Quilted horní část tašky je šitá, plisovaná a vybavená šňůrkou, takže se táhne do tvaru, podobně jako kapuce na bundě. -Aby se zabránilo zaseknutí dvoucestného zipu buď do zipového pláště nebo do okrajů tašky, je za zipem sešita "protizasekávací páska" z weboviny. -Spony pro upevnění a zavěšovací záložky Uvnitř jsou poskytovány záložky pro udržení volného obalu na svém místě, odpovídající pozici, kterou poskytujeme na našich obalech. -Vnější kapsy umožňují snadno pověsit tašku na větrání a sušení. -Zúžení tašky do kruhového dna vytváří tvar "mumií", který je snadno zahřátý a minimalizuje hmotnost použitého materiálu. -Těžko vidět na obrázku, ale zipová klapka protéká celou délkou tašky, za zipem, aby se zabránilo úniku tepla skrz zipovou oblast. -Koupit levou a pravou ruku, aby se vytvořila dvojice (prosím zkontrolujte při objednávání). -Přichází kompletní s kompresním sáčkem, aby se taška zmenšila, když není v používání. -Může být použito s panelem Snugpak Exanda pro vytvoření širšího spacího pytle pro větší pohodlí. -Tento spací pytel může být udělán extra dlouhý. -Jednoduchý profilovaný spací pytel s jednou vrstvou měkké izolace. -Snugpak sídlí v seznamované továrně postavené v 1800s na okraji krásných Yorkshire Dales. -Jsou velmi hrdí na to, že jsou jedním z posledních výrobců kvalitních spacích pytlů a izolovaného oblečení nejen v Velké Británii, ale po celé Evropě. -Máme oddanou pracovní sílu ve naší továrně v West Yorkshire v severní Anglii, kteří jsou vyškoleni k používání nejmodernějších strojů a tradičních šicích technik, aby naše nápady přivedly k životu. -Left Limited je oficiálním dodavatelem pro Snugpak a v našem EBAY obchodě nabízíme širokou škálu jejich vybavení. -Left LTD je vedoucí dodavatel vybavení pro ozbrojené síly a průmysl osobní ochrany. -Popis, prosím, posuňte se dolů na konec výpisu pro více obrázků. -Zde máme na prodej pravý Longines použitý chronograf náramkové hodinky ciferník. -Číselník je černé barvy s bílými značkami a datumovou otvorou v dolním sub-číselníku. -Dial je ve velmi dobrém, ne-li novém starém skladovém stavu. -Zadní strana ciferníku není označena. -Ručička měří 37 mm v průměru a nohy ručičky jsou přibližně na 7 a 37. -Prosím, podívejte se na obrázky pro více podrobností. 
-Garance je zaručena pro autenticitu tohoto ciferníku. -Platba se očekává do 5 pracovních dnů. -Přijímáme platbu přes Paypal, bankovní převod nebo platbu při odběru. -Nemáme možnost přímo přijímat platební karty nebo debetní karty, ale tyto jsou přijatelné prostřednictvím Paypalu. -V některých případech můžeme přijímat pouze bankovní převod, například pro mezinárodní transakci, kde má kupující velmi nízkou nebo žádnou zpětnou vazbu. -Česky: Pro domácí dopravu používáme 3 různé typy. -Možnosti určené se liší v závislosti na aukci. -Normálně používáme Royal Mail první třídy zaznamenané pro balíky pod hodnotou 40 liber a Royal Mail speciální doručení pro položky nad 40 liber. -Kompenzace za speciální doručení jsou 500 liber, 1000 liber a 2500 liber a my budeme hradit za Vaši zásilku odpovídající částku, pokud je tento servis použit. -Třetí službu, kterou používáme ve Velké Británii, je kurýrní doručení, které bude obvykle Citylink do 5.30 hodin následující den. -Používáme tuto službu pouze pro těžké nebo objemné položky. -Pro mezinárodní dopravu používáme dva různé metody. -Hlavní způsob doručení je Royal Mail mezinárodní podepsaný. -Toto je služba, která vyžaduje podpis při doručení, ale je sledována pouze v rámci Spojeného království. -Nicméně, potvrzení o doručení je dostupné online. -Maximální úroveň náhrady za tuto službu je 500 liber a časy doručení se liší podle destinace. -Také k dispozici za příplatek, pokud je požadováno, jsou mezinárodní doručení následující den prostřednictvím FEDEX Global Express. -Toto je pouze na základě citace a musíte nám poskytnout vaši adresu pro vypracování cenové nabídky. -Maximální úrovně kompenzace na tuto službu je 1000 dolarů Podmínky prodeje. -Všechny prodeje jsou konečné a očekáváme platbu do 5 pracovních dnů. -Nabízíme 30denní politiku vrácení peněz pro zboží, pokud je přijato zpět ve stejném stavu, ve kterém bylo odesláno, s všemi původními obalovými materiály a nebylo s ním manipulováno. -Rezervujeme si právo stanovit omezení platebních podmínek pro zboží, které je odesíláno do určitých mezinárodních destinací, jako jsou ty, kde je vysoké riziko podvodu. -Prodáváme zde na eBay už více než deset let a nabízíme vysoce kvalitní zboží za skvělé ceny. -Oba kupujeme a prodáváme hodinky značek Premium online i offline a všechny naše hodinky jsou kontrolovány hodináři vyškolenými programem WOSTEP (Watches of Switzerland Training Enterprise Program). -Tam, kde je uvedeno, budou hodinky dodány s mechanickou zárukou. -Záruka nezahrnuje zneužití nebo zneužívání hodinek a doporučuje se, aby všechny staré hodinky byly před ponořením testovány na vodotěsnost. -Pokud si přejete kontaktovat nás, můžete tak učinit pomocí tlačítka Kontaktovat prodejce na výpisu. -Vždy nás zajímá, co mají noví dodavatelé a můžeme také nabídnout velkoobchodní ceny na některé položky, které prodáváme kromě hodinek. -Jsme hrdí na to, že jsme nezávislí a nejsme podporováni, schváleni ani doporučeni žádnou značkou, kterou prodáváme, včetně Rolexu. -Vážíme si našich zpětných vazeb, protože věříme, že to hodně říká o tom, jak se staráme o zákazníky. -Vždy po obdržení zpětné vazby od našich zákazníků jim také zanecháváme zpětnou vazbu, protože nám to umožňuje vědět, že zakoupený produkt byl obdržen a že zákazník je s ním spokojen. -Pokud však nejste v žádném ohledu spokojeni, dejte nám prosím vědět před odchodem zpět, abychom mohli zkusit napravit jakékoli problémy. 
-Získejte supersize obrázky a bezplatné hosting obrázků -Pozor prodejci - Získejte šablony pro hostování obrázků, plánování na Auctiva.com. -Sledujte počet zobrazení stránek s bezplatným počítadlem Auctiva. -Joe Biden lituje selhání při zastavení globálního oteplování po smrtelných tornádech. -Prezident Joe Biden v sobotu litoval, že svět selhal v zastavení globálního oteplování, po tom, co se vyjádřil k smrtícím tornádám, které se prohnaly několika státy. -Všichni víme, že všechno je intenzivnější, když se otepluje klima. -"Všechno," řekl. -A samozřejmě to má nějaký dopad tady. -Alespoň 30 tornád bylo hlášeno ve šesti různých státech, což způsobilo široké zničení, a se očekává, že bude z této bouře zabito více než 100 lidí. -Prezident řekl, že nezná plný rozsah příspěvku globálního oteplování k smrtelným bouřím, které označil za jednu z "největších výbuchů tornád v historii". -Řekl, že požádá Agenturu pro ochranu životního prostředí o vyšetření. -Biden řekl: "Vše, co vím, je, že intenzita počasí po celém světě má nějaký dopad jako důsledek oteplování planety." -Prezident pochválil reportéra, který se ho zeptal na změny klimatu. -"Jako obvykle, vždycky se ptáš na nejlepší otázku," řekl s ironickým smíchem. -"Jak tohle zvládneme?", pokračoval on. -Částí toho je uznání, že pravděpodobnost menšího počtu katastrof způsobených počasím, bez pokračování v boji proti globálnímu oteplování, se prostě nestane. -Biden řekl, že byl šokován rekordními požáry v zemi během roku 2021, vyjádřil obavy, že globální oteplování bylo hlavním přispěvatelem. -On tedy musíme jednat, řekl. -Biden řekl, že první krok je zachránit životy a postarat se o rodiny, které byly postiženy bouřemi. -Slibuji ti. -Cokoli je potřeba. -Biden řekl: "Vláda federace bude poskytovat cokoli, co je potřeba." -Řekl, že bude nadále pečlivě sledovat obnovu po bouři a udělá vše, co bude federální vláda potřebovat. -Chci, aby lidé ve všech těchto státech věděli. -Projdeme to. -Řekl: "Projdeme to společně a federální vláda se nevzdá." -Tohle je jeden z těch časů, kdy nejsme Demokraté ani Republikáni. -Prezident řekl, že navštíví postižené oblasti po bouři, když bylo jasné, že se nebude míchat do místních záchranných úsilí. -On řekl: "Plánuji jít." -Norton Security Deluxe zahrnuje přístup k odborné pomoci od certifikovaných techniků Nortonu online. -Pokud budete potřebovat pomoc kdykoli, naši podpůrní zástupci jsou připraveni vám pomoci 24 hodin denně, sedm dní v týdnu. -Chcete-li se ucházet o službu Virus Protection Promise, musíte si zakoupit, obnovit nebo aktualizovat svou předplatnou Norton přímo od společnosti Symantec nebo se přihlásit k službě Norton Automatic Renewal. -Pokud zástupce služby Symantec není schopen odstranit virus z vašeho zařízení, pak můžete obdržet plnou náhradu skutečné ceny zaplacené za předplatné Norton, nebo pokud se jedná o balíček Norton, celkovou cenu balíčku Norton zaplacenou (netto z jakýchkoli slev nebo náhrad obdržených a odečtením jakýchkoli poštovného, manipulačních poplatků a příslušných daní, s výjimkou určitých států a zemí, kde jsou poštovné, manipulační poplatky a daně vratné) a pouze pro aktuální placenou službu předplatného nebo balíčku předplatného. -Předtím, než je váš zařízení infikováno virem, musí být předplatné Nortonu nainstalováno a aktivováno. -Refunda NEAPLIKUJE se na žádné škody způsobené viry. -Podívejte se na webovou stránku Nortonu pro více podrobností. -Chránit to, co je důležité, s nejlépe hodnocenou bezpečnostní službou. 
-Váš online život a skutečný život se slučují do jednoho bezproblémového zážitku a potřebujete zabezpečení, které vás ochrání před viry, krádeží identity a dalšími digitálními hrozbami, aby se nestaly skutečnými bolestmi hlavy. -Vidíme více, analyzujeme více a zastavujeme více online hrozeb. -Autorka knihy 'Rozhovor s upírem' Anne Rice zemřela ve věku 80 let. -Ona zemřela kvůli komplikacím vyplývajícím z mrtvice, řekl Christopher Rice. -Největším úspěchem Riceové bylo její první román "Rozhovor s upírem", který byl vydán v roce 1976 a představil postavu upíra Lestata, který byl hlavní postavou ve 13 knihové sérii Kronik, z nichž nejnovější byla vydána v roce 2018. -Rice řekl na přednášce na Southern Illinois University v roce 2010: „Měl jsem představu o Lestatovi jako o muži akce, muži, který může dělat věci, které já nemůžu.“ -"Interview s upírem" bylo zfilmováno úspěšným filmem v roce 1994, což pomohlo obnovit zájem o žánr upírů, který pokračoval seriálem "The Vampire Diaries" a filmovou sérií "Twilight". -I když žila většinu svého života v Kalifornii, Rice byla rodilou Neworleánčankou a podle její biografie na webu nastavila mnoho svých příběhů právě tam. -Christopher Rice, syn Riceových, řekl, že byl u postele své matky, když zemřela. -Anne Rice bude pohřbena v soukromém obřadu v New Orleansu, s veřejnou památnou slavností plánovanou na příští rok, řekl. -Děkuji vám za to, že jste si dnes udělali čas na rozhovor se mnou a doufám, že jsem dokázal vyřešit váš dotaz. Pokud byste nevadilo, abyste hodnotili naši dnešní konverzaci na základě mých zákaznických dovedností, byl bych vám velmi vděčný. Tlačítko pro hodnocení najdete v tomto chatu. -Doufám, že máte skvělý den a prosím, vraťte se k nám, pokud budete potřebovat další pomoc. diff --git a/spaces/zhang-wei-jian/docker/node_modules/nodemon/lib/version.js b/spaces/zhang-wei-jian/docker/node_modules/nodemon/lib/version.js deleted file mode 100644 index d0f510447f57b96aad5bf60c59f6cd3d741bfb8d..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/nodemon/lib/version.js +++ /dev/null @@ -1,100 +0,0 @@ -module.exports = version; -module.exports.pin = pin; - -var fs = require('fs'); -var path = require('path'); -var exec = require('child_process').exec; -var root = null; - -function pin() { - return version().then(function (v) { - version.pinned = v; - }); -} - -function version(callback) { - // first find the package.json as this will be our root - var promise = findPackage(path.dirname(module.parent.filename)) - .then(function (dir) { - // now try to load the package - var v = require(path.resolve(dir, 'package.json')).version; - - if (v && v !== '0.0.0-development') { - return v; - } - - root = dir; - - // else we're in development, give the commit out - // get the last commit and whether the working dir is dirty - var promises = [ - branch().catch(function () { return 'master'; }), - commit().catch(function () { return '<none>'; }), - dirty().catch(function () { return 0; }), - ]; - - // use the cached result as the export - return Promise.all(promises).then(function (res) { - var branch = res[0]; - var commit = res[1]; - var dirtyCount = parseInt(res[2], 10); - var curr = branch + ': ' + commit; - if (dirtyCount !== 0) { - curr += ' (' + dirtyCount + ' dirty files)'; - } - - return curr; - }); - }).catch(function (error) { - console.log(error.stack); - throw error; - }); - - if (callback) { - promise.then(function (res) { - callback(null, res); - }, callback); - } - - return promise; -} - -function 
findPackage(dir) { - if (dir === '/') { - return Promise.reject(new Error('package not found')); - } - return new Promise(function (resolve) { - fs.stat(path.resolve(dir, 'package.json'), function (error, exists) { - if (error || !exists) { - return resolve(findPackage(path.resolve(dir, '..'))); - } - - resolve(dir); - }); - }); -} - -function command(cmd) { - return new Promise(function (resolve, reject) { - exec(cmd, { cwd: root }, function (err, stdout, stderr) { - var error = stderr.trim(); - if (error) { - return reject(new Error(error)); - } - resolve(stdout.split('\n').join('')); - }); - }); -} - -function commit() { - return command('git rev-parse HEAD'); -} - -function branch() { - return command('git rev-parse --abbrev-ref HEAD'); -} - -function dirty() { - return command('expr $(git status --porcelain 2>/dev/null| ' + - 'egrep "^(M| M)" | wc -l)'); -} diff --git a/spaces/zhaoys/wfms-kuiwenc/next.config.js b/spaces/zhaoys/wfms-kuiwenc/next.config.js deleted file mode 100644 index 7a9bb1021ffacfbef461a3f9307449a9332ca997..0000000000000000000000000000000000000000 --- a/spaces/zhaoys/wfms-kuiwenc/next.config.js +++ /dev/null @@ -1,42 +0,0 @@ -/** @type {import('next').NextConfig} */ - -const nextConfig = { - webpack: (config, { isServer }) => { - if (!isServer) { - config.resolve = { - ...config.resolve, - fallback: { - 'bufferutil': false, - 'utf-8-validate': false, - http: false, - https: false, - stream: false, - // fixes proxy-agent dependencies - net: false, - dns: false, - tls: false, - assert: false, - // fixes next-i18next dependencies - path: false, - fs: false, - // fixes mapbox dependencies - events: false, - // fixes sentry dependencies - process: false - } - }; - } - config.module.rules.push({ - test: /\.svg$/i, - issuer: /\.[jt]sx?$/, - use: ['@svgr/webpack'], - }) - config.module.exprContextCritical = false; - - return config; - }, -} - -module.exports = (...args) => { - return nextConfig -} diff --git a/spaces/zideliu/styledrop/timm/data/dataset.py b/spaces/zideliu/styledrop/timm/data/dataset.py deleted file mode 100644 index 99d99917b282c60be78d6b2f6faa313eaadd9225..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/timm/data/dataset.py +++ /dev/null @@ -1,215 +0,0 @@ -""" Quick n Simple Image Folder, Tarfile based DataSet - -Hacked together by / Copyright 2020 Ross Wightman -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import torch.utils.data as data - -import os -import re -import torch -import tarfile -from PIL import Image - - -IMG_EXTENSIONS = ['.png', '.jpg', '.jpeg'] - - -def natural_key(string_): - """See http://www.codinghorror.com/blog/archives/001018.html""" - return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] - - -def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True): - labels = [] - filenames = [] - for root, subdirs, files in os.walk(folder, topdown=False): - rel_path = os.path.relpath(root, folder) if (root != folder) else '' - label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_') - for f in files: - base, ext = os.path.splitext(f) - if ext.lower() in types: - filenames.append(os.path.join(root, f)) - labels.append(label) - if class_to_idx is None: - # building class index - unique_labels = set(labels) - sorted_labels = list(sorted(unique_labels, key=natural_key)) - class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} - 
images_and_targets = [(f, class_to_idx[l]) for f, l in zip(filenames, labels) if l in class_to_idx] - if sort: - images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0])) - return images_and_targets, class_to_idx - - -def load_class_map(filename, root=''): - class_map_path = filename - if not os.path.exists(class_map_path): - class_map_path = os.path.join(root, filename) - assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % filename - class_map_ext = os.path.splitext(filename)[-1].lower() - if class_map_ext == '.txt': - with open(class_map_path) as f: - class_to_idx = {v.strip(): k for k, v in enumerate(f)} - else: - assert False, 'Unsupported class map extension' - return class_to_idx - - -class Dataset(data.Dataset): - - def __init__( - self, - root, - load_bytes=False, - transform=None, - class_map=''): - - class_to_idx = None - if class_map: - class_to_idx = load_class_map(class_map, root) - images, class_to_idx = find_images_and_targets(root, class_to_idx=class_to_idx) - if len(images) == 0: - raise RuntimeError(f'Found 0 images in subfolders of {root}. ' - f'Supported image extensions are {", ".join(IMG_EXTENSIONS)}') - self.root = root - self.samples = images - self.imgs = self.samples # torchvision ImageFolder compat - self.class_to_idx = class_to_idx - self.load_bytes = load_bytes - self.transform = transform - - def __getitem__(self, index): - path, target = self.samples[index] - img = open(path, 'rb').read() if self.load_bytes else Image.open(path).convert('RGB') - if self.transform is not None: - img = self.transform(img) - if target is None: - target = torch.zeros(1).long() - return img, target - - def __len__(self): - return len(self.samples) - - def filename(self, index, basename=False, absolute=False): - filename = self.samples[index][0] - if basename: - filename = os.path.basename(filename) - elif not absolute: - filename = os.path.relpath(filename, self.root) - return filename - - def filenames(self, basename=False, absolute=False): - fn = lambda x: x - if basename: - fn = os.path.basename - elif not absolute: - fn = lambda x: os.path.relpath(x, self.root) - return [fn(x[0]) for x in self.samples] - - -def _extract_tar_info(tarfile, class_to_idx=None, sort=True): - files = [] - labels = [] - for ti in tarfile.getmembers(): - if not ti.isfile(): - continue - dirname, basename = os.path.split(ti.path) - label = os.path.basename(dirname) - ext = os.path.splitext(basename)[1] - if ext.lower() in IMG_EXTENSIONS: - files.append(ti) - labels.append(label) - if class_to_idx is None: - unique_labels = set(labels) - sorted_labels = list(sorted(unique_labels, key=natural_key)) - class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} - tarinfo_and_targets = [(f, class_to_idx[l]) for f, l in zip(files, labels) if l in class_to_idx] - if sort: - tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path)) - return tarinfo_and_targets, class_to_idx - - -class DatasetTar(data.Dataset): - - def __init__(self, root, load_bytes=False, transform=None, class_map=''): - - class_to_idx = None - if class_map: - class_to_idx = load_class_map(class_map, root) - assert os.path.isfile(root) - self.root = root - with tarfile.open(root) as tf: # cannot keep this open across processes, reopen later - self.samples, self.class_to_idx = _extract_tar_info(tf, class_to_idx) - self.imgs = self.samples - self.tarfile = None # lazy init in __getitem__ - self.load_bytes = load_bytes - self.transform = transform - - def 
__getitem__(self, index): - if self.tarfile is None: - self.tarfile = tarfile.open(self.root) - tarinfo, target = self.samples[index] - iob = self.tarfile.extractfile(tarinfo) - img = iob.read() if self.load_bytes else Image.open(iob).convert('RGB') - if self.transform is not None: - img = self.transform(img) - if target is None: - target = torch.zeros(1).long() - return img, target - - def __len__(self): - return len(self.samples) - - def filename(self, index, basename=False): - filename = self.samples[index][0].name - if basename: - filename = os.path.basename(filename) - return filename - - def filenames(self, basename=False): - fn = os.path.basename if basename else lambda x: x - return [fn(x[0].name) for x in self.samples] - - -class AugMixDataset(torch.utils.data.Dataset): - """Dataset wrapper to perform AugMix or other clean/augmentation mixes""" - - def __init__(self, dataset, num_splits=2): - self.augmentation = None - self.normalize = None - self.dataset = dataset - if self.dataset.transform is not None: - self._set_transforms(self.dataset.transform) - self.num_splits = num_splits - - def _set_transforms(self, x): - assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms' - self.dataset.transform = x[0] - self.augmentation = x[1] - self.normalize = x[2] - - @property - def transform(self): - return self.dataset.transform - - @transform.setter - def transform(self, x): - self._set_transforms(x) - - def _normalize(self, x): - return x if self.normalize is None else self.normalize(x) - - def __getitem__(self, i): - x, y = self.dataset[i] # all splits share the same dataset base transform - x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split) - # run the full augmentation on the remaining splits - for _ in range(self.num_splits - 1): - x_list.append(self._normalize(self.augmentation(x))) - return tuple(x_list), y - - def __len__(self): - return len(self.dataset) diff --git a/spaces/zideliu/styledrop/timm/models/tresnet.py b/spaces/zideliu/styledrop/timm/models/tresnet.py deleted file mode 100644 index e371292f7ded9af1a18c0ff91b6b2aff886ce77f..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/timm/models/tresnet.py +++ /dev/null @@ -1,293 +0,0 @@ -""" -TResNet: High Performance GPU-Dedicated Architecture -https://arxiv.org/pdf/2003.13630.pdf - -Original model: https://github.com/mrT23/TResNet - -""" -import copy -from collections import OrderedDict -from functools import partial - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .helpers import build_model_with_cfg -from .layers import SpaceToDepthModule, AntiAliasDownsampleLayer, InplaceAbn, ClassifierHead, SEModule -from .registry import register_model - -__all__ = ['tresnet_m', 'tresnet_l', 'tresnet_xl'] - - -def _cfg(url='', **kwargs): - return { - 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), - 'crop_pct': 0.875, 'interpolation': 'bilinear', - 'mean': (0, 0, 0), 'std': (1, 1, 1), - 'first_conv': 'body.conv1.0', 'classifier': 'head.fc', - **kwargs - } - - -default_cfgs = { - 'tresnet_m': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_80_8-dbc13962.pth'), - 'tresnet_l': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth'), - 'tresnet_xl': _cfg( - 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth'), - 'tresnet_m_448': _cfg( - input_size=(3, 448, 448), pool_size=(14, 14), - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth'), - 'tresnet_l_448': _cfg( - input_size=(3, 448, 448), pool_size=(14, 14), - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth'), - 'tresnet_xl_448': _cfg( - input_size=(3, 448, 448), pool_size=(14, 14), - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_448-8c1815de.pth') -} - - -def IABN2Float(module: nn.Module) -> nn.Module: - """If `module` is IABN don't use half precision.""" - if isinstance(module, InplaceAbn): - module.float() - for child in module.children(): - IABN2Float(child) - return module - - -def conv2d_iabn(ni, nf, stride, kernel_size=3, groups=1, act_layer="leaky_relu", act_param=1e-2): - return nn.Sequential( - nn.Conv2d( - ni, nf, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False), - InplaceAbn(nf, act_layer=act_layer, act_param=act_param) - ) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None): - super(BasicBlock, self).__init__() - if stride == 1: - self.conv1 = conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3) - else: - if aa_layer is None: - self.conv1 = conv2d_iabn(inplanes, planes, stride=2, act_param=1e-3) - else: - self.conv1 = nn.Sequential( - conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3), - aa_layer(channels=planes, filt_size=3, stride=2)) - - self.conv2 = conv2d_iabn(planes, planes, stride=1, act_layer="identity") - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - reduction_chs = max(planes * self.expansion // 4, 64) - self.se = SEModule(planes * self.expansion, reduction_channels=reduction_chs) if use_se else None - - def forward(self, x): - if self.downsample is not None: - residual = self.downsample(x) - else: - residual = x - - out = self.conv1(x) - out = self.conv2(out) - - if self.se is not None: - out = self.se(out) - - out += residual - out = self.relu(out) - return out - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, - act_layer="leaky_relu", aa_layer=None): - super(Bottleneck, self).__init__() - self.conv1 = conv2d_iabn( - inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer, act_param=1e-3) - if stride == 1: - self.conv2 = conv2d_iabn( - planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3) - else: - if aa_layer is None: - self.conv2 = conv2d_iabn( - planes, planes, kernel_size=3, stride=2, act_layer=act_layer, act_param=1e-3) - else: - self.conv2 = nn.Sequential( - conv2d_iabn(planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3), - aa_layer(channels=planes, filt_size=3, stride=2)) - - reduction_chs = max(planes * self.expansion // 8, 64) - self.se = SEModule(planes, reduction_channels=reduction_chs) if use_se else None - - self.conv3 = conv2d_iabn( - planes, planes * self.expansion, kernel_size=1, stride=1, act_layer="identity") - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - if self.downsample is not None: - residual = self.downsample(x) - 
else: - residual = x - - out = self.conv1(x) - out = self.conv2(out) - if self.se is not None: - out = self.se(out) - - out = self.conv3(out) - out = out + residual # no inplace - out = self.relu(out) - - return out - - -class TResNet(nn.Module): - def __init__(self, layers, in_chans=3, num_classes=1000, width_factor=1.0, no_aa_jit=False, - global_pool='fast', drop_rate=0.): - self.num_classes = num_classes - self.drop_rate = drop_rate - super(TResNet, self).__init__() - - # JIT layers - space_to_depth = SpaceToDepthModule() - aa_layer = partial(AntiAliasDownsampleLayer, no_jit=no_aa_jit) - - # TResnet stages - self.inplanes = int(64 * width_factor) - self.planes = int(64 * width_factor) - conv1 = conv2d_iabn(in_chans * 16, self.planes, stride=1, kernel_size=3) - layer1 = self._make_layer( - BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer) # 56x56 - layer2 = self._make_layer( - BasicBlock, self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer) # 28x28 - layer3 = self._make_layer( - Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer) # 14x14 - layer4 = self._make_layer( - Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer) # 7x7 - - # body - self.body = nn.Sequential(OrderedDict([ - ('SpaceToDepth', space_to_depth), - ('conv1', conv1), - ('layer1', layer1), - ('layer2', layer2), - ('layer3', layer3), - ('layer4', layer4)])) - - self.feature_info = [ - dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D? - dict(num_chs=self.planes, reduction=4, module='body.layer1'), - dict(num_chs=self.planes * 2, reduction=8, module='body.layer2'), - dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'), - dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'), - ] - - # head - self.num_features = (self.planes * 8) * Bottleneck.expansion - self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) - - # model initialization - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') - elif isinstance(m, nn.BatchNorm2d) or isinstance(m, InplaceAbn): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - # residual connections special initialization - for m in self.modules(): - if isinstance(m, BasicBlock): - m.conv2[1].weight = nn.Parameter(torch.zeros_like(m.conv2[1].weight)) # BN to zero - if isinstance(m, Bottleneck): - m.conv3[1].weight = nn.Parameter(torch.zeros_like(m.conv3[1].weight)) # BN to zero - if isinstance(m, nn.Linear): - m.weight.data.normal_(0, 0.01) - - def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - layers = [] - if stride == 2: - # avg pooling before 1x1 conv - layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False)) - layers += [conv2d_iabn( - self.inplanes, planes * block.expansion, kernel_size=1, stride=1, act_layer="identity")] - downsample = nn.Sequential(*layers) - - layers = [] - layers.append(block( - self.inplanes, planes, stride, downsample, use_se=use_se, aa_layer=aa_layer)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append( - block(self.inplanes, planes, use_se=use_se, aa_layer=aa_layer)) - return nn.Sequential(*layers) - - def get_classifier(self): - return 
self.head.fc - - def reset_classifier(self, num_classes, global_pool='fast'): - self.head = ClassifierHead( - self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) - - def forward_features(self, x): - return self.body(x) - - def forward(self, x): - x = self.forward_features(x) - x = self.head(x) - return x - - -def _create_tresnet(variant, pretrained=False, **kwargs): - return build_model_with_cfg( - TResNet, variant, default_cfg=default_cfgs[variant], pretrained=pretrained, - feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True), **kwargs) - - -@register_model -def tresnet_m(pretrained=False, **kwargs): - model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) - return _create_tresnet('tresnet_m', pretrained=pretrained, **model_kwargs) - - -@register_model -def tresnet_l(pretrained=False, **kwargs): - model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) - return _create_tresnet('tresnet_l', pretrained=pretrained, **model_kwargs) - - -@register_model -def tresnet_xl(pretrained=False, **kwargs): - model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) - return _create_tresnet('tresnet_xl', pretrained=pretrained, **model_kwargs) - - -@register_model -def tresnet_m_448(pretrained=False, **kwargs): - model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) - return _create_tresnet('tresnet_m_448', pretrained=pretrained, **model_kwargs) - - -@register_model -def tresnet_l_448(pretrained=False, **kwargs): - model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) - return _create_tresnet('tresnet_l_448', pretrained=pretrained, **model_kwargs) - - -@register_model -def tresnet_xl_448(pretrained=False, **kwargs): - model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) - return _create_tresnet('tresnet_xl_448', pretrained=pretrained, **model_kwargs)
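
The deleted timm/models/tresnet.py above registers each TResNet variant with timm's model registry via the @register_model decorators, so these models are normally constructed through timm.create_model rather than by instantiating TResNet directly. A minimal usage sketch, assuming an environment where a timm release still ships this module, the optional inplace-abn package that the InplaceAbn layers require is installed, and no pretrained weights are downloaded:

import torch
import timm  # assumes a timm version that still includes the TResNet implementation above

# 'tresnet_m' is resolved through the registry populated by the @register_model decorators
model = timm.create_model('tresnet_m', pretrained=False, num_classes=10)
model.eval()

# TResNet applies SpaceToDepth internally, so it consumes a standard 3x224x224 image tensor
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    out = model(x)
print(out.shape)  # expected: torch.Size([1, 10])

The 448-pixel variants (tresnet_m_448, tresnet_l_448, tresnet_xl_448) differ only in their default_cfg input_size/pool_size, so the same create_model call works for them with 448x448 inputs.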