diff --git a/spaces/1368565466ki/ZSTRD/monotonic_align/core.py b/spaces/1368565466ki/ZSTRD/monotonic_align/core.py
deleted file mode 100644
index 5ff728cd74c9228346a82ec64a9829cb98ad315e..0000000000000000000000000000000000000000
--- a/spaces/1368565466ki/ZSTRD/monotonic_align/core.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import numba
-
-
-@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]),
-           nopython=True, nogil=True)
-def maximum_path_jit(paths, values, t_ys, t_xs):
-    b = paths.shape[0]
-    max_neg_val = -1e9
-    for i in range(int(b)):
-        path = paths[i]
-        value = values[i]
-        t_y = t_ys[i]
-        t_x = t_xs[i]
-
-        v_prev = v_cur = 0.0
-        index = t_x - 1
-
-        for y in range(t_y):
-            for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
-                if x == y:
-                    v_cur = max_neg_val
-                else:
-                    v_cur = value[y - 1, x]
-                if x == 0:
-                    if y == 0:
-                        v_prev = 0.
-                    else:
-                        v_prev = max_neg_val
-                else:
-                    v_prev = value[y - 1, x - 1]
-                value[y, x] += max(v_prev, v_cur)
-
-        for y in range(t_y - 1, -1, -1):
-            path[y, index] = 1
-            if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
-                index = index - 1
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kundli 5.5 Full Version for Free from a Trusted Source.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kundli 5.5 Full Version for Free from a Trusted Source.md
deleted file mode 100644
index e58bd6b1957dc7baaee3123551945c8ac3941efc..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kundli 5.5 Full Version for Free from a Trusted Source.md
+++ /dev/null
@@ -1,36 +0,0 @@
-
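The deleted kernel above computes, for each batch item, the best monotonic alignment path through a score grid by dynamic programming (forward accumulation followed by backtracking). A minimal usage sketch follows; the array shapes, dtypes, and random test data are assumptions inferred from the numba signature rather than taken from the repository:

```python
import numpy as np
from monotonic_align.core import maximum_path_jit  # the kernel shown in the diff above

b, t_y, t_x = 2, 6, 4  # hypothetical sizes: batch, output frames, input tokens (t_y >= t_x)
values = np.random.randn(b, t_y, t_x).astype(np.float32)  # per-cell log-likelihood scores
paths = np.zeros((b, t_y, t_x), dtype=np.int32)           # filled in place with a 0/1 mask
t_ys = np.full(b, t_y, dtype=np.int32)                     # valid row lengths per item
t_xs = np.full(b, t_x, dtype=np.int32)                     # valid column lengths per item

maximum_path_jit(paths, values, t_ys, t_xs)  # mutates both `paths` and `values` in place

# Each row y of paths[i] now contains exactly one 1, and the selected column
# index never decreases as y grows, i.e. the alignment is monotonic.
```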

How to Download Kundli 5.5 Full Version for Free

-

Kundli is software that helps you create and analyze your horoscope based on the principles of Vedic astrology. It can help you understand your personality, career, health, marriage, and more. Kundli 5.5 is one of the most popular and trusted versions of the Kundli software and has been used by millions of people around the world.

-

download kundli 5.5 full version


Download File: https://byltly.com/2uKzOH



-

However, Kundli 5.5 is not free software, and you need to pay a license fee to use it. But what if you want to download Kundli 5.5 full version for free? Is it possible? And is it safe? In this article, we will answer these questions and show you how to download Kundli 5.5 full version for free from a reliable source.

-

Why Download Kundli 5.5 Full Version for Free?

-

There are many reasons why you may want to download Kundli 5.5 full version for free. Some of them are:

- -

Whatever your reason may be, downloading Kundli 5.5 full version for free can be a good option for you if you do it from a trustworthy source. However, you should also be aware of the risks and disadvantages of doing so.

-

-

What are the Risks and Disadvantages of Downloading Kundli 5.5 Full Version for Free?

-

Downloading Kundli 5.5 full version for free may seem like a great idea, but it also comes with some risks and disadvantages that you should consider before doing so. Some of them are:

- -

Therefore, you should be careful and cautious when downloading Kundli 5.5 full version for free and make sure that you do it from a reputable source that offers a safe and secure download.

-

How to Download Kundli 5.5 Full Version for Free from a Reliable Source?

-

If you have decided to download Kundli 5.5 full version for free, then you need to find a reliable source that offers a safe and secure download. One such source is https://www.kundlidownload.com/kundli-55-download/, which is a website that provides various versions of Kundli software for free download.

-

To download Kundli 5.5 full version for free from this website, you need to follow these steps:

-
    -
  1. Go to https://www.kundlidownload.com/kundli-55-download/ and click on the "Download Now" button.
  2. Save the installer file on your computer and run it.
  3. Follow the instructions on the screen to install Kundli 5.5 on your computer.
  4. Launch the software and enjoy creating and analyzing your horoscope.
-

Congratulations! You have successfully downloaded Kundli 5.5 full version for free from a reliable source. You can now use the software for your personal or educational purposes without any limitations.

ddb901b051
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit Advanced PDF Editor 3.10 Serial Number Benefits and Features.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit Advanced PDF Editor 3.10 Serial Number Benefits and Features.md
deleted file mode 100644
index 3364f4e8a78dff991ebe09ad2b574e7a5b3546dc..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit Advanced PDF Editor 3.10 Serial Number Benefits and Features.md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-

Foxit Advanced PDF Editor 3.10 Serial Number: What You Need to Know

-

If you are looking for a powerful and easy-to-use PDF editor that can handle any kind of PDF document, you might want to check out Foxit Advanced PDF Editor 3.10. This software lets you make extensive edits directly in a PDF file by adding text, graphics, drawings, and images, merging and splitting text, and applying photo-editing operations, without requiring any additional components to be installed.

-

But before you can enjoy all the features of Foxit Advanced PDF Editor 3.10, you need to have a valid serial number or activation code that will unlock the full version of the software. In this article, we will tell you everything you need to know about Foxit Advanced PDF Editor 3.10 serial number, including how to get it, how to use it, and what benefits it offers.

-

foxit advanced pdf editor 3.10 serial number


Download 🆓 https://byltly.com/2uKyw2



-

Features of Foxit Advanced PDF Editor 3.10

-

Foxit Advanced PDF Editor 3.10 is more than just a simple PDF editor. It offers a range of advanced editing capabilities for PDF documents with more complex layout. Here are some of the features that you can access with Foxit Advanced PDF Editor 3.10 serial number:

- -

How to Get Foxit Advanced PDF Editor 3.10 Serial Number

-

To get Foxit Advanced PDF Editor 3.10 serial number, you need to purchase the software from Foxit or one of its authorized resellers. You can choose between one-time purchase or annual subscription plans depending on your needs. You can also download a free trial version of the software for evaluation purposes before buying it.

-

Once you have purchased the software, you will receive an email from Foxit with your serial number or activation code along with instructions on how to download and install the software on your computer. You can also find your serial number or activation code in your online account at https://www.foxit.com/my-account/my-products.html.

-

To use Foxit Advanced PDF Editor 3.10 serial number, you need to follow these steps:

-
    -
  1. Download Foxit Advanced PDF Editor 3.10 from the official website at https://www.foxit.com/downloads/ or from a trusted source.
  2. Install the software on your computer by following the installation wizard.
  3. Open the software and go to Help menu > About Foxit Advanced PDF Editor > License Information.
  4. Enter your serial number or activation code that you received from Foxit or a reseller in the corresponding field. 0a6ba089eb
    -
    -
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Driver Camara Web Hp Oem Wb918la Abm.md b/spaces/1gistliPinn/ChatGPT4/Examples/Driver Camara Web Hp Oem Wb918la Abm.md
deleted file mode 100644
index 71d869a6c15e2357a229e66db8a2e8fa0b35a851..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Driver Camara Web Hp Oem Wb918la Abm.md
+++ /dev/null
@@ -1,12 +0,0 @@
-

Meko's had a tough year because their teams suffered at both Campos and Lotus. At Lotus, they suffered because the English team was bought by Renault, and it took until the middle of the season to swap it.

    -

    driver camara web hp oem wb918la abm


    DOWNLOAD ————— https://imgfil.com/2uxZRy



    -

Alonso not only said this year's Indianapolis race would be its last, but he also said it would be his last time in the series. He insisted the Indy 500 was one of the most "special events" he has ever experienced.

    -

My team, Carlin, has been working hard for the last three years to achieve this ambitious target. It has been an incredible journey so far, and it is only right that we are all rewarded for all the hard work that has been put in.

    -

Sette Camara is no stranger to the trials of city street circuits. The Brazilian driver made his debut at the gruelling Macau Grand Prix in 2015; his first attempt saw him finish down in 22nd, but he demonstrated his potential by smashing the lap record by an impressive 1.5 seconds.

    -

I am really happy to have been selected to join the Ferrari Driver Academy. It is a great way to end what has been a really good racing season for me. Just spending a week in Maranello was in itself an amazing experience, especially getting to drive at the Fiorano track.

    -

    -

Tuukka Taponen, the 18-year-old Finn, started his karting career in the OK karting championship in Spain, where he had his first taste of competition and circuit racing. On March 26, he won the title in the OK karting class at the Spanish Campillos track, held the following week, the OK Colombian championship.

    899543212b
    -
    -
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Fangoria Magazine All Issues Cbr HOT!.md b/spaces/1gistliPinn/ChatGPT4/Examples/Fangoria Magazine All Issues Cbr HOT!.md
deleted file mode 100644
index 0ba7eeac1d02242257a7e1678ff08a140e719258..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Fangoria Magazine All Issues Cbr HOT!.md
+++ /dev/null
@@ -1,38 +0,0 @@
-

    Fangoria Magazine All Issues Cbr


    Download Zip ★★★★★ https://imgfil.com/2uxYmz



Fangoria Magazine
-
The first issue of Fangoria Magazine came out in October 1988. The magazine was aimed at hardcore horror fans. The magazine was originally published by Apopka Publications, Inc, which was created by a bunch of "horror and exploitation" movie fans who had worked at movie theaters in Florida.
-
The name "Fangoria" was a reference to the cult comedy flick, Pink Flamingos.
-
At its height, the magazine reached a monthly circulation of over 150,000.
-
The Fangoria Magazine Archive
-
The Fangoria Magazine Archive was started in 2008 by Dave and Alan Wiater, both of whom were editors at Fangoria Magazine. The archive includes over 12,000 issues of Fangoria Magazine.
-
Anthologies
-
Beginning in 2004, Fangoria Magazine started to compile anthologies of horror films from around the world. The films chosen for the anthologies ranged from mainstream horror films to independent horror films.
-
In 2011, Fangoria launched a series of books called The Fangoria Film Guide Collection. Each book would contain classic horror films edited by Fangoria Magazine's editors. These books would also include limited editions of Fangoria Magazine for collectors.
-
Fangoria On Demand
-
In 2011, Fangoria Magazine started Fangoria On Demand. The online streaming service allows fans to watch horror movies on demand.
-
On Demand exclusive content includes the red carpet interviews from the Fantastic Fest, Rue Morgue and Screamfest film festivals. Fangoria has also released interviews with George A. Romero, Guillermo Del Toro, Wes Craven, Joe Dante, Mick Garris, John Landis and Sean Cunningham.
-
Fangoria Presents: Tales From The Crypt
-
In 2006, Fangoria started Fangoria Presents: Tales From The Crypt. The program would bring fans horror stories from the past from some of the best horror writers of the 1980s.
-
Fangoria's Holiday Horror Show
-
Starting in 2008, Fangoria Magazine began to host their annual horror show in Orlando. The Fangoria Holiday Horror Show is now a three-day long event. The 2011 show was the 10th anniversary show.
-
Each year, Fangoria Magazine hosts 3D screenings of horror classics, discussion panels, celebrity guests, and a costume contest. Since the first show in Orlando, the show has taken place in Hollywood, Las Vegas, New York, San Francisco, 4fefd39f24
    -
    -
    -

    diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Black GBWhatsApp APK The Ultimate Guide to the Best WhatsApp Mod.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Black GBWhatsApp APK The Ultimate Guide to the Best WhatsApp Mod.md deleted file mode 100644 index 21979036937bd468bd7b6713150307c8f1f7652c..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Black GBWhatsApp APK The Ultimate Guide to the Best WhatsApp Mod.md +++ /dev/null @@ -1,118 +0,0 @@ - -

    Black GB WhatsApp APK Download: What You Need to Know

    -

    If you are looking for a way to enhance your WhatsApp experience, you might want to try Black GB WhatsApp. This is a modified version of the popular chat app that offers more features, customization options, and privacy settings than the official one. In this article, we will tell you what Black GB WhatsApp is, what it can do, and how you can download and install it on your Android device.

    -

    black gb whatsapp apk download


    Download Zip ===> https://urlin.us/2uSVAw



    -

    What is GB WhatsApp?

    -

GB WhatsApp is a free-to-use chat platform that comes as a modification of the official WhatsApp application. It was created by Original Inc, a third-party developer that is not affiliated with WhatsApp Inc. GB WhatsApp allows you to access all the basic features of the original app, such as sending and receiving messages, calls, media, and documents. However, it also adds some extra features and customization capabilities that are not available on the official version. Some of these features are:

    -

    Features of GB WhatsApp

    - -

    Benefits of GB WhatsApp

    -

    Using GB WhatsApp gives you more control over your chat experience than the original app. You can customize the interface, theme, font, and notification settings according to your preference. You can also enjoy more advanced messaging features, such as sending large APK files, copying statuses to your clipboard, increasing your status length, and creating longer group names. Moreover, you can protect your privacy by limiting how people track your online activities.

    -

    What is Black GB WhatsApp?

    -

    Black GB WhatsApp is a special version of GB WhatsApp that has a dark theme and a black icon. It is designed for users who prefer a sleek and elegant look for their chat app. It has all the features and benefits of GB WhatsApp, but with a different color scheme. Some of the features of Black GB WhatsApp are:

    -

    Features of Black GB WhatsApp

    - -

    Benefits of Black GB WhatsApp

    -

    Using Black GB WhatsApp gives you a stylish and sophisticated chat experience that stands out from the crowd. You can enjoy the dark theme and the night mode that enhance your visual comfort and performance. You can also avoid getting banned by WhatsApp for using a modified version with the anti-ban feature.

    -

    black gb whatsapp apk download latest version
    -black gb whatsapp apk download anti ban
    -black gb whatsapp apk download for android
    -black gb whatsapp apk download 2023
    -black gb whatsapp apk download filehippo
    -black gb whatsapp apk download get droid tips
    -black gb whatsapp apk download modded version
    -black gb whatsapp apk download free
    -black gb whatsapp apk download no ads
    -black gb whatsapp apk download with extra features
    -black gb whatsapp apk download official website
    -black gb whatsapp apk download update
    -black gb whatsapp apk download new version
    -black gb whatsapp apk download without root
    -black gb whatsapp apk download for pc
    -black gb whatsapp apk download online
    -black gb whatsapp apk download from apkpure
    -black gb whatsapp apk download 4.1.0
    -black gb whatsapp apk download 57mb
    -black gb whatsapp apk download for ios
    -black gb whatsapp apk download link
    -black gb whatsapp apk download original inc
    -black gb whatsapp apk download may 2023
    -black gb whatsapp apk download with theme customization
    -black gb whatsapp apk download with multilanguage support
    -black gb whatsapp apk download with advanced privacy options
    -black gb whatsapp apk download with more emojis
    -black gb whatsapp apk download with dual account feature
    -black gb whatsapp apk download with enhanced messaging experience
    -black gb whatsapp apk download with large file sharing capability
    -black gb whatsapp apk download with status copying feature
    -black gb whatsapp apk download with group name editing feature
    -black gb whatsapp apk download with broadcast message feature
    -black gb whatsapp apk download with last seen hiding feature
    -black gb whatsapp apk download with blue tick hiding feature
    -black gb whatsapp apk download with typing notification hiding feature
    -black gb whatsapp apk download with more security features
    -black gb whatsapp apk download with backup and restore feature
    -black gb whatsapp apk download with auto reply feature
    -black gb whatsapp apk download with schedule message feature
    -black gb whatsapp apk download with pin chat feature
    -black gb whatsapp apk download with lock chat feature
    -black gb whatsapp apk download with call blocker feature
    -black gb whatsapp apk download with video call feature
    -black gb whatsapp apk download with voice call feature
    -black gb whatsapp apk download with sticker pack feature
    -black gb whatsapp apk download with gif support feature
    -black gb whatsapp apk download with dark mode feature
    -black gb whatsapp apk download with night mode feature

    -

    How to Download and Install Black GB WhatsApp?

    -

    If you want to try Black GB WhatsApp on your Android device, you need to follow some simple steps to download and install it. However, before you do that, you need to make sure that you meet the requirements for Black GB WhatsApp.

    -

    Requirements for Black GB WhatsApp

    - -

    Steps to Download and Install Black GB WhatsApp

    -
      -
  1. Go to the official website of Black GB WhatsApp and download the latest APK file. You can also use this link: Black GB WhatsApp APK Download.
  2. Enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
  3. Locate the downloaded APK file on your device and tap on it to start the installation process.
  4. Follow the instructions on the screen and grant the necessary permissions to the app.
  5. After the installation is complete, open the app and verify your phone number. You can also restore your WhatsApp data if you have a backup.
  6. Enjoy using Black GB WhatsApp on your device.
    -

    How to Use Black GB WhatsApp?

    -

    Using Black GB WhatsApp is similar to using the official WhatsApp app. You can send and receive messages, calls, media, and documents with your contacts and groups. You can also access the extra features and customization options that Black GB WhatsApp offers. Here are some tips on how to use Black GB WhatsApp:

    -

    How to Customize Black GB WhatsApp

    -

    You can change the appearance and settings of Black GB WhatsApp according to your liking. To do this, go to Menu > GB Settings and explore the various options available. You can change the theme, font, wallpaper, notification tone, chat bubble color, and more. You can also enable or disable features such as auto-reply, message scheduler, anti-delete messages, and more.

    -

    How to Switch Between Black GB WhatsApp and Official WhatsApp

    -

    If you want to use both Black GB WhatsApp and official WhatsApp on your device, you can do so easily. You can switch between them by tapping on their icons on your home screen or app drawer. However, you need to use different phone numbers for each app, as you cannot use the same number for both apps at the same time.

    -

    Conclusion

    -

    Black GB WhatsApp is a great alternative to the official WhatsApp app if you want more features, customization options, and privacy settings. It has a dark theme and a black icon that give it a unique and elegant look. It also supports dark mode and night mode for better visual comfort and performance. You can download and install Black GB WhatsApp on your Android device by following the steps mentioned above. However, you should be aware that using a modified version of WhatsApp may violate its terms of service and may result in your account being banned or suspended. Therefore, use it at your own risk and discretion.

    -

    FAQs

    - -

    I hope this article has helped you learn more about Black GB WhatsApp and how to download and install it on your Android device. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

    197e85843d
    -
    -
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Crazy Octopus Mod APK - A Fun and Addictive Game for Android - Download Now.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Crazy Octopus Mod APK - A Fun and Addictive Game for Android - Download Now.md
deleted file mode 100644
index eb1feef99c59a2aaf0542f1f61679645b4b8d8b2..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Crazy Octopus Mod APK - A Fun and Addictive Game for Android - Download Now.md
+++ /dev/null
@@ -1,82 +0,0 @@
-
    -

    Download Crazy Octopus Mod Apk: A Fun and Addictive Casual Game

    -

    If you are looking for a casual game that is easy to play but hard to master, then you should try Crazy Octopus. This is a game where you control a cute octopus that has to avoid obstacles and collect coins and gems. You can also customize your octopus with different hats, glasses, and accessories. In this article, we will tell you more about Crazy Octopus and how you can download the mod apk version that gives you unlimited money and other benefits.

    -

    What is Crazy Octopus?

    -

Crazy Octopus is a casual game developed by Moddroid.com, a website that provides modded versions of popular Android games. Crazy Octopus was released in 2023 and has gained a lot of positive reviews from players. The game has a simple but colorful graphics style, a catchy soundtrack, and smooth gameplay. The game is suitable for all ages and can be played offline or online.

    -

    download crazy octopus mod apk


    Download Zip ✵✵✵ https://urlin.us/2uSTXt



    -

    Features of Crazy Octopus

    -

    Some of the features of Crazy Octopus are:

    - -

    How to play Crazy Octopus

    -

    The gameplay of Crazy Octopus is very simple but challenging. You have to guide your octopus through a series of obstacles like rocks, sharks, submarines, mines, and more. You have to avoid hitting them or you will lose a life. You have three lives in each level and you can earn more by collecting hearts. You also have to collect coins and gems that are scattered along the way. You can use them to buy new items for your octopus or upgrade your power-ups. The game has many levels with increasing difficulty and variety. You can also play in endless mode where you have to survive as long as possible.

    -

    How to download crazy octopus mod apk for free
    -Crazy octopus mod apk latest version 2023
    -Crazy octopus mod apk unlimited money and gems
    -Download crazy octopus mod apk for android devices
    -Crazy octopus mod apk gameplay and features
    -Crazy octopus mod apk review and rating
    -Best site to download crazy octopus mod apk
    -Crazy octopus mod apk offline mode and multiplayer
    -Crazy octopus mod apk cheats and hacks
    -Download crazy octopus mod apk from APKCombo[^1^]
    -Crazy octopus mod apk installation guide and tips
    -Crazy octopus mod apk comparison with other games
    -Crazy octopus mod apk download link and file size
    -Crazy octopus mod apk update and patch notes
    -Crazy octopus mod apk requirements and compatibility
    -Download crazy octopus mod apk for PC and laptop
    -Crazy octopus mod apk trailer and screenshots
    -Crazy octopus mod apk support and feedback
    -Crazy octopus mod apk alternatives and similar games
    -Download crazy octopus mod apk from Google Play Store
    -Crazy octopus mod apk bug fixes and improvements
    -Crazy octopus mod apk rewards and achievements
    -Download crazy octopus mod apk for iOS and iPhone
    -Crazy octopus mod apk developer and publisher
    -Crazy octopus mod apk genre and category
    -Download crazy octopus mod apk from Amazon Appstore
    -Crazy octopus mod apk pros and cons
    -Crazy octopus mod apk FAQ and answers
    -Download crazy octopus mod apk for Windows Phone
    -Crazy octopus mod apk release date and history

    -

    Why download Crazy Octopus mod apk?

    -

    Although Crazy Octopus is a free game, it has some limitations that can affect your gaming experience. For example, you have to watch ads to get extra lives or coins. You also have to spend real money to buy some items or power-ups. If you want to enjoy the game without any restrictions, then you should download the mod apk version of Crazy Octopus.

    -

    Benefits of Crazy Octopus mod apk

    -

    The mod apk version of Crazy Octopus gives you many benefits that make the game more fun and easy. Some of the benefits are:

    - -

    How to download and install Crazy Octopus mod apk

    -

    If you want to download and install Crazy Octopus mod apk on your android device, then you have to follow these steps:

    -
      -
  1. Go to Moddroid.com and search for Crazy Octopus.
  2. Click on the download button and wait for the download to finish.
  3. Go to your file manager and locate the downloaded file.
  4. Tap on the file and allow the installation from unknown sources.
  5. Wait for the installation to complete and launch the game.
  6. Enjoy playing Crazy Octopus mod apk with unlimited money and lives.
    -

    Conclusion

    -

    Crazy Octopus is a fun and addictive casual game that you can play anytime and anywhere. You can control a cute octopus that has to avoid obstacles and collect coins and gems. You can also customize your octopus with different hats, glasses, and accessories. If you want to enjoy the game without any limitations, then you should download the mod apk version of Crazy Octopus that gives you unlimited money, lives, items, power-ups, and no ads. Download Crazy Octopus mod apk now and have a blast!

    -

    FAQs

    -

    Here are some frequently asked questions about Crazy Octopus mod apk:

    - - : \uE000Mod\uE001droid.com - https://moddroid.com/crazy-octopus.html

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dark Riddle 13.5.0 APK A Thrilling Adventure Game with Puzzles and Mystery.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dark Riddle 13.5.0 APK A Thrilling Adventure Game with Puzzles and Mystery.md deleted file mode 100644 index b14596e525dbb09ad8981a7ab2133e10cfbe6fe9..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dark Riddle 13.5.0 APK A Thrilling Adventure Game with Puzzles and Mystery.md +++ /dev/null @@ -1,125 +0,0 @@ - - - - - -
    -

    Dark Riddle 13.5.0 APK: A Thrilling Adventure Game

    -

    Do you love adventure games with suspense, mystery, and humor? If yes, then you should try Dark Riddle, a first-person adventure thriller with an interactive environment and interesting quests. In this game, you have to solve puzzles and uncover the secrets of a suspicious neighbor who lives across the street from you.

    -

    dark riddle 13.5.0 apk


    DOWNLOAD > https://urlin.us/2uSXSi



    -

In this article, we will tell you everything you need to know about Dark Riddle 13.5.0 APK, the latest version of this popular game. We will explain what Dark Riddle is, what's new in Dark Riddle 13.5.0 APK, how to download and install Dark Riddle 13.5.0 APK, how to play Dark Riddle, and why you should play Dark Riddle.

    -

    What is Dark Riddle?

    -

    Dark Riddle is a game developed by PAGA GROUP, a Ukrainian game studio that specializes in creating adventure games with immersive stories and realistic graphics. Dark Riddle was first released in 2019 and has since gained over 50 million downloads on Google Play Store. It is also available on iOS devices.

    -

    Dark Riddle is a game that combines elements of horror, comedy, and puzzle-solving. You play as a curious protagonist who wants to find out what your neighbor is hiding in his basement. You have to explore an unusual city where you can find many useful and unique items to interact with. You will meet a police officer, a seller of alien devices, and other strange characters along your journey.

    -

    Dark Riddle is a game that challenges your creativity, logic, and courage. You have to use your wits and skills to outsmart your neighbor and sneak into his house without getting caught. You will also discover a dark riddle that involves aliens, secret experiments, and a mysterious organization.

    -

    What's New in Dark Riddle 13.5.0 APK?

    -

    Dark Riddle 13.5.0 APK is the latest version of the game that was released on June 16, 2023. It has some new features, bug fixes, and improvements that make the game more enjoyable and exciting. Here are some of the highlights of Dark Riddle 13.5.0 APK:

    -

    New Features

    -
      -
• A new chapter in the story that reveals more secrets and surprises.
• A new location to explore: the neighbor's laboratory.
• A new character to meet: the neighbor's assistant.
• A new item to use: the alien device that can manipulate time and space.
• A new mode to play: the multiplayer mode that allows you to play with your friends online.
    -

    Bug Fixes and Improvements

    -
      -
• Fixed some crashes and errors that occurred in the previous versions.
• Improved the performance and stability of the game.
• Improved the graphics and sound quality of the game.
• Improved the user interface and controls of the game.
• Improved the balance and difficulty of the game.
    -

    How to Download and Install Dark Riddle 13.5.0 APK?

    -

    If you want to play Dark Riddle 13.5.0 APK, you have two options to download and install it on your Android device. You can either download it from Google Play Store or from APKCombo, a third-party website that provides free APK files for various apps and games. Here are the steps for both options:

    -

    Download from Google Play Store

    -
      -
  1. Open Google Play Store on your device and search for "Dark Riddle".
  2. Select the game from the search results and tap on "Install".
  3. Wait for the download and installation process to complete.
  4. Once done, you can launch the game from your app drawer or home screen.
    -

    Download from APKCombo

    -
      -
  1. Open your web browser and go to https://apkcombo.com/dark-riddle/com.Nobodyshot.killerNeighbor/.
  2. Select the version 13.5.0 from the drop-down menu and tap on "Download APK".
  3. Wait for the download process to complete and locate the APK file in your device's storage.
  4. Before installing the APK file, make sure you enable "Unknown Sources" in your device's settings. This will allow you to install apps from sources other than Google Play Store.
  5. Tap on the APK file and follow the instructions on the screen to install it.
  6. Once done, you can launch the game from your app drawer or home screen.

    How to Play Dark Riddle?

    -

    Now that you have downloaded and installed Dark Riddle 13.5.0 APK, you are ready to play this amazing game. Here are some tips and tricks on how to play Dark Riddle:

    -

    Explore the City

    -

    The game starts with you arriving in a strange city where you have rented an apartment. You can explore the city and interact with various objects and characters. You can find clues, items, and quests that will help you in your adventure. You can also use your phone to call your friends, order pizza, or play mini-games.

    -

    dark riddle game download apk
    -dark riddle latest version apk
    -dark riddle mod apk unlimited money
    -dark riddle classic apk
    -dark riddle adventure thriller apk
    -dark riddle neighbor secrets apk
    -dark riddle offline apk
    -dark riddle hack apk
    -dark riddle free download apk
    -dark riddle android game apk
    -dark riddle 21.1.0 apk
    -dark riddle 20.0.0 apk
    -dark riddle 19.0.0 apk
    -dark riddle 18.0.0 apk
    -dark riddle 17.0.0 apk
    -dark riddle 16.0.0 apk
    -dark riddle 15.0.0 apk
    -dark riddle 14.0.0 apk
    -dark riddle 13.5.1 apk
    -dark riddle 13.4.0 apk
    -dark riddle 13.3.0 apk
    -dark riddle 13.2.0 apk
    -dark riddle 13.1.0 apk
    -dark riddle 13.0.0 apk
    -dark riddle 12.5.0 apk
    -dark riddle old version apk
    -dark riddle new update apk
    -dark riddle full version apk
    -dark riddle premium apk
    -dark riddle pro apk
    -dark riddle cracked apk
    -dark riddle unlocked apk
    -dark riddle cheats apk
    -dark riddle tips and tricks apk
    -dark riddle walkthrough guide apk
    -dark riddle gameplay video apk
    -dark riddle review and rating apk
    -dark riddle best alternative games apk
    -download and install dark riddle 13.5.0 apk on android device
    -how to play dark riddle 13.5.0 on pc with emulator

    -

    Sneak into the Neighbor's House

    -

    Your main goal is to sneak into your neighbor's house and find out what he is hiding in his basement. You have to be careful and avoid being detected by him or his security system. You can use different strategies and tactics to distract him, such as throwing objects, making noises, or setting traps. You can also use some of the items you find in the city, such as a crowbar, a flashlight, or a drone.

    -

    Solve Puzzles and Uncover Secrets

    -

    Once you are inside the neighbor's house, you have to solve various puzzles and riddles that will lead you to his basement. You will also discover some shocking secrets and mysteries that involve aliens, experiments, and a mysterious organization. You will have to make some choices that will affect the outcome of the story.

    -

    Why Should You Play Dark Riddle?

    -

    If you are still wondering why you should play Dark Riddle, here are some reasons why this game is worth your time and attention:

    -

    Interactive Environment and Interesting Quests

    -

    Dark Riddle has a rich and interactive environment that allows you to interact with almost everything you see. You can open doors, drawers, windows, and cabinets. You can pick up, throw, or use objects. You can also talk to different characters and complete various quests that will reward you with items, money, or information.

    -

    Unique Items and Characters

    -

    Dark Riddle has a lot of unique items and characters that make the game more fun and entertaining. You can find and use items such as a banana peel, a rubber duck, a fire extinguisher, or a laser pointer. You can also meet characters such as a police officer, a seller of alien devices, a hacker, or an alien.

    -

    Free Game with Optional In-App Purchases

    -

    Dark Riddle is a free game that you can download and play without spending any money. However, if you want to enhance your gaming experience, you can also buy some optional in-app purchases that will give you access to more items, features, or modes. For example, you can buy coins, gems, skins, weapons, or the premium version of the game.

    -

    Conclusion

    -

    In conclusion, Dark Riddle 13.5.0 APK is a thrilling adventure game that will keep you hooked for hours. You will enjoy exploring the city, sneaking into the neighbor's house, solving puzzles, and uncovering secrets. You will also love the interactive environment, the unique items and characters, and the free game with optional in-app purchases.

    -

    If you are looking for a game that combines horror, comedy, and puzzle-solving, then you should definitely try Dark Riddle 13.5.0 APK. You will not regret it!

    -

    FAQs

    -
      -
• Q: Is Dark Riddle 13.5.0 APK safe to download and install?
• A: Yes, Dark Riddle 13.5.0 APK is safe to download and install from Google Play Store or APKCombo. It does not contain any viruses or malware that could harm your device or data.
• Q: How much storage space does Dark Riddle 13.5.0 APK require?
• A: Dark Riddle 13.5.0 APK requires about 150 MB of storage space on your device.
• Q: What are the minimum system requirements for Dark Riddle 13.5.0 APK?
• A: Dark Riddle 13.5.0 APK requires Android 4.4 or higher and at least 1 GB of RAM to run smoothly.
• Q: How many chapters are there in Dark Riddle 13.5.0 APK?
• A: Dark Riddle 13.5.0 APK has 5 chapters in the story, plus a bonus chapter that can be unlocked by completing certain tasks.
• Q: Can I play Dark Riddle 13.5.0 APK offline?
• A: Yes, you can play Dark Riddle 13.5.0 APK offline without an internet connection. However, some features and modes may require an internet connection to work properly.
    -

    I hope you enjoyed reading this article and learned something new about Dark Riddle 13.5.0 APK. If you have any questions or feedback, feel free to leave a comment below. Thank you for your time and attention.

    -

    197e85843d
    -
    -
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dolphin Emulator APK Everything You Need to Know.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dolphin Emulator APK Everything You Need to Know.md
deleted file mode 100644
index 5375eeb33d49ee4f8073496a65d20240b3ae1a12..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dolphin Emulator APK Everything You Need to Know.md
+++ /dev/null
@@ -1,199 +0,0 @@
-

    Dolphin Emulator APK Versions: Everything You Need to Know

    -

If you are a fan of Nintendo GameCube and Wii games, you may have heard of Dolphin Emulator. Dolphin Emulator is free and open-source software that allows you to play these games on your PC, Mac, Linux, and Android devices. In this article, we will tell you everything you need to know about Dolphin Emulator APK versions, how to install and use them on your Android device, and the pros and cons of doing so.

    -

    What is Dolphin Emulator?

    -

    Dolphin Emulator is an emulator for two recent Nintendo video game consoles: the GameCube and the Wii. It allows PC gamers to enjoy games for these two consoles in full HD (1080p) with several enhancements: compatibility with all PC controllers, turbo speed, networked multiplayer, and even more. Dolphin Emulator was first released in 2003 as a closed-source project, but was later open-sourced in 2008. Since then, it has been constantly updated and improved by a team of developers and contributors from all over the world.

    -

    dolphin emulator apk versions


    Download Zip >>> https://urlin.us/2uT1ty



    -

    Dolphin Emulator Features

    -

    Dolphin Emulator has many features that make it stand out from other emulators. Some of these features are:

    - -

    Dolphin Emulator Compatibility

    -

    Dolphin Emulator has a high compatibility rate with GameCube and Wii games. However, some games may not work properly or at all due to various reasons. Some of these reasons are:

    - -

    To check the compatibility of a specific game with Dolphin Emulator, you can visit the official compatibility list on the Dolphin Emulator website. You can also search for user reviews and videos online to see how the game runs on different devices and settings.

    -

    How to Install Dolphin Emulator on Android

    -

    If you want to play GameCube and Wii games on your Android device, you will need to install Dolphin Emulator APK on your device. Dolphin Emulator APK is an Android application package that contains the Dolphin Emulator software for Android devices. You can download the latest version of Dolphin Emulator APK from the official download page on the Dolphin Emulator website. Here are the steps to install Dolphin Emulator APK on your Android device:

    -

    Downloading the APK File

    -

    To download the APK file, you will need a web browser on your Android device. You can use any web browser you prefer, such as Chrome, Firefox, Opera, or Samsung Internet. Follow these steps to download the APK file:

    -
      -
  1. Open your web browser and go to the official download page of Dolphin Emulator.
  2. Scroll down to the section that says "Download Dolphin 5.0-15503 for Android". You will see a button that says "Download APK". Tap on it.
  3. You will be redirected to a page that says "Dolphin 5.0-15503 for Android". You will see a button that says "Download". Tap on it.
  4. You will see a pop-up window that asks you to confirm the download. Tap on "OK".
  5. The APK file will start downloading to your device. You can check the progress of the download in your notification bar or in your downloads folder.
    -

    Enabling Unknown Sources

    -

    To install the APK file, you will need to enable unknown sources on your device. Unknown sources are sources that are not verified by Google Play Store or other official app stores. By default, Android devices do not allow installing apps from unknown sources for security reasons. However, you can enable unknown sources for specific apps or files that you trust. Follow these steps to enable unknown sources for Dolphin Emulator APK:

    -
      -
  1. Go to your device settings and look for an option that says "Security" or "Privacy". Tap on it.
  2. Look for an option that says "Unknown sources" or "Install unknown apps". Tap on it.
  3. You will see a list of apps that can install unknown apps. Look for your web browser app and tap on it.
  4. You will see a toggle switch that says "Allow from this source" or "Install unknown apps". Turn it on.
  5. You will see a warning message that says "Your phone and personal data are more vulnerable to attack by apps from unknown sources. You agree that you are solely responsible for any damage to your phone or loss of data that may result from using these apps." Tap on "OK".
    -

    Installing the APK File

    -

    To install the APK file, you will need to locate it on your device and open it. Follow these steps to install the APK file:

    -
      -
  1. Go to your downloads folder and look for a file named "dolphin-master-5.0-15503.apk". Tap on it.
  2. You will see a pop-up window that asks you to confirm the installation. Tap on "Install".
  3. The installation process will begin and may take a few seconds or minutes depending on your device speed and performance.
  4. When the installation is complete, you will see a message that says "App installed". Tap on "Open" to launch Dolphin Emulator or tap on "Done" to exit.
    -

    How to Use Dolphin Emulator on Android

    -

    After installing Dolphin Emulator on your Android device, you can start playing GameCube and Wii games on it. However, you will need to do some configuration and preparation before you can enjoy the full emulation experience. Here are some steps to use Dolphin Emulator on Android:

    -

    dolphin emulator apk download latest version
    -dolphin emulator apk for android 10
    -dolphin emulator apk mod
    -dolphin emulator apk no verification
    -dolphin emulator apk old version
    -dolphin emulator apk pro
    -dolphin emulator apk uptodown
    -dolphin emulator apk with bios
    -dolphin emulator beta apk
    -dolphin emulator custom build apk
    -dolphin emulator gamecube and wii games apk
    -dolphin emulator gold apk
    -dolphin emulator mmj apk
    -dolphin emulator premium apk
    -dolphin emulator stable apk
    -download dolphin emulator 5.0 apk
    -download dolphin emulator for android 11 apk
    -download dolphin emulator for android 4.4.2 apk
    -download dolphin emulator for android 6.0 apk
    -download dolphin emulator for android 7.0 apk
    -download dolphin emulator for android 8.1 apk
    -download dolphin emulator for android 9.0 pie apk
    -download dolphin emulator for android tv box apk
    -download dolphin emulator for pc windows 10 64 bit apk
    -download gamecube games for dolphin emulator android apk
    -how to install dolphin emulator on android phone apk
    -how to play wii games on dolphin emulator android apk
    -how to use cheats in dolphin emulator android apk
    -new super mario bros wii dolphin emulator android apk
    -resident evil 4 wii edition dolphin emulator android apk
    -super mario galaxy 2 wii iso for dolphin emulator android apk
    -super smash bros brawl wii iso for dolphin emulator android apk
    -the legend of zelda twilight princess wii iso for dolphin emulator android apk
    -wwe 13 wii iso for dolphin emulator android apk
    -best settings for dolphin emulator android 2021 apk
    -best settings for pokemon colosseum on dolphin emulator android apk
    -best settings for resident evil zero on dolphin emulator android apk
    -best settings for sonic adventure 2 battle on dolphin emulator android apk
    -best settings for super mario sunshine on dolphin emulator android apk
    -best settings for the legend of zelda wind waker on dolphin emulator android apk
    -can you play gamecube games on dolphin emulator android apk
    -can you play wii u games on dolphin emulator android apk
    -does dolphin emulator work on android tablet apk
    -how to add games to dolphin emulator android home screen shortcut apk
    -how to connect ps4 controller to dolphin emulator android bluetooth apk
    -how to fix lag in dolphin emulator android performance boost tweak guide apk
    -how to get gamecube bios files for dolphin emulator android tutorial video link in description apk
    -how to increase fps in dolphin emulator android frame skip option explained in detail with examples apk
-how to play multiplayer games on dolphin emulator android online netplay feature apk

    -

    Configuring the Settings

    -

    Dolphin Emulator has many settings that you can tweak to optimize the performance and quality of the emulation. You can access the settings by tapping on the menu icon (three horizontal lines) on the top left corner of the screen and then tapping on "Settings". You will see several categories of settings, such as General, Graphics, Audio, Controls, and Advanced. You can explore each category and adjust the settings according to your preference and device capability. Some of the most important settings are:

    - -

    You may need to experiment with different settings to find the best balance between performance and quality for your device and game. You can also check online for recommended settings for specific games or devices.

    -

    Adding and Launching Games

    -

    To play games on Dolphin Emulator, you will need to have the game files on your device or external storage. You can obtain the game files from your own discs using a disc drive and a PC software such as CleanRip or FriiDump. Alternatively, you can download the game files from online sources such as ROMs websites or torrents. However, downloading game files from online sources may be illegal in some countries or regions. Therefore, we do not endorse or encourage such actions.

    -

    Once you have the game files on your device or external storage, you can add them to Dolphin Emulator by following these steps:

    -
      -
  1. Open Dolphin Emulator and tap on the menu icon (three horizontal lines) on the top left corner of the screen.
  2. Tap on "Add folder" and navigate to the folder where your game files are stored.
  3. Select the folder and tap on "Select this directory".
  4. Dolphin Emulator will scan the folder and add any compatible game files to its library.
  5. You will see a list of games on the main screen of Dolphin Emulator. Tap on any game to launch it.
    -

    Using Controllers and Touchscreen

    -

    Dolphin Emulator supports various input methods for playing games on Android devices. You can use physical controllers such as Bluetooth controllers or USB controllers with an OTG adapter. You can also use the touchscreen of your device as a virtual controller. To use controllers or touchscreen with Dolphin Emulator, you will need to configure them in the settings. Here are some steps to use controllers or touchscreen with Dolphin Emulator:

    -
      -
  1. Open Dolphin Emulator and tap on the menu icon (three horizontal lines) on the top left corner of the screen.
  2. Tap on "Settings" and then tap on "Controls".
  3. You will see four controller ports: Port 1 (Wii Remote 1), Port 2 (Wii Remote 2), Port 3 (GameCube Controller 1), and Port 4 (GameCube Controller 2). Tap on any port that you want to configure.
  4. You will see a list of input devices that you can use for that port, such as Emulated Wii Remote, Real Wii Remote, Emulated GameCube Controller, or Standard Controller. Tap on the device that you want to use.
  5. If you choose Emulated Wii Remote or Emulated GameCube Controller, you will see a screen that shows the button mapping for that device. You can tap on any button to change its mapping or use the default mapping. You can also enable or disable motion controls, rumble, and IR pointer.
  6. If you choose Real Wii Remote, you will need to pair your Wii Remote with your Android device via Bluetooth. To do this, press and hold the 1 and 2 buttons on your Wii Remote until the LED lights start blinking. Then, tap on "Refresh" on the Dolphin Emulator screen and select your Wii Remote from the list of devices. You can also enable or disable continuous scanning, speaker data, and Wii Remote motor.
  7. If you choose Standard Controller, you will need to connect your controller to your Android device via Bluetooth or USB. To do this, follow the instructions that came with your controller or check online for guides. Then, tap on "Configure" on the Dolphin Emulator screen and select your controller from the list of devices. You can also change the button mapping for your controller or use the default mapping.
  8. After configuring your input device for each port, tap on "Back" to return to the main screen of Dolphin Emulator.
    -

    To use the touchscreen of your device as a virtual controller, you will need to enable it in the settings. Here are some steps to use the touchscreen with Dolphin Emulator:

    -
      -
  1. Open Dolphin Emulator and tap on the menu icon (three horizontal lines) on the top left corner of the screen.
  2. Tap on "Settings" and then tap on "Controls".
  3. Tap on "Port 1 (Wii Remote 1)" and then tap on "Emulated Wii Remote".
  4. Tap on "Overlay Controls" and turn it on.
  5. You will see a screen that shows the overlay controls for the emulated Wii Remote. You can adjust the size, position, opacity, and visibility of each control element by tapping on it and using the sliders. You can also enable or disable motion controls, rumble, and IR pointer.
  6. After adjusting the overlay controls, tap on "Back" to return to the main screen of Dolphin Emulator.
    -

    Pros and Cons of Dolphin Emulator on Android

    -

    Dolphin Emulator is a great way to play GameCube and Wii games on your Android device. However, it also has some pros and cons that you should be aware of before using it. Here are some of the pros and cons of Dolphin Emulator on Android:

    -

    Pros

    - -

    Cons

    - -

    Conclusion

    -

    However, it also has some drawbacks and challenges that you should be aware of before using it. You may encounter performance issues, battery drain, or legal issues depending on your device and game. Therefore, you should use Dolphin Emulator with caution and responsibility.

    -

    We hope this article has helped you learn more about Dolphin Emulator APK versions, how to install and use them on your Android device, and what are the pros and cons of doing so. If you have any questions or feedback, feel free to leave a comment below or contact us through our website. Happy gaming!

    -

    FAQs

    -

    Here are some frequently asked questions about Dolphin Emulator APK versions:

    -
      -
  1. What are the minimum requirements for Dolphin Emulator on Android?

      The minimum requirements for Dolphin Emulator on Android are:

      -
        -
    • Android 5.0 (Lollipop) or higher
    • A 64-bit processor (ARMv8 or x86_64)
    • A graphics processor that supports OpenGL ES 3.0 or higher
    • At least 2 GB of RAM
    • At least 4 GB of free storage space
      -
  2. Where can I get the latest version of Dolphin Emulator APK?

      You can get the latest version of Dolphin Emulator APK from the official download page on the Dolphin Emulator website. You can also check the official blog or the official GitHub page for the latest news and updates on Dolphin Emulator.

      -
  3. How can I update Dolphin Emulator APK on my Android device?

      You can update Dolphin Emulator APK on your Android device by following these steps:

      -
        -
      1. Download the latest version of Dolphin Emulator APK from the official download page on the Dolphin Emulator website.
      2. -
      3. Open your file manager app and locate the downloaded APK file.
      4. -
      5. Tap on the APK file and confirm the installation.
      6. -
      7. The installation process will overwrite the previous version of Dolphin Emulator on your device.
      8. -
      9. When the installation is complete, you can launch Dolphin Emulator and enjoy the new features and improvements.
      10. -
      -
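      If your device is connected to a computer with adb, you can also sideload the update from the command line instead of using a file manager. This is a minimal sketch, not an official Dolphin tool; it assumes adb is installed and USB debugging is enabled, and the APK filename is just a placeholder for whatever you downloaded. The -r flag reinstalls the app while keeping its data, which is what an in-place update does.

      import subprocess
      from pathlib import Path

      # Placeholder name for the APK downloaded from the official Dolphin site.
      apk = Path("dolphin-emu-latest.apk")

      # "adb install -r" replaces the installed app but keeps its settings and data.
      result = subprocess.run(["adb", "install", "-r", str(apk)],
                              capture_output=True, text=True)
      print(result.stdout or result.stderr)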
    4. How can I uninstall Dolphin Emulator APK from my Android device?

      You can uninstall Dolphin Emulator APK from your Android device by following these steps:

      1. Go to your device settings and look for an option that says "Apps" or "Applications". Tap on it.
      2. Look for an app that says "Dolphin" or "Dolphin Emulator". Tap on it.
      3. You will see a screen that shows the app information and options. Tap on "Uninstall".
      4. You will see a pop-up window that asks you to confirm the uninstallation. Tap on "OK".
      5. The uninstallation process will remove Dolphin Emulator from your device.

    5. How can I get help or support for Dolphin Emulator on Android?

      You can get help or support for Dolphin Emulator on Android by visiting the official forums, the official wiki, or the official Discord server. You can also report bugs or request features on the official issue tracker.

      \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bhop GO APK and Enjoy the Best Parkour Experience.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bhop GO APK and Enjoy the Best Parkour Experience.md deleted file mode 100644 index 2fa0c2f1d9ead92cc98a2f5c75bd43ad70e1cbf6..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bhop GO APK and Enjoy the Best Parkour Experience.md +++ /dev/null @@ -1,95 +0,0 @@ -

      Bhop GO APK: A Parkour Game for Android


      Do you love parkour games? Do you want to practice your bunny hopping skills in realistic 3D environments? If yes, then you should download Bhop GO APK, a simulation game that lets you bhop faster in FPS and simulation games. In this article, we will tell you what Bhop GO APK is, how to download and install it, how to play it, what its features are, what its pros and cons are, and answer some FAQs about it.


      bhop go apk


      Download Zip >>> https://urlin.us/2uSTJQ




      What is Bhop GO APK?


      Bhop GO APK is a simulation game that lets you practice bunny hopping on 3D maps. Bunny hopping, or bhop, is a technique for jumping faster in FPS and simulation games by turning left and right while jumping to gain extra speed. It is simply parkour with air strafes. You should avoid falls and obstacles, as they can slow your hopping down. You can also use checkpoints to help you finish the maps more easily.


      How to Download and Install Bhop GO APK?

      Download from Google Play Store

      If you want to download Bhop GO APK from Google Play Store, you can follow these steps:

      1. Open Google Play Store on your Android device.
      2. Search for Bhop GO in the search bar.
      3. Tap on the Bhop GO icon and then tap on Install.
      4. Wait for the installation to complete and then open the game.

      Download from APKCombo

      If you want to download Bhop GO APK from APKCombo, you can follow these steps:

      1. Open your web browser and go to [APKCombo].
      2. Search for Bhop GO in the search bar.
      3. Tap on the Bhop GO icon and then tap on Download APK.
      4. After the download is complete, open the APK file and tap on Install.
      5. You may need to enable unknown sources in your settings to install the APK file.
      6. Wait for the installation to complete and then open the game.

      How to Play Bhop GO APK?


      Single Player Mode


      Single player mode lets you play offline and practice your bhop skills on various maps. You can choose from different difficulty levels, such as easy, normal, hard, and extreme. You can also create your own maps using the map editor. You can use the joystick to move, jump, and strafe. You can also use the buttons to crouch, sprint, and use items. You can see your speed, time, and checkpoints on the screen. You can pause the game and change the settings anytime.


      Multiplayer Mode


      Multiplayer mode lets you play online with friends or other players and compete for the best time on different maps. You can join or create a room with up to 10 players. You can chat with other players and see their names and ranks. You can also vote for the next map or kick a player. You can see your position, time, and speed on the screen. You can also see other players' movements and trails. You can pause the game and change the settings anytime.



      What are the Features of Bhop GO APK?


      Collecting Loot on Maps


      You can find trampolines, bounce pads, knives, weapons, and skins on different maps to make the game more fun. Trampolines and bounce pads can help you jump higher and faster. Knives and weapons can help you attack other players or objects. Skins can help you customize your character's appearance. You can also buy cases with coins and get random loot.


      Racing for World Records


      You can check your rank and statistics on the leaderboard and try to beat the world records of other bhoppers. You can see your best time, average time, total time, total jumps, total maps, total coins, total kills, total deaths, total wins, total losses, and total cases on your profile. You can also see the top 100 players for each map and mode. You can also share your achievements with your friends on social media.


      Customizing Your Character and Inventory


      You can earn coins by playing the game or watching ads. You can use coins to buy cases, spin and win cool knives, gloves, weapons, and skins for your character and inventory. You can also sell or trade your items with other players. You can change your character's name, color, model, trail, gravity, speed, jump force, and sound effects in the settings.


      What are the Pros and Cons of Bhop GO APK?

      Bhop GO APK is a fun and challenging game that lets you practice your bhop skills in realistic 3D environments. However, it also has some drawbacks that you should be aware of before downloading it. Here is a comparison table of the pros and cons of Bhop GO APK:

      | Pros | Cons |
      | --- | --- |
      | It is free to download and play | It contains ads that may be annoying or intrusive |
      | It has realistic physics and graphics | It may lag or crash on some devices |
      | It has many maps and modes to choose from | It may have some bugs or glitches |
      | It has a lot of loot and customization options | It may require internet connection for some features |
      | It has a friendly and active community | It may have some toxic or cheating players |

      Conclusion


      Bhop GO APK is a simulation game that lets you practice bunny hopping in 3D maps. It is a great way to improve your bhop skills and have fun with friends or other players. You can download and install it from Google Play Store or APKCombo. You can play it in single player or multiplayer mode. You can collect loot, race for world records, and customize your character and inventory. You can also enjoy the realistic physics and graphics of the game. However, you should also be aware of the ads, bugs, glitches, lag, internet connection, and toxic or cheating players that may affect your gaming experience. Overall, Bhop GO APK is a game that you should try out if you love parkour games.

      FAQs

      Here are some common questions and answers about Bhop GO APK:

      1. What is the latest version of Bhop GO APK?

        The latest version of Bhop GO APK is 195, which was updated on June 16, 2023. It has some bug fixes and improvements.

      2. How can I contact the developer of Bhop GO APK?

        You can contact the developer of Bhop GO APK by emailing them at shockappsteam@gmail.com or by visiting their website at [ShockApps].

      3. How can I report a bug or a cheater in Bhop GO APK?

        You can report a bug or a cheater in Bhop GO APK by using the feedback button in the game settings or by emailing the developer at shockappsteam@gmail.com.

      4. How can I support the development of Bhop GO APK?

        You can support the development of Bhop GO APK by rating and reviewing the game on Google Play Store or APKCombo, by sharing the game with your friends on social media, or by donating to the developer via PayPal.

      5. Is Bhop GO APK safe to download and install?

        Bhop GO APK is safe to download and install from Google Play Store or APKCombo, as they are trusted sources that scan the APK files for viruses and malware. However, you should always be careful when downloading and installing any APK file from unknown sources, as they may contain harmful or malicious code.

      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Create Amazing AR Effects for TikTok with Effect House - Download Now.md b/spaces/1phancelerku/anime-remove-background/Create Amazing AR Effects for TikTok with Effect House - Download Now.md deleted file mode 100644 index 74243961f34f85e8730967de09b3204b8c865cfc..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Create Amazing AR Effects for TikTok with Effect House - Download Now.md +++ /dev/null @@ -1,75 +0,0 @@ -

      TikTok Effect House Download: How to Create and Share Amazing AR Effects for TikTok


      TikTok is a platform where you can express your creativity in many ways, such as making short-form videos, hosting live streams, and now, creating augmented reality (AR) effects. With Effect House, you can design and develop your own Community Effects for TikTok, and share them with millions of users around the world. In this article, we will show you how to download and use Effect House, what kind of effects you can create with it, how to publish and manage your effects on TikTok, how to find and use other creators' effects on TikTok, and how to connect and collaborate with other effect creators.


      tiktok effect house download


      Download Zip ::: https://jinyurl.com/2uNRnq




      What is Effect House?


      Effect House is a platform that allows you to design and develop Community Effects for TikTok. Community Effects are AR effects that can be used by anyone on TikTok to enhance their videos with interactive and immersive elements. Effect House is made for beginners as well as professional designers and developers. You can create, publish, and share dynamic effects that can be used by TikTok users around the world. To get started, you need to log in with your TikTok account and download Effect House from the official website.


      How to Download and Use Effect House?


      To download Effect House, you need to visit the official website and log in with your TikTok account. You will then be able to download the software for Windows or Mac. Once you have installed Effect House on your computer, you can launch it and start creating your own effects. You will have access to guides, tutorials, templates, and other community resources that will help you in your effect creation journey. You can also preview your effects in real-time on your phone by scanning a QR code or using a USB cable.


      What Kind of Effects Can You Create with Effect House?


      Effect House gives you all the tools you need to create AR effects that inspire creativity across TikTok. Some of the features that Effect House offers include:

      • Segmentation: You can segment different parts of the scene, such as hair, landscapes, or clothing, and apply different effects to them.
      • Head Tracking: You can track the movement and orientation of the user's head and face, and attach 3D objects or animations to them.
      • Visual Scripting: You can use a graphical interface to create logic and interactions for your effects without coding.
      • And more: You can also use advanced tracking, rich interactions, audio synthesis, particle systems, shaders, and more to create stunning effects.

      How to Publish and Manage Your Effects on TikTok?


      Once you have created your effects with Effect House, you can submit them for approval through the effect management portal. You will receive an email notification from the TikTok team on the status of your effects. If your effects are approved, they will be published on TikTok and available for anyone to use. You can also track how your effects perform through analytics, such as views, likes, comments, and shares. You can also generate shareable links for your effects that you can post on your social media platforms or send to your friends. To manage your effects, you can edit, update, or delete them at any time through the effect management portal.

      How to Find and Use Other Creators' Effects on TikTok?


      If you want to explore and use other creators' effects on TikTok, you have several options to do so. You can:

      • Explore the effects tab on their profiles: You can visit the profiles of other effect creators and tap on the effects tab to see all the effects they have created. You can then try them out by tapping on the effect icon and recording a video with it.
      • Explore the effects detail page: You can also tap on the effect name or icon on any video that uses an effect to go to the effects detail page. There, you can see more information about the effect, such as its creator, description, and related videos. You can also try it out by tapping on the try it button.
      • Scan the effects QR code: You can also scan the QR code of any effect that you see on other platforms, such as websites, posters, or flyers. To do so, you need to open the TikTok app and tap on the discover tab. Then, tap on the scan button and point your camera at the QR code. You will then be able to access and use the effect.
      • Use the effect link: You can also use the effect link that is generated by the effect creator or shared by other users. To do so, you need to copy and paste the link into your browser or tap on it if you see it on another app. You will then be redirected to the TikTok app where you can use the effect.

      How to Connect and Collaborate with Other Effect Creators?


      One of the best things about Effect House is that it allows you to connect and collaborate with other effect creators who share your passion and vision. You can join the Effect House Discord community, where you can chat with other creators, share feedback, ask questions, get support, and learn from each other. You can also get inspired by other creators' work by browsing their effects on TikTok or Effect House's website. You can also follow them on TikTok or other social media platforms to stay updated on their latest creations.


      Conclusion


      Effect House is a platform that enables you to create and share amazing AR effects for TikTok. It is easy to use, powerful, and fun. You can design and develop your own Community Effects for TikTok, and share them with millions of users around the world. You can also explore and use other creators' effects on TikTok, and connect and collaborate with them through the Effect House Discord community. Effect House is a great way to express your creativity and enhance your TikTok experience. So what are you waiting for? Download Effect House today and start creating your own effects!


      FAQs


      Here are some common questions about Effect House:

      1. Is Effect House free?

        Yes, Effect House is free to download and use. However, you need to have a TikTok account to log in and create effects.


      2. What are the system requirements for Effect House?

        You need to have a Windows 10 or Mac OS 10.15 or higher computer with at least 8 GB of RAM and a dedicated graphics card to run Effect House. You also need to have a smartphone with Android 7.0 or iOS 13.0 or higher to preview your effects.
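        If you want to check the computer-side requirements quickly, a short script can report your operating system and installed RAM. This is just an illustrative sketch, not an official Effect House tool; it assumes the third-party psutil package is installed, and it cannot tell whether your graphics card is a dedicated one, so that part still needs to be checked manually.

        import platform
        import psutil  # third-party package, assumed installed: pip install psutil

        system = platform.system()  # "Windows" or "Darwin" (macOS)
        os_version = platform.mac_ver()[0] if system == "Darwin" else platform.release()
        ram_gb = psutil.virtual_memory().total / (1024 ** 3)

        print(f"OS:  {system} {os_version}")
        print(f"RAM: {ram_gb:.1f} GB")

        # Effect House asks for Windows 10 / macOS 10.15 or higher and at least 8 GB of RAM.
        print("RAM OK" if ram_gb >= 8 else "Less than the recommended 8 GB of RAM")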

      3. How long does it take to get my effects approved by TikTok?

        The approval process may vary depending on the volume of submissions and the quality of your effects. Generally, it takes about 24 hours for your effects to be reviewed by the TikTok team. You will receive an email notification once your effects are approved or rejected.

      4. How can I monetize my effects on TikTok?

        TikTok does not currently offer a direct way to monetize your effects on TikTok. However, you can use your effects as a way to showcase your skills and portfolio, attract more followers and engagement, promote your brand or business, or collaborate with other creators or brands.

      5. Where can I find more resources and support for Effect House?

        You can find more resources and support for Effect House on their official website, where you can access guides, tutorials, templates, and other community resources. You can also join the Effect House Discord community, where you can chat with other effect creators, share feedback, ask questions, get support, and learn from each other. You can also contact the Effect House team through email or social media if you have any issues or suggestions.

        \ No newline at end of file diff --git a/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/mesh.py b/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/mesh.py deleted file mode 100644 index 36833ea3dfa6c095a18fc745ff34cf106e83c95d..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/mesh.py +++ /dev/null @@ -1,328 +0,0 @@ -"""Meshes, conforming to the glTF 2.0 standards as specified in -https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-mesh - -Author: Matthew Matl -""" -import copy - -import numpy as np -import trimesh - -from .primitive import Primitive -from .constants import GLTF -from .material import MetallicRoughnessMaterial - - -class Mesh(object): - """A set of primitives to be rendered. - - Parameters - ---------- - name : str - The user-defined name of this object. - primitives : list of :class:`Primitive` - The primitives associated with this mesh. - weights : (k,) float - Array of weights to be applied to the Morph Targets. - is_visible : bool - If False, the mesh will not be rendered. - """ - - def __init__(self, primitives, name=None, weights=None, is_visible=True): - self.primitives = primitives - self.name = name - self.weights = weights - self.is_visible = is_visible - - self._bounds = None - - @property - def name(self): - """str : The user-defined name of this object. - """ - return self._name - - @name.setter - def name(self, value): - if value is not None: - value = str(value) - self._name = value - - @property - def primitives(self): - """list of :class:`Primitive` : The primitives associated - with this mesh. - """ - return self._primitives - - @primitives.setter - def primitives(self, value): - self._primitives = value - - @property - def weights(self): - """(k,) float : Weights to be applied to morph targets. - """ - return self._weights - - @weights.setter - def weights(self, value): - self._weights = value - - @property - def is_visible(self): - """bool : Whether the mesh is visible. - """ - return self._is_visible - - @is_visible.setter - def is_visible(self, value): - self._is_visible = value - - @property - def bounds(self): - """(2,3) float : The axis-aligned bounds of the mesh. - """ - if self._bounds is None: - bounds = np.array([[np.infty, np.infty, np.infty], - [-np.infty, -np.infty, -np.infty]]) - for p in self.primitives: - bounds[0] = np.minimum(bounds[0], p.bounds[0]) - bounds[1] = np.maximum(bounds[1], p.bounds[1]) - self._bounds = bounds - return self._bounds - - @property - def centroid(self): - """(3,) float : The centroid of the mesh's axis-aligned bounding box - (AABB). - """ - return np.mean(self.bounds, axis=0) - - @property - def extents(self): - """(3,) float : The lengths of the axes of the mesh's AABB. - """ - return np.diff(self.bounds, axis=0).reshape(-1) - - @property - def scale(self): - """(3,) float : The length of the diagonal of the mesh's AABB. - """ - return np.linalg.norm(self.extents) - - @property - def is_transparent(self): - """bool : If True, the mesh is partially-transparent. - """ - for p in self.primitives: - if p.is_transparent: - return True - return False - - @staticmethod - def from_points(points, colors=None, normals=None, - is_visible=True, poses=None): - """Create a Mesh from a set of points. - - Parameters - ---------- - points : (n,3) float - The point positions. - colors : (n,3) or (n,4) float, optional - RGB or RGBA colors for each point. - normals : (n,3) float, optionals - The normal vectors for each point. 
- is_visible : bool - If False, the points will not be rendered. - poses : (x,4,4) - Array of 4x4 transformation matrices for instancing this object. - - Returns - ------- - mesh : :class:`Mesh` - The created mesh. - """ - primitive = Primitive( - positions=points, - normals=normals, - color_0=colors, - mode=GLTF.POINTS, - poses=poses - ) - mesh = Mesh(primitives=[primitive], is_visible=is_visible) - return mesh - - @staticmethod - def from_trimesh(mesh, material=None, is_visible=True, - poses=None, wireframe=False, smooth=True): - """Create a Mesh from a :class:`~trimesh.base.Trimesh`. - - Parameters - ---------- - mesh : :class:`~trimesh.base.Trimesh` or list of them - A triangular mesh or a list of meshes. - material : :class:`Material` - The material of the object. Overrides any mesh material. - If not specified and the mesh has no material, a default material - will be used. - is_visible : bool - If False, the mesh will not be rendered. - poses : (n,4,4) float - Array of 4x4 transformation matrices for instancing this object. - wireframe : bool - If `True`, the mesh will be rendered as a wireframe object - smooth : bool - If `True`, the mesh will be rendered with interpolated vertex - normals. Otherwise, the mesh edges will stay sharp. - - Returns - ------- - mesh : :class:`Mesh` - The created mesh. - """ - - if isinstance(mesh, (list, tuple, set, np.ndarray)): - meshes = list(mesh) - elif isinstance(mesh, trimesh.Trimesh): - meshes = [mesh] - else: - raise TypeError('Expected a Trimesh or a list, got a {}' - .format(type(mesh))) - - primitives = [] - for m in meshes: - positions = None - normals = None - indices = None - - # Compute positions, normals, and indices - if smooth: - positions = m.vertices.copy() - normals = m.vertex_normals.copy() - indices = m.faces.copy() - else: - positions = m.vertices[m.faces].reshape((3 * len(m.faces), 3)) - normals = np.repeat(m.face_normals, 3, axis=0) - - # Compute colors, texture coords, and material properties - color_0, texcoord_0, primitive_material = Mesh._get_trimesh_props(m, smooth=smooth, material=material) - - # Override if material is given. - if material is not None: - #primitive_material = copy.copy(material) - primitive_material = copy.deepcopy(material) # TODO - - if primitive_material is None: - # Replace material with default if needed - primitive_material = MetallicRoughnessMaterial( - alphaMode='BLEND', - baseColorFactor=[0.3, 0.3, 0.3, 1.0], - metallicFactor=0.2, - roughnessFactor=0.8 - ) - - primitive_material.wireframe = wireframe - - # Create the primitive - primitives.append(Primitive( - positions=positions, - normals=normals, - texcoord_0=texcoord_0, - color_0=color_0, - indices=indices, - material=primitive_material, - mode=GLTF.TRIANGLES, - poses=poses - )) - - return Mesh(primitives=primitives, is_visible=is_visible) - - @staticmethod - def _get_trimesh_props(mesh, smooth=False, material=None): - """Gets the vertex colors, texture coordinates, and material properties - from a :class:`~trimesh.base.Trimesh`. 
- """ - colors = None - texcoords = None - - # If the trimesh visual is undefined, return none for both - if not mesh.visual.defined: - return colors, texcoords, material - - # Process vertex colors - if material is None: - if mesh.visual.kind == 'vertex': - vc = mesh.visual.vertex_colors.copy() - if smooth: - colors = vc - else: - colors = vc[mesh.faces].reshape( - (3 * len(mesh.faces), vc.shape[1]) - ) - material = MetallicRoughnessMaterial( - alphaMode='BLEND', - baseColorFactor=[1.0, 1.0, 1.0, 1.0], - metallicFactor=0.2, - roughnessFactor=0.8 - ) - # Process face colors - elif mesh.visual.kind == 'face': - if smooth: - raise ValueError('Cannot use face colors with a smooth mesh') - else: - colors = np.repeat(mesh.visual.face_colors, 3, axis=0) - - material = MetallicRoughnessMaterial( - alphaMode='BLEND', - baseColorFactor=[1.0, 1.0, 1.0, 1.0], - metallicFactor=0.2, - roughnessFactor=0.8 - ) - - # Process texture colors - if mesh.visual.kind == 'texture': - # Configure UV coordinates - if mesh.visual.uv is not None and len(mesh.visual.uv) != 0: - uv = mesh.visual.uv.copy() - if smooth: - texcoords = uv - else: - texcoords = uv[mesh.faces].reshape( - (3 * len(mesh.faces), uv.shape[1]) - ) - - if material is None: - # Configure mesh material - mat = mesh.visual.material - - if isinstance(mat, trimesh.visual.texture.PBRMaterial): - material = MetallicRoughnessMaterial( - normalTexture=mat.normalTexture, - occlusionTexture=mat.occlusionTexture, - emissiveTexture=mat.emissiveTexture, - emissiveFactor=mat.emissiveFactor, - alphaMode='BLEND', - baseColorFactor=mat.baseColorFactor, - baseColorTexture=mat.baseColorTexture, - metallicFactor=mat.metallicFactor, - roughnessFactor=mat.roughnessFactor, - metallicRoughnessTexture=mat.metallicRoughnessTexture, - doubleSided=mat.doubleSided, - alphaCutoff=mat.alphaCutoff - ) - elif isinstance(mat, trimesh.visual.texture.SimpleMaterial): - glossiness = mat.kwargs.get('Ns', 1.0) - if isinstance(glossiness, list): - glossiness = float(glossiness[0]) - roughness = (2 / (glossiness + 2)) ** (1.0 / 4.0) - material = MetallicRoughnessMaterial( - alphaMode='BLEND', - roughnessFactor=roughness, - baseColorFactor=mat.diffuse, - baseColorTexture=mat.image, - ) - elif isinstance(mat, MetallicRoughnessMaterial): - material = mat - - return colors, texcoords, material diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/opencpop/map.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/opencpop/map.py deleted file mode 100644 index cf09dbfa25378e8cb098e57c3143f9d66ae00391..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/opencpop/map.py +++ /dev/null @@ -1,8 +0,0 @@ -def cpop_pinyin2ph_func(): - # In the README file of opencpop dataset, they defined a "pinyin to phoneme mapping table" - pinyin2phs = {'AP': 'AP', 'SP': 'SP'} - with open('NeuralSeq/inference/svs/opencpop/cpop_pinyin2ph.txt') as rf: - for line in rf.readlines(): - elements = [x.strip() for x in line.split('|') if x.strip() != ''] - pinyin2phs[elements[0]] = elements[1] - return pinyin2phs \ No newline at end of file diff --git a/spaces/AIGC-Audio/AudioGPT/sound_extraction/model/resunet_film.py b/spaces/AIGC-Audio/AudioGPT/sound_extraction/model/resunet_film.py deleted file mode 100644 index c00addcbf6586632b78511aabe8c50a479d9c24f..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/sound_extraction/model/resunet_film.py +++ /dev/null @@ -1,110 +0,0 @@ -from .modules import * -import numpy as np - -class 
UNetRes_FiLM(nn.Module): - def __init__(self, channels, cond_embedding_dim, nsrc=1): - super(UNetRes_FiLM, self).__init__() - activation = 'relu' - momentum = 0.01 - - self.nsrc = nsrc - self.channels = channels - self.downsample_ratio = 2 ** 6 # This number equals 2^{#encoder_blocks} - - self.encoder_block1 = EncoderBlockRes2BCond(in_channels=channels * nsrc, out_channels=32, - downsample=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.encoder_block2 = EncoderBlockRes2BCond(in_channels=32, out_channels=64, - downsample=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.encoder_block3 = EncoderBlockRes2BCond(in_channels=64, out_channels=128, - downsample=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.encoder_block4 = EncoderBlockRes2BCond(in_channels=128, out_channels=256, - downsample=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.encoder_block5 = EncoderBlockRes2BCond(in_channels=256, out_channels=384, - downsample=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.encoder_block6 = EncoderBlockRes2BCond(in_channels=384, out_channels=384, - downsample=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.conv_block7 = ConvBlockResCond(in_channels=384, out_channels=384, - kernel_size=(3, 3), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.decoder_block1 = DecoderBlockRes2BCond(in_channels=384, out_channels=384, - stride=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.decoder_block2 = DecoderBlockRes2BCond(in_channels=384, out_channels=384, - stride=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.decoder_block3 = DecoderBlockRes2BCond(in_channels=384, out_channels=256, - stride=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.decoder_block4 = DecoderBlockRes2BCond(in_channels=256, out_channels=128, - stride=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.decoder_block5 = DecoderBlockRes2BCond(in_channels=128, out_channels=64, - stride=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - self.decoder_block6 = DecoderBlockRes2BCond(in_channels=64, out_channels=32, - stride=(2, 2), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - - self.after_conv_block1 = ConvBlockResCond(in_channels=32, out_channels=32, - kernel_size=(3, 3), activation=activation, momentum=momentum, - cond_embedding_dim=cond_embedding_dim) - - self.after_conv2 = nn.Conv2d(in_channels=32, out_channels=1, - kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True) - - self.init_weights() - - def init_weights(self): - init_layer(self.after_conv2) - - def forward(self, sp, cond_vec, dec_cond_vec): - """ - Args: - input: sp: (batch_size, channels_num, segment_samples) - Outputs: - output_dict: { - 'wav': (batch_size, channels_num, segment_samples), - 'sp': (batch_size, channels_num, time_steps, freq_bins)} - """ - - x = sp - # Pad spectrogram to be evenly divided by downsample ratio. 
- origin_len = x.shape[2] # time_steps - pad_len = int(np.ceil(x.shape[2] / self.downsample_ratio)) * self.downsample_ratio - origin_len - x = F.pad(x, pad=(0, 0, 0, pad_len)) - x = x[..., 0: x.shape[-1] - 2] # (bs, channels, T, F) - - # UNet - (x1_pool, x1) = self.encoder_block1(x, cond_vec) # x1_pool: (bs, 32, T / 2, F / 2) - (x2_pool, x2) = self.encoder_block2(x1_pool, cond_vec) # x2_pool: (bs, 64, T / 4, F / 4) - (x3_pool, x3) = self.encoder_block3(x2_pool, cond_vec) # x3_pool: (bs, 128, T / 8, F / 8) - (x4_pool, x4) = self.encoder_block4(x3_pool, dec_cond_vec) # x4_pool: (bs, 256, T / 16, F / 16) - (x5_pool, x5) = self.encoder_block5(x4_pool, dec_cond_vec) # x5_pool: (bs, 512, T / 32, F / 32) - (x6_pool, x6) = self.encoder_block6(x5_pool, dec_cond_vec) # x6_pool: (bs, 1024, T / 64, F / 64) - x_center = self.conv_block7(x6_pool, dec_cond_vec) # (bs, 2048, T / 64, F / 64) - x7 = self.decoder_block1(x_center, x6, dec_cond_vec) # (bs, 1024, T / 32, F / 32) - x8 = self.decoder_block2(x7, x5, dec_cond_vec) # (bs, 512, T / 16, F / 16) - x9 = self.decoder_block3(x8, x4, cond_vec) # (bs, 256, T / 8, F / 8) - x10 = self.decoder_block4(x9, x3, cond_vec) # (bs, 128, T / 4, F / 4) - x11 = self.decoder_block5(x10, x2, cond_vec) # (bs, 64, T / 2, F / 2) - x12 = self.decoder_block6(x11, x1, cond_vec) # (bs, 32, T, F) - x = self.after_conv_block1(x12, cond_vec) # (bs, 32, T, F) - x = self.after_conv2(x) # (bs, channels, T, F) - - # Recover shape - x = F.pad(x, pad=(0, 2)) - x = x[:, :, 0: origin_len, :] - return x - - -if __name__ == "__main__": - model = UNetRes_FiLM(channels=1, cond_embedding_dim=16) - cond_vec = torch.randn((1, 16)) - dec_vec = cond_vec - print(model(torch.randn((1, 1, 1001, 513)), cond_vec, dec_vec).size()) diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/train_vggishish.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/train_vggishish.py deleted file mode 100644 index 205668224ec87a9ce571f6428531080231b1c16b..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/train_vggishish.py +++ /dev/null @@ -1,199 +0,0 @@ -from loss import WeightedCrossEntropy -import random - -import numpy as np -import torch -import torchvision -from omegaconf import OmegaConf -from torch.utils.data.dataloader import DataLoader -from tqdm import tqdm - -from dataset import VGGSound -from transforms import Crop, StandardNormalizeAudio, ToTensor -from logger import LoggerWithTBoard -from metrics import metrics -from model import VGGishish - -if __name__ == "__main__": - cfg_cli = OmegaConf.from_cli() - cfg_yml = OmegaConf.load(cfg_cli.config) - # the latter arguments are prioritized - cfg = OmegaConf.merge(cfg_yml, cfg_cli) - OmegaConf.set_readonly(cfg, True) - print(OmegaConf.to_yaml(cfg)) - - logger = LoggerWithTBoard(cfg) - - random.seed(cfg.seed) - np.random.seed(cfg.seed) - torch.manual_seed(cfg.seed) - torch.cuda.manual_seed_all(cfg.seed) - # makes iterations faster (in this case 30%) if your inputs are of a fixed size - # https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936/3 - torch.backends.cudnn.benchmark = True - - transforms = [ - StandardNormalizeAudio(cfg.mels_path), - ] - if cfg.cropped_size not in [None, 'None', 'none']: - logger.print_logger.info(f'Using cropping {cfg.cropped_size}') - transforms.append(Crop(cfg.cropped_size)) - transforms.append(ToTensor()) - transforms = 
torchvision.transforms.transforms.Compose(transforms) - - datasets = { - 'train': VGGSound('train', cfg.mels_path, transforms), - 'valid': VGGSound('valid', cfg.mels_path, transforms), - 'test': VGGSound('test', cfg.mels_path, transforms), - } - - loaders = { - 'train': DataLoader(datasets['train'], batch_size=cfg.batch_size, shuffle=True, drop_last=True, - num_workers=cfg.num_workers, pin_memory=True), - 'valid': DataLoader(datasets['valid'], batch_size=cfg.batch_size, - num_workers=cfg.num_workers, pin_memory=True), - 'test': DataLoader(datasets['test'], batch_size=cfg.batch_size, - num_workers=cfg.num_workers, pin_memory=True), - } - - device = torch.device(cfg.device if torch.cuda.is_available() else 'cpu') - - model = VGGishish(cfg.conv_layers, cfg.use_bn, num_classes=len(datasets['train'].target2label)) - model = model.to(device) - param_num = logger.log_param_num(model) - - if cfg.optimizer == 'adam': - optimizer = torch.optim.Adam( - model.parameters(), lr=cfg.learning_rate, betas=cfg.betas, weight_decay=cfg.weight_decay) - elif cfg.optimizer == 'sgd': - optimizer = torch.optim.SGD( - model.parameters(), lr=cfg.learning_rate, momentum=cfg.momentum, weight_decay=cfg.weight_decay) - else: - raise NotImplementedError - - if cfg.cls_weights_in_loss: - weights = 1 / datasets['train'].class_counts - else: - weights = torch.ones(len(datasets['train'].target2label)) - criterion = WeightedCrossEntropy(weights.to(device)) - - # loop over the train and validation multiple times (typical PT boilerplate) - no_change_epochs = 0 - best_valid_loss = float('inf') - early_stop_triggered = False - - for epoch in range(cfg.num_epochs): - - for phase in ['train', 'valid']: - if phase == 'train': - model.train() - else: - model.eval() - - running_loss = 0 - preds_from_each_batch = [] - targets_from_each_batch = [] - - prog_bar = tqdm(loaders[phase], f'{phase} ({epoch})', ncols=0) - for i, batch in enumerate(prog_bar): - inputs = batch['input'].to(device) - targets = batch['target'].to(device) - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - with torch.set_grad_enabled(phase == 'train'): - outputs = model(inputs) - loss = criterion(outputs, targets, to_weight=phase == 'train') - - if phase == 'train': - loss.backward() - optimizer.step() - - # loss - running_loss += loss.item() - - # for metrics calculation later on - preds_from_each_batch += [outputs.detach().cpu()] - targets_from_each_batch += [targets.cpu()] - - # iter logging - if i % 50 == 0: - logger.log_iter_loss(loss.item(), epoch*len(loaders[phase])+i, phase) - # tracks loss in the tqdm progress bar - prog_bar.set_postfix(loss=loss.item()) - - # logging loss - epoch_loss = running_loss / len(loaders[phase]) - logger.log_epoch_loss(epoch_loss, epoch, phase) - - # logging metrics - preds_from_each_batch = torch.cat(preds_from_each_batch) - targets_from_each_batch = torch.cat(targets_from_each_batch) - metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch) - logger.log_epoch_metrics(metrics_dict, epoch, phase) - - # Early stopping - if phase == 'valid': - if epoch_loss < best_valid_loss: - no_change_epochs = 0 - best_valid_loss = epoch_loss - logger.log_best_model(model, epoch_loss, epoch, optimizer, metrics_dict) - else: - no_change_epochs += 1 - logger.print_logger.info( - f'Valid loss hasnt changed for {no_change_epochs} patience: {cfg.patience}' - ) - if no_change_epochs >= cfg.patience: - early_stop_triggered = True - - if early_stop_triggered: - 
logger.print_logger.info(f'Training is early stopped @ {epoch}') - break - - logger.print_logger.info('Finished Training') - - # loading the best model - ckpt = torch.load(logger.best_model_path) - model.load_state_dict(ckpt['model']) - logger.print_logger.info(f'Loading the best model from {logger.best_model_path}') - logger.print_logger.info((f'The model was trained for {ckpt["epoch"]} epochs. Loss: {ckpt["loss"]:.4f}')) - - # Testing the model - model.eval() - running_loss = 0 - preds_from_each_batch = [] - targets_from_each_batch = [] - - for i, batch in enumerate(loaders['test']): - inputs = batch['input'].to(device) - targets = batch['target'].to(device) - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - with torch.set_grad_enabled(False): - outputs = model(inputs) - loss = criterion(outputs, targets, to_weight=False) - - # loss - running_loss += loss.item() - - # for metrics calculation later on - preds_from_each_batch += [outputs.detach().cpu()] - targets_from_each_batch += [targets.cpu()] - - # logging metrics - preds_from_each_batch = torch.cat(preds_from_each_batch) - targets_from_each_batch = torch.cat(targets_from_each_batch) - test_metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch) - test_metrics_dict['avg_loss'] = running_loss / len(loaders['test']) - test_metrics_dict['param_num'] = param_num - # TODO: I have no idea why tboard doesn't keep metrics (hparams) when - # I run this experiment from cli: `python train_vggishish.py config=./configs/vggish.yaml` - # while when I run it in vscode debugger the metrics are logger (wtf) - logger.log_test_metrics(test_metrics_dict, dict(cfg), ckpt['epoch']) - - logger.print_logger.info('Finished the experiment') diff --git a/spaces/AIGuardians/SummarizeWikipediaDocument/summarize_train.py b/spaces/AIGuardians/SummarizeWikipediaDocument/summarize_train.py deleted file mode 100644 index cea810e9a9eb07f1c55219b0193cc660b3122b30..0000000000000000000000000000000000000000 --- a/spaces/AIGuardians/SummarizeWikipediaDocument/summarize_train.py +++ /dev/null @@ -1,109 +0,0 @@ -import transformers -from datasets import load_dataset, load_metric -import datasets -import random -import pandas as pd -from IPython.display import display, HTML -from transformers import AutoTokenizer -from transformers import AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer - - -model_checkpoint = "t5-small" - -raw_datasets = load_dataset("xsum") -metric = load_metric("rouge") - - - -def show_random_elements(dataset, num_examples=5): - assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." 
- picks = [] - for _ in range(num_examples): - pick = random.randint(0, len(dataset) - 1) - while pick in picks: - pick = random.randint(0, len(dataset) - 1) - picks.append(pick) - - df = pd.DataFrame(dataset[picks]) - for column, typ in dataset.features.items(): - if isinstance(typ, datasets.ClassLabel): - df[column] = df[column].transform(lambda i: typ.names[i]) - display(HTML(df.to_html())) - -tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) -print(transformers.__version__) - -if model_checkpoint in ["t5-small", "t5-base", "t5-larg", "t5-3b", "t5-11b"]: - prefix = "summarize: " -else: - prefix = "" - -max_input_length = 1024 -max_target_length = 128 - -def preprocess_function(examples): - inputs = [prefix + doc for doc in examples["document"]] - model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True) - - # Setup the tokenizer for targets - with tokenizer.as_target_tokenizer(): - labels = tokenizer(examples["summary"], max_length=max_target_length, truncation=True) - - model_inputs["labels"] = labels["input_ids"] - return model_inputs - - -model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint) - -batch_size = 16 -model_name = model_checkpoint.split("/")[-1] -args = Seq2SeqTrainingArguments( - f"{model_name}-finetuned-xsum", - evaluation_strategy = "epoch", - learning_rate=2e-5, - per_device_train_batch_size=batch_size, - per_device_eval_batch_size=batch_size, - weight_decay=0.01, - save_total_limit=3, - num_train_epochs=1, - predict_with_generate=True, - fp16=True, - push_to_hub=True, -) - -import nltk -import numpy as np - - -def compute_metrics(eval_pred): - predictions, labels = eval_pred - decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) - # Replace -100 in the labels as we can't decode them. 
- labels = np.where(labels != -100, labels, tokenizer.pad_token_id) - decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) - - # Rouge expects a newline after each sentence - decoded_preds = ["\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds] - decoded_labels = ["\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels] - - result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True) - # Extract a few results - result = {key: value.mid.fmeasure * 100 for key, value in result.items()} - - # Add mean generated length - prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions] - result["gen_len"] = np.mean(prediction_lens) - - return {k: round(v, 4) for k, v in result.items()} - -trainer = Seq2SeqTrainer( - model, - args, - train_dataset=tokenized_datasets["train"], - eval_dataset=tokenized_datasets["validation"], - data_collator=data_collator, - tokenizer=tokenizer, - compute_metrics=compute_metrics -) - - diff --git a/spaces/ARTeLab/DTM_Estimation_SRandD/copy_and_transform_imgs.py b/spaces/ARTeLab/DTM_Estimation_SRandD/copy_and_transform_imgs.py deleted file mode 100644 index 595455bdea7813b5783a9615da1709bf4c2542a3..0000000000000000000000000000000000000000 --- a/spaces/ARTeLab/DTM_Estimation_SRandD/copy_and_transform_imgs.py +++ /dev/null @@ -1,14 +0,0 @@ -from osgeo import gdal -import os -from PIL import Image -import numpy as np - -path = '/home/super/datasets-nas/hirise_oxia_planum_test_tiles_thruth/' - -for i, file_name in enumerate(os.listdir(path)[40:90]): - file_path = os.path.join(path, file_name) - x = gdal.Open(file_path) - x_array = x.ReadAsArray() - # print(x_array.shape) - pil_img = Image.fromarray(np.uint8(x_array), 'L') - pil_img.save(f'demo_imgs/{i}.png') diff --git a/spaces/Abhilashvj/planogram-compliance/val.py b/spaces/Abhilashvj/planogram-compliance/val.py deleted file mode 100644 index 0f618a28114ff7584a3e262495c05803e0651cd4..0000000000000000000000000000000000000000 --- a/spaces/Abhilashvj/planogram-compliance/val.py +++ /dev/null @@ -1,593 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Validate a trained YOLOv5 model accuracy on a custom dataset - -Usage: - $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640 -""" - -import argparse -import json -import os -import sys -from pathlib import Path -from threading import Thread - -import numpy as np -import torch -from tqdm import tqdm - -FILE = Path(__file__).absolute() -sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path - -from models.experimental import attempt_load -from utils.callbacks import Callbacks -from utils.datasets import create_dataloader -from utils.general import ( - box_iou, - check_dataset, - check_img_size, - check_requirements, - check_suffix, - check_yaml, - coco80_to_coco91_class, - colorstr, - increment_path, - non_max_suppression, - scale_coords, - set_logging, - xywh2xyxy, - xyxy2xywh, -) -from utils.metrics import ConfusionMatrix, ap_per_class -from utils.plots import output_to_target, plot_images, plot_study_txt -from utils.torch_utils import select_device, time_sync - - -def save_one_txt(predn, save_conf, shape, file): - # Save one txt result - gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh - for *xyxy, conf, cls in predn.tolist(): - xywh = ( - (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() - ) # normalized xywh - line = ( - (cls, *xywh, conf) if save_conf else (cls, *xywh) - ) # label 
format - with open(file, "a") as f: - f.write(("%g " * len(line)).rstrip() % line + "\n") - - -def save_one_json(predn, jdict, path, class_map): - # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} - image_id = int(path.stem) if path.stem.isnumeric() else path.stem - box = xyxy2xywh(predn[:, :4]) # xywh - box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner - for p, b in zip(predn.tolist(), box.tolist()): - jdict.append( - { - "image_id": image_id, - "category_id": class_map[int(p[5])], - "bbox": [round(x, 3) for x in b], - "score": round(p[4], 5), - } - ) - - -def process_batch(detections, labels, iouv): - """ - Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. - Arguments: - detections (Array[N, 6]), x1, y1, x2, y2, conf, class - labels (Array[M, 5]), class, x1, y1, x2, y2 - Returns: - correct (Array[N, 10]), for 10 IoU levels - """ - correct = torch.zeros( - detections.shape[0], - iouv.shape[0], - dtype=torch.bool, - device=iouv.device, - ) - iou = box_iou(labels[:, 1:], detections[:, :4]) - x = torch.where( - (iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5]) - ) # IoU above threshold and classes match - if x[0].shape[0]: - matches = ( - torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1) - .cpu() - .numpy() - ) # [label, detection, iou] - if x[0].shape[0] > 1: - matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 1], return_index=True)[1]] - # matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 0], return_index=True)[1]] - matches = torch.Tensor(matches).to(iouv.device) - correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv - return correct - - -@torch.no_grad() -def run( - data, - weights=None, # model.pt path(s) - batch_size=32, # batch size - imgsz=640, # inference size (pixels) - conf_thres=0.001, # confidence threshold - iou_thres=0.6, # NMS IoU threshold - task="val", # train, val, test, speed or study - device="", # cuda device, i.e. 
0 or 0,1,2,3 or cpu - single_cls=False, # treat as single-class dataset - augment=False, # augmented inference - verbose=False, # verbose output - save_txt=False, # save results to *.txt - save_hybrid=False, # save label+prediction hybrid results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_json=False, # save a COCO-JSON results file - project="runs/val", # save to project/name - name="exp", # save to project/name - exist_ok=False, # existing project/name ok, do not increment - half=True, # use FP16 half-precision inference - model=None, - dataloader=None, - save_dir=Path(""), - plots=True, - callbacks=Callbacks(), - compute_loss=None, -): - # Initialize/load model and set device - training = model is not None - if training: # called by train.py - device = next(model.parameters()).device # get model device - - else: # called directly - device = select_device(device, batch_size=batch_size) - - # Directories - save_dir = increment_path( - Path(project) / name, exist_ok=exist_ok - ) # increment run - (save_dir / "labels" if save_txt else save_dir).mkdir( - parents=True, exist_ok=True - ) # make dir - - # Load model - check_suffix(weights, ".pt") - model = attempt_load(weights, map_location=device) # load FP32 model - gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size(imgsz, s=gs) # check image size - - # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 - # if device.type != 'cpu' and torch.cuda.device_count() > 1: - # model = nn.DataParallel(model) - - # Data - data = check_dataset(data) # check - - # Half - half &= device.type != "cpu" # half precision only supported on CUDA - if half: - model.half() - - # Configure - model.eval() - is_coco = isinstance(data.get("val"), str) and data["val"].endswith( - "coco/val2017.txt" - ) # COCO dataset - nc = 1 if single_cls else int(data["nc"]) # number of classes - iouv = torch.linspace(0.5, 0.95, 10).to( - device - ) # iou vector for mAP@0.5:0.95 - niou = iouv.numel() - - # Dataloader - if not training: - if device.type != "cpu": - model( - torch.zeros(1, 3, imgsz, imgsz) - .to(device) - .type_as(next(model.parameters())) - ) # run once - task = ( - task if task in ("train", "val", "test") else "val" - ) # path to train/val/test images - dataloader = create_dataloader( - data[task], - imgsz, - batch_size, - gs, - single_cls, - pad=0.5, - rect=True, - prefix=colorstr(f"{task}: "), - )[0] - - seen = 0 - confusion_matrix = ConfusionMatrix(nc=nc) - names = { - k: v - for k, v in enumerate( - model.names if hasattr(model, "names") else model.module.names - ) - } - class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ("%20s" + "%11s" * 6) % ( - "Class", - "Images", - "Labels", - "P", - "R", - "mAP@.5", - "mAP@.5:.95", - ) - dt, p, r, f1, mp, mr, map50, map = ( - [0.0, 0.0, 0.0], - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - ) - loss = torch.zeros(3, device=device) - jdict, stats, ap, ap_class = [], [], [], [] - for batch_i, (img, targets, paths, shapes) in enumerate( - tqdm(dataloader, desc=s) - ): - t1 = time_sync() - img = img.to(device, non_blocking=True) - img = img.half() if half else img.float() # uint8 to fp16/32 - img /= 255.0 # 0 - 255 to 0.0 - 1.0 - targets = targets.to(device) - nb, _, height, width = img.shape # batch size, channels, height, width - t2 = time_sync() - dt[0] += t2 - t1 - - # Run model - out, train_out = model( - img, augment=augment - ) # inference and training outputs - dt[1] += time_sync() 
- t2 - - # Compute loss - if compute_loss: - loss += compute_loss([x.float() for x in train_out], targets)[ - 1 - ] # box, obj, cls - - # Run NMS - targets[:, 2:] *= torch.Tensor([width, height, width, height]).to( - device - ) # to pixels - lb = ( - [targets[targets[:, 0] == i, 1:] for i in range(nb)] - if save_hybrid - else [] - ) # for autolabelling - t3 = time_sync() - out = non_max_suppression( - out, - conf_thres, - iou_thres, - labels=lb, - multi_label=True, - agnostic=single_cls, - ) - dt[2] += time_sync() - t3 - - # Statistics per image - for si, pred in enumerate(out): - labels = targets[targets[:, 0] == si, 1:] - nl = len(labels) - tcls = labels[:, 0].tolist() if nl else [] # target class - path, shape = Path(paths[si]), shapes[si][0] - seen += 1 - - if len(pred) == 0: - if nl: - stats.append( - ( - torch.zeros(0, niou, dtype=torch.bool), - torch.Tensor(), - torch.Tensor(), - tcls, - ) - ) - continue - - # Predictions - if single_cls: - pred[:, 5] = 0 - predn = pred.clone() - scale_coords( - img[si].shape[1:], predn[:, :4], shape, shapes[si][1] - ) # native-space pred - - # Evaluate - if nl: - tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords( - img[si].shape[1:], tbox, shape, shapes[si][1] - ) # native-space labels - labelsn = torch.cat( - (labels[:, 0:1], tbox), 1 - ) # native-space labels - correct = process_batch(predn, labelsn, iouv) - if plots: - confusion_matrix.process_batch(predn, labelsn) - else: - correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool) - stats.append( - (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls) - ) # (correct, conf, pcls, tcls) - - # Save/log - if save_txt: - save_one_txt( - predn, - save_conf, - shape, - file=save_dir / "labels" / (path.stem + ".txt"), - ) - if save_json: - save_one_json( - predn, jdict, path, class_map - ) # append to COCO-JSON dictionary - callbacks.run( - "on_val_image_end", pred, predn, path, names, img[si] - ) - - # Plot images - if plots and batch_i < 3: - f = save_dir / f"val_batch{batch_i}_labels.jpg" # labels - Thread( - target=plot_images, - args=(img, targets, paths, f, names), - daemon=True, - ).start() - f = save_dir / f"val_batch{batch_i}_pred.jpg" # predictions - Thread( - target=plot_images, - args=(img, output_to_target(out), paths, f, names), - daemon=True, - ).start() - - # Compute statistics - stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy - if len(stats) and stats[0].any(): - p, r, ap, f1, ap_class = ap_per_class( - *stats, plot=plots, save_dir=save_dir, names=names - ) - ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 - mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() - nt = np.bincount( - stats[3].astype(np.int64), minlength=nc - ) # number of targets per class - else: - nt = torch.zeros(1) - - # Print results - pf = "%20s" + "%11i" * 2 + "%11.3g" * 4 # print format - print(pf % ("all", seen, nt.sum(), mp, mr, map50, map)) - - # Print results per class - if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): - for i, c in enumerate(ap_class): - print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) - - # Print speeds - t = tuple(x / seen * 1e3 for x in dt) # speeds per image - if not training: - shape = (batch_size, 3, imgsz, imgsz) - print( - f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}" - % t - ) - - # Plots - if plots: - confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - callbacks.run("on_val_end") - - # Save JSON - if save_json and len(jdict): - w = ( - 
Path(weights[0] if isinstance(weights, list) else weights).stem - if weights is not None - else "" - ) # weights - anno_json = str( - Path(data.get("path", "../coco")) - / "annotations/instances_val2017.json" - ) # annotations json - pred_json = str(save_dir / f"{w}_predictions.json") # predictions json - print(f"\nEvaluating pycocotools mAP... saving {pred_json}...") - with open(pred_json, "w") as f: - json.dump(jdict, f) - - try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - check_requirements(["pycocotools"]) - from pycocotools.coco import COCO - from pycocotools.cocoeval import COCOeval - - anno = COCO(anno_json) # init annotations api - pred = anno.loadRes(pred_json) # init predictions api - eval = COCOeval(anno, pred, "bbox") - if is_coco: - eval.params.imgIds = [ - int(Path(x).stem) for x in dataloader.dataset.img_files - ] # image IDs to evaluate - eval.evaluate() - eval.accumulate() - eval.summarize() - map, map50 = eval.stats[ - :2 - ] # update results (mAP@0.5:0.95, mAP@0.5) - except Exception as e: - print(f"pycocotools unable to run: {e}") - - # Return results - model.float() # for training - if not training: - s = ( - f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" - if save_txt - else "" - ) - print(f"Results saved to {colorstr('bold', save_dir)}{s}") - maps = np.zeros(nc) + map - for i, c in enumerate(ap_class): - maps[c] = ap[i] - return ( - (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), - maps, - t, - ) - - -def parse_opt(): - parser = argparse.ArgumentParser(prog="val.py") - parser.add_argument( - "--data", - type=str, - default="data/coco128.yaml", - help="dataset.yaml path", - ) - parser.add_argument( - "--weights", - nargs="+", - type=str, - default="yolov5s.pt", - help="model.pt path(s)", - ) - parser.add_argument( - "--batch-size", type=int, default=32, help="batch size" - ) - parser.add_argument( - "--imgsz", - "--img", - "--img-size", - type=int, - default=640, - help="inference size (pixels)", - ) - parser.add_argument( - "--conf-thres", type=float, default=0.001, help="confidence threshold" - ) - parser.add_argument( - "--iou-thres", type=float, default=0.6, help="NMS IoU threshold" - ) - parser.add_argument( - "--task", default="val", help="train, val, test, speed or study" - ) - parser.add_argument( - "--device", default="", help="cuda device, i.e. 
0 or 0,1,2,3 or cpu" - ) - parser.add_argument( - "--single-cls", - action="store_true", - help="treat as single-class dataset", - ) - parser.add_argument( - "--augment", action="store_true", help="augmented inference" - ) - parser.add_argument( - "--verbose", action="store_true", help="report mAP by class" - ) - parser.add_argument( - "--save-txt", action="store_true", help="save results to *.txt" - ) - parser.add_argument( - "--save-hybrid", - action="store_true", - help="save label+prediction hybrid results to *.txt", - ) - parser.add_argument( - "--save-conf", - action="store_true", - help="save confidences in --save-txt labels", - ) - parser.add_argument( - "--save-json", - action="store_true", - help="save a COCO-JSON results file", - ) - parser.add_argument( - "--project", default="runs/val", help="save to project/name" - ) - parser.add_argument("--name", default="exp", help="save to project/name") - parser.add_argument( - "--exist-ok", - action="store_true", - help="existing project/name ok, do not increment", - ) - parser.add_argument( - "--half", action="store_true", help="use FP16 half-precision inference" - ) - opt = parser.parse_args() - opt.save_json |= opt.data.endswith("coco.yaml") - opt.save_txt |= opt.save_hybrid - opt.data = check_yaml(opt.data) # check YAML - return opt - - -def main(opt): - set_logging() - print( - colorstr("val: ") + ", ".join(f"{k}={v}" for k, v in vars(opt).items()) - ) - check_requirements( - requirements=FILE.parent / "requirements.txt", - exclude=("tensorboard", "thop"), - ) - - if opt.task in ("train", "val", "test"): # run normally - run(**vars(opt)) - - elif opt.task == "speed": # speed benchmarks - for w in ( - opt.weights if isinstance(opt.weights, list) else [opt.weights] - ): - run( - opt.data, - weights=w, - batch_size=opt.batch_size, - imgsz=opt.imgsz, - conf_thres=0.25, - iou_thres=0.45, - save_json=False, - plots=False, - ) - - elif opt.task == "study": # run over a range of settings and save/plot - # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt - x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) - for w in ( - opt.weights if isinstance(opt.weights, list) else [opt.weights] - ): - f = f"study_{Path(opt.data).stem}_{Path(w).stem}.txt" # filename to save to - y = [] # y axis - for i in x: # img-size - print(f"\nRunning {f} point {i}...") - r, _, t = run( - opt.data, - weights=w, - batch_size=opt.batch_size, - imgsz=i, - conf_thres=opt.conf_thres, - iou_thres=opt.iou_thres, - save_json=opt.save_json, - plots=False, - ) - y.append(r + t) # results and times - np.savetxt(f, y, fmt="%10.4g") # save - os.system("zip -r study.zip study_*.txt") - plot_study_txt(x=x) # plot - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetExpandedChildWidth.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetExpandedChildWidth.js deleted file mode 100644 index fbeb90938e8929f12df5c7d705061662d1197aca..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetExpandedChildWidth.js +++ /dev/null @@ -1,22 +0,0 @@ -var GetExpandedChildWidth = function (child, parentWidth) { - if (parentWidth === undefined) { - parentWidth = this.width; - } - - var childWidth; - var childConfig = child.rexSizer; - var padding = childConfig.padding; - if (this.orientation === 0) { // x - if 
((childConfig.proportion > 0) && (this.proportionLength > 0)) { - childWidth = (childConfig.proportion * this.proportionLength); - } - } else { // y - if (childConfig.expand) { - var innerWidth = parentWidth - this.space.left - this.space.right; - childWidth = innerWidth - padding.left - padding.right; - } - } - return childWidth; -} - -export default GetExpandedChildWidth; \ No newline at end of file diff --git a/spaces/AiBototicus/BucksAI-3/README.md b/spaces/AiBototicus/BucksAI-3/README.md deleted file mode 100644 index 5cfdc1de329f7bd7efa998d28bafd0d5b4c63cbf..0000000000000000000000000000000000000000 --- a/spaces/AiBototicus/BucksAI-3/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AiBototicus Autotrain Birds 48829118237 -emoji: 🌖 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: bigscience-openrail-m ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Alfasign/dIFFU/README.md b/spaces/Alfasign/dIFFU/README.md deleted file mode 100644 index 6f4dfc00a699a00e28ca59cd8b76252e00b1ee7c..0000000000000000000000000000000000000000 --- a/spaces/Alfasign/dIFFU/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 411 Models Toy World -emoji: 🪅🌐 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -duplicated_from: Yntec/ToyWorld ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AlphaGPT/PaperSummary/README.md b/spaces/AlphaGPT/PaperSummary/README.md deleted file mode 100644 index daabaff576a77c94d81ce153bec10ac4cab6bb44..0000000000000000000000000000000000000000 --- a/spaces/AlphaGPT/PaperSummary/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: PaperSummary -emoji: 📚 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false -license: cc-by-nc-nd-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/README.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/README.md deleted file mode 100644 index 4a227cdb4d63585cc0f0ab76424be8a0b2c5b604..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# Distillation for quantization on Textual Inversion models to personalize text2image - -[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images._By using just 3-5 images new concepts can be taught to Stable Diffusion and the model personalized on your own images_ -The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion. -We have enabled distillation for quantization in `textual_inversion.py` to do quantization aware training as well as distillation on the model generated by Textual Inversion method. 
- -## Installing the dependencies - -Before running the scripts, make sure to install the library's training dependencies: - -```bash -pip install -r requirements.txt -``` - -## Prepare Datasets - -One picture which is from the huggingface datasets [sd-concepts-library/dicoo2](https://huggingface.co/sd-concepts-library/dicoo2) is needed, and save it to the `./dicoo` directory. The picture is shown below: - - - - - -## Get a FP32 Textual Inversion model - -Use the following command to fine-tune the Stable Diffusion model on the above dataset to obtain the FP32 Textual Inversion model. - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export DATA_DIR="./dicoo" - -accelerate launch textual_inversion.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_data_dir=$DATA_DIR \ - --learnable_property="object" \ - --placeholder_token="" --initializer_token="toy" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=4 \ - --max_train_steps=3000 \ - --learning_rate=5.0e-04 --scale_lr \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --output_dir="dicoo_model" -``` - -## Do distillation for quantization - -Distillation for quantization is a method that combines [intermediate layer knowledge distillation](https://github.com/intel/neural-compressor/blob/master/docs/source/distillation.md#intermediate-layer-knowledge-distillation) and [quantization aware training](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md#quantization-aware-training) in the same training process to improve the performance of the quantized model. Provided a FP32 model, the distillation for quantization approach will take this model itself as the teacher model and transfer the knowledges of the specified layers to the student model, i.e. quantized version of the FP32 model, during the quantization aware training process. - -Once you have the FP32 Textual Inversion model, the following command will take the FP32 Textual Inversion model as input to do distillation for quantization and generate the INT8 Textual Inversion model. - -```bash -export FP32_MODEL_NAME="./dicoo_model" -export DATA_DIR="./dicoo" - -accelerate launch textual_inversion.py \ - --pretrained_model_name_or_path=$FP32_MODEL_NAME \ - --train_data_dir=$DATA_DIR \ - --use_ema --learnable_property="object" \ - --placeholder_token="" --initializer_token="toy" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=4 \ - --max_train_steps=300 \ - --learning_rate=5.0e-04 --max_grad_norm=3 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --output_dir="int8_model" \ - --do_quantization --do_distillation --verify_loading -``` - -After the distillation for quantization process, the quantized UNet would be 4 times smaller (3279MB -> 827MB). - -## Inference - -Once you have trained a INT8 model with the above command, the inference can be done simply using the `text2images.py` script. Make sure to include the `placeholder_token` in your prompt. - -```bash -export INT8_MODEL_NAME="./int8_model" - -python text2images.py \ - --pretrained_model_name_or_path=$INT8_MODEL_NAME \ - --caption "a lovely in red dress and hat, in the snowly and brightly night, with many brighly buildings." \ - --images_num 4 -``` - -Here is the comparison of images generated by the FP32 model (left) and INT8 model (right) respectively: - -

- [FP32 result image] - [INT8 result image] -
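The distillation-for-quantization recipe described in this README boils down to training the quantized (QAT) copy of the FP32 model against two signals at once: the usual task loss, plus an intermediate-layer distillation loss computed against the frozen FP32 teacher. The sketch below is a minimal, hedged illustration of that combined step in plain PyTorch; it is not the Intel Neural Compressor API that `textual_inversion.py` actually uses, and the layer name, loss weight, and model interfaces are illustrative assumptions.

```python
# Minimal sketch (assumptions): `student` is a QAT-prepared copy of the FP32 `teacher`,
# both expose a submodule at `layer_name`, and `task_loss_fn` is the normal training loss.
# This is NOT the Intel Neural Compressor API used by textual_inversion.py.
import torch
import torch.nn.functional as F

def distill_qat_step(teacher, student, inputs, targets, task_loss_fn,
                     layer_name="mid_block", alpha=0.5):
    feats = {}

    def capture(key):
        def hook(_module, _inp, out):
            feats[key] = out
        return hook

    # Capture matching intermediate activations from teacher and student.
    h_t = dict(teacher.named_modules())[layer_name].register_forward_hook(capture("teacher"))
    h_s = dict(student.named_modules())[layer_name].register_forward_hook(capture("student"))

    with torch.no_grad():
        teacher(inputs)          # frozen FP32 teacher, used only to produce feature targets
    preds = student(inputs)      # student runs through its fake-quant ops (QAT)

    # Task loss + intermediate-layer knowledge distillation loss.
    loss = task_loss_fn(preds, targets) \
        + alpha * F.mse_loss(feats["student"], feats["teacher"].detach())

    h_t.remove()
    h_s.remove()
    return loss
```

After quantization-aware training converges, converting the student's fake-quant observers into real INT8 kernels is what produces the roughly 4x smaller UNet mentioned above.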

        - diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/vq_diffusion/__init__.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/vq_diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Andy1621/uniformer_image_demo/README.md b/spaces/Andy1621/uniformer_image_demo/README.md deleted file mode 100644 index de8ef46944886f63059954a8cd9eda98d1f156be..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Uniformer_image_demo -emoji: 📷 -colorFrom: pink -colorTo: green -sdk: gradio -sdk_version: 3.0.3 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Andy1621/uniformer_image_detection/configs/albu_example/README.md b/spaces/Andy1621/uniformer_image_detection/configs/albu_example/README.md deleted file mode 100644 index bf35a9bc861a3df4e0e556891d9f56bb96a8d588..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/albu_example/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Albu Example - -[OTHERS] - -``` -@article{2018arXiv180906839B, - author = {A. Buslaev, A. Parinov, E. Khvedchenya, V.~I. Iglovikov and A.~A. Kalinin}, - title = "{Albumentations: fast and flexible image augmentations}", - journal = {ArXiv e-prints}, - eprint = {1809.06839}, - year = 2018 -} -``` - -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:| -| R-50 | pytorch | 1x | 4.4 | 16.6 | 38.0 | 34.5 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208-ab203bcd.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208_225520.log.json) | diff --git a/spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py deleted file mode 100644 index 81f61c6ee136628940e8bcc146d785840ac83c38..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py +++ /dev/null @@ -1,44 +0,0 @@ -_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' -model = dict( - pretrained='open-mmlab://detectron/resnet101_caffe', - backbone=dict(depth=101)) -img_norm_cfg = dict( - mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - 
type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index f62da1a8090da389a77d77a9887926af2a7ded49..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py b/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 2221b202d6c53c4b04f2431d3344379cbfe06dd7..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/visualization/__init__.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/visualization/__init__.py deleted file mode 100644 index 835df136bdcf69348281d22914d41aa84cdf92b1..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/visualization/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .color import Color, color_val -from .image import imshow, imshow_bboxes, imshow_det_bboxes -from .optflow import flow2rgb, flowshow, make_color_wheel - -__all__ = [ - 'Color', 'color_val', 'imshow', 'imshow_bboxes', 'imshow_det_bboxes', - 'flowshow', 'flow2rgb', 'make_color_wheel' -] diff --git a/spaces/ArkanDash/rvc-models-new/app.py b/spaces/ArkanDash/rvc-models-new/app.py deleted file mode 100644 index 90b205513923f0b115f8e116c6673eb63bfd0573..0000000000000000000000000000000000000000 --- a/spaces/ArkanDash/rvc-models-new/app.py +++ /dev/null @@ -1,735 +0,0 @@ -import os -import glob -import json -import traceback -import logging -import gradio as gr -import numpy as np -import librosa -import torch -import asyncio -import edge_tts -import yt_dlp -import ffmpeg -import subprocess -import sys -import io -import wave -from datetime import datetime -from fairseq import checkpoint_utils -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from vc_infer_pipeline import VC -from config import Config -config = Config() -logging.getLogger("numba").setLevel(logging.WARNING) -spaces = os.getenv("SYSTEM") == "spaces" -force_support = None -if config.unsupported is False: - if config.device == "mps" or config.device == "cpu": - force_support = False -else: - force_support = True - -audio_mode = [] -f0method_mode = [] -f0method_info = "" - -if force_support is False or spaces is True: - if spaces is True: - audio_mode = ["Upload audio", "TTS Audio"] - else: - audio_mode = ["Input path", "Upload audio", "TTS Audio"] - f0method_mode = ["pm", "harvest"] - f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better). (Default: PM)" -else: - audio_mode = ["Input path", "Upload audio", "Youtube", "TTS Audio"] - f0method_mode = ["pm", "harvest", "crepe"] - f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better), and Crepe effect is good but requires GPU (Default: PM)" - -if os.path.isfile("rmvpe.pt"): - f0method_mode.insert(2, "rmvpe") - -def create_vc_fn(model_name, tgt_sr, net_g, vc, if_f0, version, file_index): - def vc_fn( - vc_audio_mode, - vc_input, - vc_upload, - tts_text, - tts_voice, - f0_up_key, - f0_method, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - ): - try: - logs = [] - print(f"Converting using {model_name}...") - logs.append(f"Converting using {model_name}...") - yield "\n".join(logs), None - if vc_audio_mode == "Input path" or "Youtube" and vc_input != "": - audio, sr = librosa.load(vc_input, sr=16000, mono=True) - elif vc_audio_mode == "Upload audio": - if vc_upload is None: - return "You need to upload an audio", None - sampling_rate, audio = vc_upload - duration = audio.shape[0] / sampling_rate - if duration > 20 and spaces: - return "Please upload an audio file that is less than 20 seconds. 
If you need to generate a longer audio file, please use Colab.", None - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - elif vc_audio_mode == "TTS Audio": - if len(tts_text) > 100 and spaces: - return "Text is too long", None - if tts_text is None or tts_voice is None: - return "You need to enter text and select a voice", None - asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3")) - audio, sr = librosa.load("tts.mp3", sr=16000, mono=True) - vc_input = "tts.mp3" - times = [0, 0, 0] - f0_up_key = int(f0_up_key) - audio_opt = vc.pipeline( - hubert_model, - net_g, - 0, - audio, - vc_input, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - f0_file=None, - ) - info = f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s" - print(f"{model_name} | {info}") - logs.append(f"Successfully Convert {model_name}\n{info}") - yield "\n".join(logs), (tgt_sr, audio_opt) - except Exception as err: - info = traceback.format_exc() - print(info) - primt(f"Error when using {model_name}.\n{str(err)}") - yield info, None - return vc_fn - -def load_model(): - categories = [] - if os.path.isfile("weights/folder_info.json"): - for _, w_dirs, _ in os.walk(f"weights"): - category_count_total = len(w_dirs) - category_count = 1 - with open("weights/folder_info.json", "r", encoding="utf-8") as f: - folder_info = json.load(f) - for category_name, category_info in folder_info.items(): - if not category_info['enable']: - continue - category_title = category_info['title'] - category_folder = category_info['folder_path'] - description = category_info['description'] - print(f"Load {category_title} [{category_count}/{category_count_total}]") - models = [] - for _, m_dirs, _ in os.walk(f"weights/{category_folder}"): - model_count_total = len(m_dirs) - model_count = 1 - with open(f"weights/{category_folder}/model_info.json", "r", encoding="utf-8") as f: - models_info = json.load(f) - for character_name, info in models_info.items(): - if not info['enable']: - continue - model_title = info['title'] - model_name = info['model_path'] - model_author = info.get("author", None) - model_cover = f"weights/{category_folder}/{character_name}/{info['cover']}" - model_index = f"weights/{category_folder}/{character_name}/{info['feature_retrieval_library']}" - cpt = torch.load(f"weights/{category_folder}/{character_name}/{model_name}", map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - version = cpt.get("version", "v1") - if version == "v1": - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - model_version = "V1" - elif version == "v2": - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - model_version = "V2" - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) - net_g.eval().to(config.device) - if config.is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, config) - print(f"Model 
loaded [{model_count}/{model_count_total}]: {character_name} / {info['feature_retrieval_library']} | ({model_version})") - model_count += 1 - models.append((character_name, model_title, model_author, model_cover, model_version, create_vc_fn(model_name, tgt_sr, net_g, vc, if_f0, version, model_index))) - category_count += 1 - categories.append([category_title, description, models]) - elif os.path.exists("weights"): - models = [] - for w_root, w_dirs, _ in os.walk("weights"): - model_count = 1 - for sub_dir in w_dirs: - pth_files = glob.glob(f"weights/{sub_dir}/*.pth") - index_files = glob.glob(f"weights/{sub_dir}/*.index") - if pth_files == []: - print(f"Model [{model_count}/{len(w_dirs)}]: No Model file detected, skipping...") - continue - cpt = torch.load(pth_files[0]) - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - version = cpt.get("version", "v1") - if version == "v1": - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - model_version = "V1" - elif version == "v2": - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - model_version = "V2" - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) - net_g.eval().to(config.device) - if config.is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, config) - if index_files == []: - print("Warning: No Index file detected!") - index_info = "None" - model_index = "" - else: - index_info = index_files[0] - model_index = index_files[0] - print(f"Model loaded [{model_count}/{len(w_dirs)}]: {index_files[0]} / {index_info} | ({model_version})") - model_count += 1 - models.append((index_files[0][:-4], index_files[0][:-4], "", "", model_version, create_vc_fn(index_files[0], tgt_sr, net_g, vc, if_f0, version, model_index))) - categories.append(["Models", "", models]) - else: - categories = [] - return categories - -def download_audio(url, audio_provider): - logs = [] - if url == "": - logs.append("URL required!") - yield None, "\n".join(logs) - return None, "\n".join(logs) - if not os.path.exists("dl_audio"): - os.mkdir("dl_audio") - if audio_provider == "Youtube": - logs.append("Downloading the audio...") - yield None, "\n".join(logs) - ydl_opts = { - 'noplaylist': True, - 'format': 'bestaudio/best', - 'postprocessors': [{ - 'key': 'FFmpegExtractAudio', - 'preferredcodec': 'wav', - }], - "outtmpl": 'dl_audio/audio', - } - audio_path = "dl_audio/audio.wav" - with yt_dlp.YoutubeDL(ydl_opts) as ydl: - ydl.download([url]) - logs.append("Download Complete.") - yield audio_path, "\n".join(logs) - -def cut_vocal_and_inst(split_model): - logs = [] - logs.append("Starting the audio splitting process...") - yield "\n".join(logs), None, None, None - command = f"demucs --two-stems=vocals -n {split_model} dl_audio/audio.wav -o output" - result = subprocess.Popen(command.split(), stdout=subprocess.PIPE, text=True) - for line in result.stdout: - logs.append(line) - yield "\n".join(logs), None, None, None - print(result.stdout) - vocal = f"output/{split_model}/audio/vocals.wav" - inst = f"output/{split_model}/audio/no_vocals.wav" - logs.append("Audio splitting complete.") - yield "\n".join(logs), vocal, inst, vocal - -def combine_vocal_and_inst(audio_data, vocal_volume, inst_volume, split_model): - if not 
os.path.exists("output/result"): - os.mkdir("output/result") - vocal_path = "output/result/output.wav" - output_path = "output/result/combine.mp3" - inst_path = f"output/{split_model}/audio/no_vocals.wav" - with wave.open(vocal_path, "w") as wave_file: - wave_file.setnchannels(1) - wave_file.setsampwidth(2) - wave_file.setframerate(audio_data[0]) - wave_file.writeframes(audio_data[1].tobytes()) - command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [0:a]volume={inst_volume}[i];[1:a]volume={vocal_volume}[v];[i][v]amix=inputs=2:duration=longest[a] -map [a] -b:a 320k -c:a libmp3lame {output_path}' - result = subprocess.run(command.split(), stdout=subprocess.PIPE) - print(result.stdout.decode()) - return output_path - -def load_hubert(): - global hubert_model - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - -def change_audio_mode(vc_audio_mode): - if vc_audio_mode == "Input path": - return ( - # Input & Upload - gr.Textbox.update(visible=True), - gr.Checkbox.update(visible=False), - gr.Audio.update(visible=False), - # Youtube - gr.Dropdown.update(visible=False), - gr.Textbox.update(visible=False), - gr.Textbox.update(visible=False), - gr.Button.update(visible=False), - # Splitter - gr.Dropdown.update(visible=False), - gr.Textbox.update(visible=False), - gr.Button.update(visible=False), - gr.Audio.update(visible=False), - gr.Audio.update(visible=False), - gr.Audio.update(visible=False), - gr.Slider.update(visible=False), - gr.Slider.update(visible=False), - gr.Audio.update(visible=False), - gr.Button.update(visible=False), - # TTS - gr.Textbox.update(visible=False), - gr.Dropdown.update(visible=False) - ) - elif vc_audio_mode == "Upload audio": - return ( - # Input & Upload - gr.Textbox.update(visible=False), - gr.Checkbox.update(visible=True), - gr.Audio.update(visible=True), - # Youtube - gr.Dropdown.update(visible=False), - gr.Textbox.update(visible=False), - gr.Textbox.update(visible=False), - gr.Button.update(visible=False), - # Splitter - gr.Dropdown.update(visible=False), - gr.Textbox.update(visible=False), - gr.Button.update(visible=False), - gr.Audio.update(visible=False), - gr.Audio.update(visible=False), - gr.Audio.update(visible=False), - gr.Slider.update(visible=False), - gr.Slider.update(visible=False), - gr.Audio.update(visible=False), - gr.Button.update(visible=False), - # TTS - gr.Textbox.update(visible=False), - gr.Dropdown.update(visible=False) - ) - elif vc_audio_mode == "Youtube": - return ( - # Input & Upload - gr.Textbox.update(visible=False), - gr.Checkbox.update(visible=False), - gr.Audio.update(visible=False), - # Youtube - gr.Dropdown.update(visible=True), - gr.Textbox.update(visible=True), - gr.Textbox.update(visible=True), - gr.Button.update(visible=True), - # Splitter - gr.Dropdown.update(visible=True), - gr.Textbox.update(visible=True), - gr.Button.update(visible=True), - gr.Audio.update(visible=True), - gr.Audio.update(visible=True), - gr.Audio.update(visible=True), - gr.Slider.update(visible=True), - gr.Slider.update(visible=True), - gr.Audio.update(visible=True), - gr.Button.update(visible=True), - # TTS - gr.Textbox.update(visible=False), - gr.Dropdown.update(visible=False) - ) - elif vc_audio_mode == "TTS Audio": - return ( - # Input & Upload - gr.Textbox.update(visible=False), - 
gr.Checkbox.update(visible=False), - gr.Audio.update(visible=False), - # Youtube - gr.Dropdown.update(visible=False), - gr.Textbox.update(visible=False), - gr.Textbox.update(visible=False), - gr.Button.update(visible=False), - # Splitter - gr.Dropdown.update(visible=False), - gr.Textbox.update(visible=False), - gr.Button.update(visible=False), - gr.Audio.update(visible=False), - gr.Audio.update(visible=False), - gr.Audio.update(visible=False), - gr.Slider.update(visible=False), - gr.Slider.update(visible=False), - gr.Audio.update(visible=False), - gr.Button.update(visible=False), - # TTS - gr.Textbox.update(visible=True), - gr.Dropdown.update(visible=True) - ) - -def use_microphone(microphone): - if microphone == True: - return gr.Audio.update(source="microphone") - else: - return gr.Audio.update(source="upload") - -if __name__ == '__main__': - load_hubert() - categories = load_model() - tts_voice_list = asyncio.new_event_loop().run_until_complete(edge_tts.list_voices()) - voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list] - with gr.Blocks() as app: - gr.Markdown( - "
        \n\n"+ - "# RVC Genshin Impact\n\n"+ - "### Recommended to use Google Colab to use other character and feature.\n\n"+ - "[![Colab](https://img.shields.io/badge/Colab-RVC%20Genshin%20Impact-blue?style=for-the-badge&logo=googlecolab)](https://colab.research.google.com/drive/110kiMZTdP6Ri1lY9-NbQf17GVPPhHyeT?usp=sharing)\n\n"+ - "
        \n\n"+ - "[![Repository](https://img.shields.io/badge/Github-Multi%20Model%20RVC%20Inference-blue?style=for-the-badge&logo=github)](https://github.com/ArkanDash/Multi-Model-RVC-Inference)" - ) - if categories == []: - gr.Markdown( - "
        \n\n"+ - "## No model found, please add the model into weights folder\n\n"+ - "
        " - ) - for (folder_title, description, models) in categories: - with gr.TabItem(folder_title): - if description: - gr.Markdown(f"###
        {description}") - with gr.Tabs(): - if not models: - gr.Markdown("#
        No Model Loaded.") - gr.Markdown("##
        Please add the model or fix your model path.") - continue - for (name, title, author, cover, model_version, vc_fn) in models: - with gr.TabItem(name): - with gr.Row(): - gr.Markdown( - '
        ' - f'
        {title}
        \n'+ - f'
        RVC {model_version} Model
        \n'+ - (f'
        Model author: {author}
        ' if author else "")+ - (f'' if cover else "")+ - '
        ' - ) - with gr.Row(): - if spaces is False: - with gr.TabItem("Input"): - with gr.Row(): - with gr.Column(): - vc_audio_mode = gr.Dropdown(label="Input voice", choices=audio_mode, allow_custom_value=False, value="Upload audio") - # Input - vc_input = gr.Textbox(label="Input audio path", visible=False) - # Upload - vc_microphone_mode = gr.Checkbox(label="Use Microphone", value=False, visible=True, interactive=True) - vc_upload = gr.Audio(label="Upload audio file", source="upload", visible=True, interactive=True) - # Youtube - vc_download_audio = gr.Dropdown(label="Provider", choices=["Youtube"], allow_custom_value=False, visible=False, value="Youtube", info="Select provider (Default: Youtube)") - vc_link = gr.Textbox(label="Youtube URL", visible=False, info="Example: https://www.youtube.com/watch?v=Nc0sB1Bmf-A", placeholder="https://www.youtube.com/watch?v=...") - vc_log_yt = gr.Textbox(label="Output Information", visible=False, interactive=False) - vc_download_button = gr.Button("Download Audio", variant="primary", visible=False) - vc_audio_preview = gr.Audio(label="Audio Preview", visible=False) - # TTS - tts_text = gr.Textbox(label="TTS text", info="Text to speech input", visible=False) - tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female") - with gr.Column(): - vc_split_model = gr.Dropdown(label="Splitter Model", choices=["hdemucs_mmi", "htdemucs", "htdemucs_ft", "mdx", "mdx_q", "mdx_extra_q"], allow_custom_value=False, visible=False, value="htdemucs", info="Select the splitter model (Default: htdemucs)") - vc_split_log = gr.Textbox(label="Output Information", visible=False, interactive=False) - vc_split = gr.Button("Split Audio", variant="primary", visible=False) - vc_vocal_preview = gr.Audio(label="Vocal Preview", visible=False) - vc_inst_preview = gr.Audio(label="Instrumental Preview", visible=False) - with gr.TabItem("Convert"): - with gr.Row(): - with gr.Column(): - vc_transform0 = gr.Number(label="Transpose", value=0, info='Type "12" to change from male to female voice. Type "-12" to change female to male voice') - f0method0 = gr.Radio( - label="Pitch extraction algorithm", - info=f0method_info, - choices=f0method_mode, - value="pm", - interactive=True - ) - index_rate1 = gr.Slider( - minimum=0, - maximum=1, - label="Retrieval feature ratio", - info="(Default: 0.7)", - value=0.7, - interactive=True, - ) - filter_radius0 = gr.Slider( - minimum=0, - maximum=7, - label="Apply Median Filtering", - info="The value represents the filter radius and can reduce breathiness.", - value=3, - step=1, - interactive=True, - ) - resample_sr0 = gr.Slider( - minimum=0, - maximum=48000, - label="Resample the output audio", - info="Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling", - value=0, - step=1, - interactive=True, - ) - rms_mix_rate0 = gr.Slider( - minimum=0, - maximum=1, - label="Volume Envelope", - info="Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used", - value=1, - interactive=True, - ) - protect0 = gr.Slider( - minimum=0, - maximum=0.5, - label="Voice Protection", - info="Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. 
Decrease the value to increase protection, but it may reduce indexing accuracy", - value=0.5, - step=0.01, - interactive=True, - ) - with gr.Column(): - vc_log = gr.Textbox(label="Output Information", interactive=False) - vc_output = gr.Audio(label="Output Audio", interactive=False) - vc_convert = gr.Button("Convert", variant="primary") - vc_vocal_volume = gr.Slider( - minimum=0, - maximum=10, - label="Vocal volume", - value=1, - interactive=True, - step=1, - info="Adjust vocal volume (Default: 1}", - visible=False - ) - vc_inst_volume = gr.Slider( - minimum=0, - maximum=10, - label="Instrument volume", - value=1, - interactive=True, - step=1, - info="Adjust instrument volume (Default: 1}", - visible=False - ) - vc_combined_output = gr.Audio(label="Output Combined Audio", visible=False) - vc_combine = gr.Button("Combine",variant="primary", visible=False) - else: - with gr.Column(): - vc_audio_mode = gr.Dropdown(label="Input voice", choices=audio_mode, allow_custom_value=False, value="Upload audio") - # Input - vc_input = gr.Textbox(label="Input audio path", visible=False) - # Upload - vc_microphone_mode = gr.Checkbox(label="Use Microphone", value=False, visible=True, interactive=True) - vc_upload = gr.Audio(label="Upload audio file", source="upload", visible=True, interactive=True) - # Youtube - vc_download_audio = gr.Dropdown(label="Provider", choices=["Youtube"], allow_custom_value=False, visible=False, value="Youtube", info="Select provider (Default: Youtube)") - vc_link = gr.Textbox(label="Youtube URL", visible=False, info="Example: https://www.youtube.com/watch?v=Nc0sB1Bmf-A", placeholder="https://www.youtube.com/watch?v=...") - vc_log_yt = gr.Textbox(label="Output Information", visible=False, interactive=False) - vc_download_button = gr.Button("Download Audio", variant="primary", visible=False) - vc_audio_preview = gr.Audio(label="Audio Preview", visible=False) - # Splitter - vc_split_model = gr.Dropdown(label="Splitter Model", choices=["hdemucs_mmi", "htdemucs", "htdemucs_ft", "mdx", "mdx_q", "mdx_extra_q"], allow_custom_value=False, visible=False, value="htdemucs", info="Select the splitter model (Default: htdemucs)") - vc_split_log = gr.Textbox(label="Output Information", visible=False, interactive=False) - vc_split = gr.Button("Split Audio", variant="primary", visible=False) - vc_vocal_preview = gr.Audio(label="Vocal Preview", visible=False) - vc_inst_preview = gr.Audio(label="Instrumental Preview", visible=False) - # TTS - tts_text = gr.Textbox(label="TTS text", info="Text to speech input", visible=False) - tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female") - with gr.Column(): - vc_transform0 = gr.Number(label="Transpose", value=0, info='Type "12" to change from male to female voice. 
Type "-12" to change female to male voice') - f0method0 = gr.Radio( - label="Pitch extraction algorithm", - info=f0method_info, - choices=f0method_mode, - value="pm", - interactive=True - ) - index_rate1 = gr.Slider( - minimum=0, - maximum=1, - label="Retrieval feature ratio", - info="(Default: 0.7)", - value=0.7, - interactive=True, - ) - filter_radius0 = gr.Slider( - minimum=0, - maximum=7, - label="Apply Median Filtering", - info="The value represents the filter radius and can reduce breathiness.", - value=3, - step=1, - interactive=True, - ) - resample_sr0 = gr.Slider( - minimum=0, - maximum=48000, - label="Resample the output audio", - info="Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling", - value=0, - step=1, - interactive=True, - ) - rms_mix_rate0 = gr.Slider( - minimum=0, - maximum=1, - label="Volume Envelope", - info="Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used", - value=1, - interactive=True, - ) - protect0 = gr.Slider( - minimum=0, - maximum=0.5, - label="Voice Protection", - info="Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy", - value=0.5, - step=0.01, - interactive=True, - ) - with gr.Column(): - vc_log = gr.Textbox(label="Output Information", interactive=False) - vc_output = gr.Audio(label="Output Audio", interactive=False) - vc_convert = gr.Button("Convert", variant="primary") - vc_vocal_volume = gr.Slider( - minimum=0, - maximum=10, - label="Vocal volume", - value=1, - interactive=True, - step=1, - info="Adjust vocal volume (Default: 1}", - visible=False - ) - vc_inst_volume = gr.Slider( - minimum=0, - maximum=10, - label="Instrument volume", - value=1, - interactive=True, - step=1, - info="Adjust instrument volume (Default: 1}", - visible=False - ) - vc_combined_output = gr.Audio(label="Output Combined Audio", visible=False) - vc_combine = gr.Button("Combine",variant="primary", visible=False) - vc_convert.click( - fn=vc_fn, - inputs=[ - vc_audio_mode, - vc_input, - vc_upload, - tts_text, - tts_voice, - vc_transform0, - f0method0, - index_rate1, - filter_radius0, - resample_sr0, - rms_mix_rate0, - protect0, - ], - outputs=[vc_log ,vc_output] - ) - vc_download_button.click( - fn=download_audio, - inputs=[vc_link, vc_download_audio], - outputs=[vc_audio_preview, vc_log_yt] - ) - vc_split.click( - fn=cut_vocal_and_inst, - inputs=[vc_split_model], - outputs=[vc_split_log, vc_vocal_preview, vc_inst_preview, vc_input] - ) - vc_combine.click( - fn=combine_vocal_and_inst, - inputs=[vc_output, vc_vocal_volume, vc_inst_volume, vc_split_model], - outputs=[vc_combined_output] - ) - vc_microphone_mode.change( - fn=use_microphone, - inputs=vc_microphone_mode, - outputs=vc_upload - ) - vc_audio_mode.change( - fn=change_audio_mode, - inputs=[vc_audio_mode], - outputs=[ - vc_input, - vc_microphone_mode, - vc_upload, - vc_download_audio, - vc_link, - vc_log_yt, - vc_download_button, - vc_split_model, - vc_split_log, - vc_split, - vc_audio_preview, - vc_vocal_preview, - vc_inst_preview, - vc_vocal_volume, - vc_inst_volume, - vc_combined_output, - vc_combine, - tts_text, - tts_voice - ] - ) - app.queue(concurrency_count=5, max_size=50, api_open=config.api).launch(share=config.share) \ No newline at end of file diff --git 
a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_uninstall.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_uninstall.py deleted file mode 100644 index ad5178e76ff9245ca515fd826ab51907956f8591..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_uninstall.py +++ /dev/null @@ -1,650 +0,0 @@ -import functools -import os -import sys -import sysconfig -from importlib.util import cache_from_source -from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Set, Tuple - -from pip._internal.exceptions import UninstallationError -from pip._internal.locations import get_bin_prefix, get_bin_user -from pip._internal.metadata import BaseDistribution -from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.egg_link import egg_link_path_from_location -from pip._internal.utils.logging import getLogger, indent_log -from pip._internal.utils.misc import ask, normalize_path, renames, rmtree -from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory -from pip._internal.utils.virtualenv import running_under_virtualenv - -logger = getLogger(__name__) - - -def _script_names( - bin_dir: str, script_name: str, is_gui: bool -) -> Generator[str, None, None]: - """Create the fully qualified name of the files created by - {console,gui}_scripts for the given ``dist``. - Returns the list of file names - """ - exe_name = os.path.join(bin_dir, script_name) - yield exe_name - if not WINDOWS: - return - yield f"{exe_name}.exe" - yield f"{exe_name}.exe.manifest" - if is_gui: - yield f"{exe_name}-script.pyw" - else: - yield f"{exe_name}-script.py" - - -def _unique( - fn: Callable[..., Generator[Any, None, None]] -) -> Callable[..., Generator[Any, None, None]]: - @functools.wraps(fn) - def unique(*args: Any, **kw: Any) -> Generator[Any, None, None]: - seen: Set[Any] = set() - for item in fn(*args, **kw): - if item not in seen: - seen.add(item) - yield item - - return unique - - -@_unique -def uninstallation_paths(dist: BaseDistribution) -> Generator[str, None, None]: - """ - Yield all the uninstallation paths for dist based on RECORD-without-.py[co] - - Yield paths to all the files in RECORD. For each .py file in RECORD, add - the .pyc and .pyo in the same directory. - - UninstallPathSet.add() takes care of the __pycache__ .py[co]. - - If RECORD is not found, raises UninstallationError, - with possible information from the INSTALLER file. 
- - https://packaging.python.org/specifications/recording-installed-packages/ - """ - location = dist.location - assert location is not None, "not installed" - - entries = dist.iter_declared_entries() - if entries is None: - msg = "Cannot uninstall {dist}, RECORD file not found.".format(dist=dist) - installer = dist.installer - if not installer or installer == "pip": - dep = "{}=={}".format(dist.raw_name, dist.version) - msg += ( - " You might be able to recover from this via: " - "'pip install --force-reinstall --no-deps {}'.".format(dep) - ) - else: - msg += " Hint: The package was installed by {}.".format(installer) - raise UninstallationError(msg) - - for entry in entries: - path = os.path.join(location, entry) - yield path - if path.endswith(".py"): - dn, fn = os.path.split(path) - base = fn[:-3] - path = os.path.join(dn, base + ".pyc") - yield path - path = os.path.join(dn, base + ".pyo") - yield path - - -def compact(paths: Iterable[str]) -> Set[str]: - """Compact a path set to contain the minimal number of paths - necessary to contain all paths in the set. If /a/path/ and - /a/path/to/a/file.txt are both in the set, leave only the - shorter path.""" - - sep = os.path.sep - short_paths: Set[str] = set() - for path in sorted(paths, key=len): - should_skip = any( - path.startswith(shortpath.rstrip("*")) - and path[len(shortpath.rstrip("*").rstrip(sep))] == sep - for shortpath in short_paths - ) - if not should_skip: - short_paths.add(path) - return short_paths - - -def compress_for_rename(paths: Iterable[str]) -> Set[str]: - """Returns a set containing the paths that need to be renamed. - - This set may include directories when the original sequence of paths - included every file on disk. - """ - case_map = {os.path.normcase(p): p for p in paths} - remaining = set(case_map) - unchecked = sorted({os.path.split(p)[0] for p in case_map.values()}, key=len) - wildcards: Set[str] = set() - - def norm_join(*a: str) -> str: - return os.path.normcase(os.path.join(*a)) - - for root in unchecked: - if any(os.path.normcase(root).startswith(w) for w in wildcards): - # This directory has already been handled. - continue - - all_files: Set[str] = set() - all_subdirs: Set[str] = set() - for dirname, subdirs, files in os.walk(root): - all_subdirs.update(norm_join(root, dirname, d) for d in subdirs) - all_files.update(norm_join(root, dirname, f) for f in files) - # If all the files we found are in our remaining set of files to - # remove, then remove them from the latter set and add a wildcard - # for the directory. - if not (all_files - remaining): - remaining.difference_update(all_files) - wildcards.add(root + os.sep) - - return set(map(case_map.__getitem__, remaining)) | wildcards - - -def compress_for_output_listing(paths: Iterable[str]) -> Tuple[Set[str], Set[str]]: - """Returns a tuple of 2 sets of which paths to display to user - - The first set contains paths that would be deleted. Files of a package - are not added and the top-level directory of the package has a '*' added - at the end - to signify that all it's contents are removed. - - The second set contains files that would have been skipped in the above - folders. 
- """ - - will_remove = set(paths) - will_skip = set() - - # Determine folders and files - folders = set() - files = set() - for path in will_remove: - if path.endswith(".pyc"): - continue - if path.endswith("__init__.py") or ".dist-info" in path: - folders.add(os.path.dirname(path)) - files.add(path) - - # probably this one https://github.com/python/mypy/issues/390 - _normcased_files = set(map(os.path.normcase, files)) # type: ignore - - folders = compact(folders) - - # This walks the tree using os.walk to not miss extra folders - # that might get added. - for folder in folders: - for dirpath, _, dirfiles in os.walk(folder): - for fname in dirfiles: - if fname.endswith(".pyc"): - continue - - file_ = os.path.join(dirpath, fname) - if ( - os.path.isfile(file_) - and os.path.normcase(file_) not in _normcased_files - ): - # We are skipping this file. Add it to the set. - will_skip.add(file_) - - will_remove = files | {os.path.join(folder, "*") for folder in folders} - - return will_remove, will_skip - - -class StashedUninstallPathSet: - """A set of file rename operations to stash files while - tentatively uninstalling them.""" - - def __init__(self) -> None: - # Mapping from source file root to [Adjacent]TempDirectory - # for files under that directory. - self._save_dirs: Dict[str, TempDirectory] = {} - # (old path, new path) tuples for each move that may need - # to be undone. - self._moves: List[Tuple[str, str]] = [] - - def _get_directory_stash(self, path: str) -> str: - """Stashes a directory. - - Directories are stashed adjacent to their original location if - possible, or else moved/copied into the user's temp dir.""" - - try: - save_dir: TempDirectory = AdjacentTempDirectory(path) - except OSError: - save_dir = TempDirectory(kind="uninstall") - self._save_dirs[os.path.normcase(path)] = save_dir - - return save_dir.path - - def _get_file_stash(self, path: str) -> str: - """Stashes a file. - - If no root has been provided, one will be created for the directory - in the user's temp directory.""" - path = os.path.normcase(path) - head, old_head = os.path.dirname(path), None - save_dir = None - - while head != old_head: - try: - save_dir = self._save_dirs[head] - break - except KeyError: - pass - head, old_head = os.path.dirname(head), head - else: - # Did not find any suitable root - head = os.path.dirname(path) - save_dir = TempDirectory(kind="uninstall") - self._save_dirs[head] = save_dir - - relpath = os.path.relpath(path, head) - if relpath and relpath != os.path.curdir: - return os.path.join(save_dir.path, relpath) - return save_dir.path - - def stash(self, path: str) -> str: - """Stashes the directory or file and returns its new location. - Handle symlinks as files to avoid modifying the symlink targets. - """ - path_is_dir = os.path.isdir(path) and not os.path.islink(path) - if path_is_dir: - new_path = self._get_directory_stash(path) - else: - new_path = self._get_file_stash(path) - - self._moves.append((path, new_path)) - if path_is_dir and os.path.isdir(new_path): - # If we're moving a directory, we need to - # remove the destination first or else it will be - # moved to inside the existing directory. - # We just created new_path ourselves, so it will - # be removable. 
- os.rmdir(new_path) - renames(path, new_path) - return new_path - - def commit(self) -> None: - """Commits the uninstall by removing stashed files.""" - for _, save_dir in self._save_dirs.items(): - save_dir.cleanup() - self._moves = [] - self._save_dirs = {} - - def rollback(self) -> None: - """Undoes the uninstall by moving stashed files back.""" - for p in self._moves: - logger.info("Moving to %s\n from %s", *p) - - for new_path, path in self._moves: - try: - logger.debug("Replacing %s from %s", new_path, path) - if os.path.isfile(new_path) or os.path.islink(new_path): - os.unlink(new_path) - elif os.path.isdir(new_path): - rmtree(new_path) - renames(path, new_path) - except OSError as ex: - logger.error("Failed to restore %s", new_path) - logger.debug("Exception: %s", ex) - - self.commit() - - @property - def can_rollback(self) -> bool: - return bool(self._moves) - - -class UninstallPathSet: - """A set of file paths to be removed in the uninstallation of a - requirement.""" - - def __init__(self, dist: BaseDistribution) -> None: - self._paths: Set[str] = set() - self._refuse: Set[str] = set() - self._pth: Dict[str, UninstallPthEntries] = {} - self._dist = dist - self._moved_paths = StashedUninstallPathSet() - # Create local cache of normalize_path results. Creating an UninstallPathSet - # can result in hundreds/thousands of redundant calls to normalize_path with - # the same args, which hurts performance. - self._normalize_path_cached = functools.lru_cache()(normalize_path) - - def _permitted(self, path: str) -> bool: - """ - Return True if the given path is one we are permitted to - remove/modify, False otherwise. - - """ - # aka is_local, but caching normalized sys.prefix - if not running_under_virtualenv(): - return True - return path.startswith(self._normalize_path_cached(sys.prefix)) - - def add(self, path: str) -> None: - head, tail = os.path.split(path) - - # we normalize the head to resolve parent directory symlinks, but not - # the tail, since we only want to uninstall symlinks, not their targets - path = os.path.join(self._normalize_path_cached(head), os.path.normcase(tail)) - - if not os.path.exists(path): - return - if self._permitted(path): - self._paths.add(path) - else: - self._refuse.add(path) - - # __pycache__ files can show up after 'installed-files.txt' is created, - # due to imports - if os.path.splitext(path)[1] == ".py": - self.add(cache_from_source(path)) - - def add_pth(self, pth_file: str, entry: str) -> None: - pth_file = self._normalize_path_cached(pth_file) - if self._permitted(pth_file): - if pth_file not in self._pth: - self._pth[pth_file] = UninstallPthEntries(pth_file) - self._pth[pth_file].add(entry) - else: - self._refuse.add(pth_file) - - def remove(self, auto_confirm: bool = False, verbose: bool = False) -> None: - """Remove paths in ``self._paths`` with confirmation (unless - ``auto_confirm`` is True).""" - - if not self._paths: - logger.info( - "Can't uninstall '%s'. 
No files were found to uninstall.", - self._dist.raw_name, - ) - return - - dist_name_version = f"{self._dist.raw_name}-{self._dist.version}" - logger.info("Uninstalling %s:", dist_name_version) - - with indent_log(): - if auto_confirm or self._allowed_to_proceed(verbose): - moved = self._moved_paths - - for_rename = compress_for_rename(self._paths) - - for path in sorted(compact(for_rename)): - moved.stash(path) - logger.verbose("Removing file or directory %s", path) - - for pth in self._pth.values(): - pth.remove() - - logger.info("Successfully uninstalled %s", dist_name_version) - - def _allowed_to_proceed(self, verbose: bool) -> bool: - """Display which files would be deleted and prompt for confirmation""" - - def _display(msg: str, paths: Iterable[str]) -> None: - if not paths: - return - - logger.info(msg) - with indent_log(): - for path in sorted(compact(paths)): - logger.info(path) - - if not verbose: - will_remove, will_skip = compress_for_output_listing(self._paths) - else: - # In verbose mode, display all the files that are going to be - # deleted. - will_remove = set(self._paths) - will_skip = set() - - _display("Would remove:", will_remove) - _display("Would not remove (might be manually added):", will_skip) - _display("Would not remove (outside of prefix):", self._refuse) - if verbose: - _display("Will actually move:", compress_for_rename(self._paths)) - - return ask("Proceed (Y/n)? ", ("y", "n", "")) != "n" - - def rollback(self) -> None: - """Rollback the changes previously made by remove().""" - if not self._moved_paths.can_rollback: - logger.error( - "Can't roll back %s; was not uninstalled", - self._dist.raw_name, - ) - return - logger.info("Rolling back uninstall of %s", self._dist.raw_name) - self._moved_paths.rollback() - for pth in self._pth.values(): - pth.rollback() - - def commit(self) -> None: - """Remove temporary save dir: rollback will no longer be possible.""" - self._moved_paths.commit() - - @classmethod - def from_dist(cls, dist: BaseDistribution) -> "UninstallPathSet": - dist_location = dist.location - info_location = dist.info_location - if dist_location is None: - logger.info( - "Not uninstalling %s since it is not installed", - dist.canonical_name, - ) - return cls(dist) - - normalized_dist_location = normalize_path(dist_location) - if not dist.local: - logger.info( - "Not uninstalling %s at %s, outside environment %s", - dist.canonical_name, - normalized_dist_location, - sys.prefix, - ) - return cls(dist) - - if normalized_dist_location in { - p - for p in {sysconfig.get_path("stdlib"), sysconfig.get_path("platstdlib")} - if p - }: - logger.info( - "Not uninstalling %s at %s, as it is in the standard library.", - dist.canonical_name, - normalized_dist_location, - ) - return cls(dist) - - paths_to_remove = cls(dist) - develop_egg_link = egg_link_path_from_location(dist.raw_name) - - # Distribution is installed with metadata in a "flat" .egg-info - # directory. This means it is not a modern .dist-info installation, an - # egg, or legacy editable. - setuptools_flat_installation = ( - dist.installed_with_setuptools_egg_info - and info_location is not None - and os.path.exists(info_location) - # If dist is editable and the location points to a ``.egg-info``, - # we are in fact in the legacy editable case. 
- and not info_location.endswith(f"{dist.setuptools_filename}.egg-info") - ) - - # Uninstall cases order do matter as in the case of 2 installs of the - # same package, pip needs to uninstall the currently detected version - if setuptools_flat_installation: - if info_location is not None: - paths_to_remove.add(info_location) - installed_files = dist.iter_declared_entries() - if installed_files is not None: - for installed_file in installed_files: - paths_to_remove.add(os.path.join(dist_location, installed_file)) - # FIXME: need a test for this elif block - # occurs with --single-version-externally-managed/--record outside - # of pip - elif dist.is_file("top_level.txt"): - try: - namespace_packages = dist.read_text("namespace_packages.txt") - except FileNotFoundError: - namespaces = [] - else: - namespaces = namespace_packages.splitlines(keepends=False) - for top_level_pkg in [ - p - for p in dist.read_text("top_level.txt").splitlines() - if p and p not in namespaces - ]: - path = os.path.join(dist_location, top_level_pkg) - paths_to_remove.add(path) - paths_to_remove.add(f"{path}.py") - paths_to_remove.add(f"{path}.pyc") - paths_to_remove.add(f"{path}.pyo") - - elif dist.installed_by_distutils: - raise UninstallationError( - "Cannot uninstall {!r}. It is a distutils installed project " - "and thus we cannot accurately determine which files belong " - "to it which would lead to only a partial uninstall.".format( - dist.raw_name, - ) - ) - - elif dist.installed_as_egg: - # package installed by easy_install - # We cannot match on dist.egg_name because it can slightly vary - # i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg - paths_to_remove.add(dist_location) - easy_install_egg = os.path.split(dist_location)[1] - easy_install_pth = os.path.join( - os.path.dirname(dist_location), - "easy-install.pth", - ) - paths_to_remove.add_pth(easy_install_pth, "./" + easy_install_egg) - - elif dist.installed_with_dist_info: - for path in uninstallation_paths(dist): - paths_to_remove.add(path) - - elif develop_egg_link: - # PEP 660 modern editable is handled in the ``.dist-info`` case - # above, so this only covers the setuptools-style editable. 
- with open(develop_egg_link) as fh: - link_pointer = os.path.normcase(fh.readline().strip()) - normalized_link_pointer = paths_to_remove._normalize_path_cached( - link_pointer - ) - assert os.path.samefile( - normalized_link_pointer, normalized_dist_location - ), ( - f"Egg-link {develop_egg_link} (to {link_pointer}) does not match " - f"installed location of {dist.raw_name} (at {dist_location})" - ) - paths_to_remove.add(develop_egg_link) - easy_install_pth = os.path.join( - os.path.dirname(develop_egg_link), "easy-install.pth" - ) - paths_to_remove.add_pth(easy_install_pth, dist_location) - - else: - logger.debug( - "Not sure how to uninstall: %s - Check: %s", - dist, - dist_location, - ) - - if dist.in_usersite: - bin_dir = get_bin_user() - else: - bin_dir = get_bin_prefix() - - # find distutils scripts= scripts - try: - for script in dist.iter_distutils_script_names(): - paths_to_remove.add(os.path.join(bin_dir, script)) - if WINDOWS: - paths_to_remove.add(os.path.join(bin_dir, f"{script}.bat")) - except (FileNotFoundError, NotADirectoryError): - pass - - # find console_scripts and gui_scripts - def iter_scripts_to_remove( - dist: BaseDistribution, - bin_dir: str, - ) -> Generator[str, None, None]: - for entry_point in dist.iter_entry_points(): - if entry_point.group == "console_scripts": - yield from _script_names(bin_dir, entry_point.name, False) - elif entry_point.group == "gui_scripts": - yield from _script_names(bin_dir, entry_point.name, True) - - for s in iter_scripts_to_remove(dist, bin_dir): - paths_to_remove.add(s) - - return paths_to_remove - - -class UninstallPthEntries: - def __init__(self, pth_file: str) -> None: - self.file = pth_file - self.entries: Set[str] = set() - self._saved_lines: Optional[List[bytes]] = None - - def add(self, entry: str) -> None: - entry = os.path.normcase(entry) - # On Windows, os.path.normcase converts the entry to use - # backslashes. This is correct for entries that describe absolute - # paths outside of site-packages, but all the others use forward - # slashes. - # os.path.splitdrive is used instead of os.path.isabs because isabs - # treats non-absolute paths with drive letter markings like c:foo\bar - # as absolute paths. It also does not recognize UNC paths if they don't - # have more than "\\sever\share". Valid examples: "\\server\share\" or - # "\\server\share\folder". 
- if WINDOWS and not os.path.splitdrive(entry)[0]: - entry = entry.replace("\\", "/") - self.entries.add(entry) - - def remove(self) -> None: - logger.verbose("Removing pth entries from %s:", self.file) - - # If the file doesn't exist, log a warning and return - if not os.path.isfile(self.file): - logger.warning("Cannot remove entries from nonexistent file %s", self.file) - return - with open(self.file, "rb") as fh: - # windows uses '\r\n' with py3k, but uses '\n' with py2.x - lines = fh.readlines() - self._saved_lines = lines - if any(b"\r\n" in line for line in lines): - endline = "\r\n" - else: - endline = "\n" - # handle missing trailing newline - if lines and not lines[-1].endswith(endline.encode("utf-8")): - lines[-1] = lines[-1] + endline.encode("utf-8") - for entry in self.entries: - try: - logger.verbose("Removing entry: %s", entry) - lines.remove((entry + endline).encode("utf-8")) - except ValueError: - pass - with open(self.file, "wb") as fh: - fh.writelines(lines) - - def rollback(self) -> bool: - if self._saved_lines is None: - logger.error("Cannot roll back changes to %s, none were made", self.file) - return False - logger.debug("Rolling %s back to previous state", self.file) - with open(self.file, "wb") as fh: - fh.writelines(self._saved_lines) - return True diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/escprober.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/escprober.py deleted file mode 100644 index fd713830d36cabc6a0fb4ab4e8cf426a84decdc6..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/escprober.py +++ /dev/null @@ -1,102 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from typing import Optional, Union - -from .charsetprober import CharSetProber -from .codingstatemachine import CodingStateMachine -from .enums import LanguageFilter, MachineState, ProbingState -from .escsm import ( - HZ_SM_MODEL, - ISO2022CN_SM_MODEL, - ISO2022JP_SM_MODEL, - ISO2022KR_SM_MODEL, -) - - -class EscCharSetProber(CharSetProber): - """ - This CharSetProber uses a "code scheme" approach for detecting encodings, - whereby easily recognizable escape or shift sequences are relied on to - identify these encodings. 
- """ - - def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None: - super().__init__(lang_filter=lang_filter) - self.coding_sm = [] - if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED: - self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL)) - self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL)) - if self.lang_filter & LanguageFilter.JAPANESE: - self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL)) - if self.lang_filter & LanguageFilter.KOREAN: - self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL)) - self.active_sm_count = 0 - self._detected_charset: Optional[str] = None - self._detected_language: Optional[str] = None - self._state = ProbingState.DETECTING - self.reset() - - def reset(self) -> None: - super().reset() - for coding_sm in self.coding_sm: - coding_sm.active = True - coding_sm.reset() - self.active_sm_count = len(self.coding_sm) - self._detected_charset = None - self._detected_language = None - - @property - def charset_name(self) -> Optional[str]: - return self._detected_charset - - @property - def language(self) -> Optional[str]: - return self._detected_language - - def get_confidence(self) -> float: - return 0.99 if self._detected_charset else 0.00 - - def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: - for c in byte_str: - for coding_sm in self.coding_sm: - if not coding_sm.active: - continue - coding_state = coding_sm.next_state(c) - if coding_state == MachineState.ERROR: - coding_sm.active = False - self.active_sm_count -= 1 - if self.active_sm_count <= 0: - self._state = ProbingState.NOT_ME - return self.state - elif coding_state == MachineState.ITS_ME: - self._state = ProbingState.FOUND_IT - self._detected_charset = coding_sm.get_coding_state_machine() - self._detected_language = coding_sm.language - return self.state - - return self.state diff --git a/spaces/Aveygo/AstroSleuth/README.md b/spaces/Aveygo/AstroSleuth/README.md deleted file mode 100644 index ac64ff16c0b456c7708a5f23e0eef6c2b641f8f4..0000000000000000000000000000000000000000 --- a/spaces/Aveygo/AstroSleuth/README.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: AstroSleuth -emoji: 🌖 -colorFrom: pink -colorTo: yellow -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: gpl-2.0 ---- - -# AstroSleuth - -

        - -

        - -[![Aveygo - AstroSleuth](https://img.shields.io/static/v1?label=Aveygo&message=AstroSleuth&color=black&logo=github)](https://github.com/Aveygo/AstroSleuth "Go to GitHub repo") -[![stars - AstroSleuth](https://img.shields.io/github/stars/Aveygo/AstroSleuth?style=social)](https://github.com/Aveygo/AstroSleuth)[![Python 3.9.9](https://img.shields.io/badge/python-3.9.9-black.svg)](https://www.python.org/downloads/release/python-399/) - -The (only?) free, zero bulls**t, 200 line, open source astrophotgraphy upscaler. - -Sick of the commercialisation of deep space tools, I wanted a solution that can run on almost any hardware with epic results. - -I started this project a regrettably long time ago. A lot has changed since then. I tried to share my work, got burned, removed it, perfected it, and fell into a well of "is it good enough". - -I present my original idea, a finetuned realesr-gan model trained on 15k images of astrophotography. It is behind my works on [reddit](https://www.reddit.com/user/CodingCoda), my [youtube](https://www.youtube.com/channel/UCHode4WV0hteze-ZDEG5atQ) attempt -and my [cloudy nights post](https://www.cloudynights.com/topic/816869-astrosleuth-image-denoiser-upscaler/), and I hope it will suit you well. - -## Running - -### Hugging face - Good for testing/playing around -1. Go [here](https://huggingface.co/spaces/Aveygo/AstroSleuth). Please note that hugging face servers use 2 core cpus and you'll likely be sharing, so large images may take a very long time, even timing out. - -### Colab - Best method if you don't have a GPU -1. Visit [colab](https://colab.research.google.com/drive/1LxiNsnokF-6OmICSxWNvTeFEEZvRM2Lp?usp=sharing) -2. Enjoy! - -### Locally (Binaries) - Recommended method -1. Go to the [releases](https://github.com/Aveygo/AstroSleuth/releases) page -2. Download the latest zip for your platform, eg: astrosleuth-v0.1.0-windows.zip -3. Unzip and enter the folder -4. Right click -> open in terminal -5. ```astrosleuth.exe -n astrosleuth -i [input source] -o [output destination]``` - -### Locally (Python) - Fairly complicated, is the "proper" way to self-host -1. Install [python](https://www.python.org/downloads/) (and [pip](https://phoenixnap.com/kb/install-pip-windows)) -2. Download and unzip the latest [release](https://github.com/Aveygo/AstroSleuth/archive/refs/heads/master.zip) of AstroSleuth -3. Open the terminal (right-click -> terminal) and run ```pip install -r requirements.txt``` -4. Run the streamlit interface with ```streamlit run app.py``` - -### Local (Python - Pytorch) - GPU Acceleration -1. Follow the instructions on the [pytorch](https://pytorch.org/get-started/locally/) website to install pytorch. -2. Follow the "Locally (Python)" instructions, but run with ```streamlit run app.py -- --gpu --torch``` for step 4 - -### Local (Python - ONNX) - GPU Acceleration -Please note, this method only works if you have cuda version 11, check your drivers first! - -1. Run ```pip3 uninstall onnxruntime``` -2. and then ```pip3 install onnxruntime-gpu``` - -## Extra information - -Please see [details](https://github.com/Aveygo/AstroSleuth/blob/master/results/details.md) for image samples and potential workflow improvements and [training](https://github.com/Aveygo/AstroSleuth/blob/master/training.md) for details on how the models are trained. - -## Known issues - -Results are now more comparable with BlurXterminator after training improvements (see [training](https://github.com/Aveygo/AstroSleuth/blob/master/training.md)). 
AstroSleuthV2 weights will be on the hugging face repo, but not automatically downloaded for the time being. - -~~Currently investigating a "zero-knowledge" solution.~~ -No "real" zero-knowledge solution seems very practical. Still on the lookout for the time being. - -The biggest concern currently is the discriminator failing to detect real from fakes, regardless of it's weight on the generator. This results in AstroSleuthV2 adding a lot more stars than it should (supposably also due to the new feature model having some effect), and overall not performing to my standards. A fix is currently underway but will take a while to train/find best training parameters, and maybe needs a new discriminator altogether. - -Another issue is star diffraction spikes being wavy or "spotty". A better disscriminator will help, but a dataset more focused on diffraction spikes is much more optimal. Possible synthetic dataset in the works currently. - -## Concerns and Personal Notes - -Its not a understatement that this tool has changed my life. It was my first machine learning project. I even built full-stack applications searching for the perfect way to share my work. -I will continue to do so. Ask for any improvements and I will likely impliment them. I am begging for an excuse to work on it so any feedback is appreciated. I am interested in creating a Photoshop/Pixinsight plugin if thats what even a single person wants, just open a git issue [here](https://github.com/Aveygo/AstroSleuth/issues) and I'll see to it. - -For the redditors, this tool is presented as is, free as long as it stays free, I cannot convey though words how much I dont care that its not "scientifically accurate". - - - \ No newline at end of file diff --git a/spaces/AxelBell/EasyOCR_text_recognition/app.py b/spaces/AxelBell/EasyOCR_text_recognition/app.py deleted file mode 100644 index a2c54971a3a7450618dac9a73693b2d4e590fb4f..0000000000000000000000000000000000000000 --- a/spaces/AxelBell/EasyOCR_text_recognition/app.py +++ /dev/null @@ -1,136 +0,0 @@ -from pprint import pprint -import gradio as gr -from data import Data - -data = Data("./demo_data.toml") - -with gr.Blocks(theme="freddyaboulton/dracula_revamped", css=data.assets["css"]) as demo: - with gr.Column(): - gr.HTML(data.assets["header"]) - with gr.Row(): - with gr.Column(variant="panel"): - data.render("image") - with gr.Accordion("Advanced Settings", open=False): - with gr.Tabs(): - with gr.Tab("General"): - with gr.Group(): - lang_shadow_api = gr.Dropdown( - [ - l.split(" ")[-1][1:-1] - for l in data.inputs["lang"].choices - ], - value=[ - l.split(" ")[-1][1:-1] - for l in data.inputs["lang"].value - ], - visible=False, - ) - data.render("lang") - with gr.Row(): - data.render("decoder", "beamWidth") - data.render("allowlist", "blocklist") - with gr.Row(): - data.render("paragraph", "detail") - data.render( - "min_size", "rotation_info", "output_format" - ) - with gr.Tab("Contrast"): - with gr.Group(): - data.render( - "contrast_ths", - "adjust_contrast", - ) - with gr.Tab("Text Detection"): - with gr.Group(): - data.render( - "text_threshold", - "low_text", - "link_threshold", - "mag_ratio", - "threshold", - "bbox_min_score", - "bbox_min_size", - "max_candidates", - ) - with gr.Tab("Bounding Box Merging"): - with gr.Group(): - gr.HTML( - "

        This set of parameter controls when adjacent bounding boxes merge with each other. Every parameters except 'Slope threshold' is in the unit of box height" - ) - data.render( - "slope_ths", - "ycenter_ths", - "height_ths", - "width_ths", - "add_margin", - "y_ths", - "x_ths", - ) - with gr.Row(): - btn_clear = gr.ClearButton( - [data.inputs["image"], *data.outputs_list], value="Reset" - ) - btn_run = gr.Button("Run!", variant="primary") - gr.Examples( - examples=data.examples, - elem_id="examples", - inputs=data.inputs_list, - outputs=data.outputs_list, - fn=Data.process_image, - cache_examples=False, - ) - with gr.Column(variant="panel"): - data.render("image_out") - with gr.Tabs(): - with gr.Tab("Data"): - data.render("data_out") - with gr.Tab("Raw"): - data.render("raw_out") - gr.HTML(data.assets["footer"]) - - data.inputs["lang"].change( - fn=lambda v: [l.split(" ")[-1][1:-1] for l in v], - inputs=data.inputs["lang"], - outputs=lang_shadow_api, - api_name=False, - ) - - btn_run.click( - fn=data.process_image, - inputs=[lang_shadow_api, *data.inputs_list[1:]], - outputs=data.outputs_list, - scroll_to_output=True, - ) - data.inputs["decoder"].select( - lambda d: data.inputs["beamWidth"].update( - interactive=True if d != "greedy" else False - ), - data.inputs["decoder"], - data.inputs["beamWidth"], - api_name=False, - ) - data.inputs["paragraph"].select( - lambda p: [ - data.inputs["x_ths"].update(interactive=p), - data.inputs["y_ths"].update(interactive=p), - ], - data.inputs["paragraph"], - [data.inputs["x_ths"], data.inputs["y_ths"]], - api_name=False, - ) - data.inputs["detail"].select( - lambda p: data.inputs["output_format"].update(value=data.default[-1]), - data.inputs["detail"], - data.inputs["output_format"], - api_name=False, - ) - data.inputs["output_format"].select( - lambda p: data.inputs["detail"].update(value=True), - data.inputs["output_format"], - data.inputs["detail"], - api_name=False, - ) - btn_clear.click(lambda: data.default, [], data.inputs_list, api_name=False) - - -demo.queue().launch() diff --git a/spaces/Benson/text-generation/Examples/Classic Apk.md b/spaces/Benson/text-generation/Examples/Classic Apk.md deleted file mode 100644 index 9efa5ca63697172dc1677136bf588941643376d9..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Classic Apk.md +++ /dev/null @@ -1,83 +0,0 @@ - -

How to Download and Play UNO!™ on Your Mobile Device
-
Do you love playing UNO, the classic card game that brings fun and excitement to any occasion? Would you like to be able to play it anytime, anywhere, with anyone? If so, you are in luck! UNO!™ is the official UNO mobile game that lets you enjoy the game on your smartphone or tablet. In this article, we will show you how to download and play UNO!™, along with some tips and tricks for getting the most out of your experience.
-
What is UNO!™?
-
The classic card game with a twist
-
UNO!™ is based on the original card game created in 1971 by Merle Robbins. The goal of the game is to get rid of all your cards before your opponents do, by matching the color or number of the card on top of the discard pile. You can also use special cards, such as Skip, Reverse, Draw Two, Wild and Wild Draw Four, to change the direction of play, force your opponents to draw more cards, or change the color in play. And don't forget to shout "UNO" when you only have one card left!
-
classic apk
Download: https://bltlly.com/2v6Ml6
-
The features and modes of UNO!™
-
UNO!™ is more than a digital version of the classic game. It also offers new features and modes that make it more fun and challenging. Here are some of them:
-
• You can play with different house rules, such as stacking, skipping, 7-0, hand swapping, drawing to match, and more.
• You can play in different modes, such as Quick Play, Classic Mode, Go Wild Mode, 2v2 Mode, Room Mode and Tournament Mode.
• You can compete in world-series tournaments and special events to earn free rewards and top the leaderboards.
• You can team up with friends or family in 2v2 Mode and work together to win.
• You can connect with your friends in UNO!™ through clubs and send each other gifts.
• You can chat and shout UNO with your partner or opponent during the game.
-
Download from the Google Play Store or App Store
-
To download UNO!™ on your mobile device, you need a compatible Android or iOS device. The app requires Android 4.4 or higher, or iOS 9.0 or higher. You also need enough storage space on your device: the app is about 200 MB on Android devices and about 300 MB on iOS devices.
-
To download the app, follow these steps:
-
1. Open the Google Play Store app on your Android device, or the App Store app on your iOS device.
2. Search for "UNO" or "UNO!™" in the search bar.
3. Tap the app icon that says "UNO!™" by Mattel163 Limited.
4. Tap "Install" (on Android devices) or "Get" (on iOS devices) to start downloading the app.
-
Install and launch the app
-
After downloading the app, you need to install it and launch it on your device. To do so, follow these steps:
-
1. Tap "Open" (on Android devices) or the app icon on your home screen (on iOS devices) to launch the app.
2. Wait for the app to load and display the main menu.
3. Tap "Accept" to agree to the terms of service and privacy policy.
4. Tap "Allow" to grant the app access to your device's storage, microphone and camera.
-
Sign in or create an account
-
To play UNO!™ you need to sign in or create an account. You can use your Facebook, Google, Apple or email account to do so. To sign in or create an account, follow these steps:
-
1. Tap the button that corresponds to your preferred account type.
2. Follow the on-screen instructions to sign in or create an account.
3. Choose a username and an avatar for your UNO!™ profile.
4. Tap "Confirm" to complete the process.
-
How to play UNO!™ with friends and family
-
Choose a game mode and customize your rules
-
Invite or join a friend or family member
-
To invite or join a friend or family member, tap the "Friends" icon in the bottom-left corner of the screen. You can see your online friends and their status on this screen. You can also add new friends by tapping the "+" icon in the top-right corner of the screen, and you can search for friends by their username, ID or QR code. To invite a friend or family member, follow these steps:
-
1. Tap the friend or family member you want to invite.
2. Tap "Invite" to send them an invitation.
3. Wait for them to accept your invitation and join your game.
-
To join a friend or family member, follow these steps:
-
1. Tap the notification that says "Your friend has invited you to play UNO!™".
2. Tap "Join" to accept their invitation and join their game.
-
Play and chat with your partner or opponent
-
To play UNO!™ with your partner or opponent, follow the rules of the game and try to get rid of all your cards before they do. You can also use special cards and strategies to gain an advantage over them. To chat with your partner or opponent, tap the chat icon in the bottom-right corner of the screen. You can send text messages, emojis, stickers, voice messages and video messages to communicate with them, and you can shout UNO by tapping the UNO button when you only have one card left.
-
How to compete in tournaments and events
-
Join world-series tournaments and special events
-
Earn free rewards and top the leaderboards
-
To earn free rewards and top the leaderboards, you need to play well and score high in tournaments and events. You can earn points, coins, diamonds, tokens and other rewards by winning matches, completing challenges and placing high on the leaderboards. You can use these rewards to unlock new cards, avatars, frames, backgrounds and more, and you can compare your performance and achievements with players around the world.
-
Take part in the UNO!™ Mobile Community Cup 2023 United States and Canada
-
One of the most exciting tournaments you can take part in is the UNO!™ Mobile Community Cup 2023 United States and Canada. This is a regional tournament open to all players in the United States and Canada, running from June 19 to July 2, 2023. The tournament has four stages: Qualifiers, Round of 64, Round of 16 and Finals. The top 64 players from the Qualifiers advance to the Round of 64, where they compete in a single-elimination bracket. The winners of each match advance to the next round until only four players remain in the Finals. The Finals are a best-of-five series, and the player who wins three matches is crowned champion. The champion receives a grand prize of $10,000 USD, plus a trophy and a special avatar. The runner-up receives $5,000 USD, and third and fourth place receive $2,500 USD each.
-
Conclusion
-
Frequently asked questions
-
Q: Is UNO!™ free to play?
-
A: Yes, UNO!™ is free to download and play on your mobile device. However, some features and items may require in-app purchases or watching ads.
-
Q: How can I contact UNO!™ customer support?
-
A: You can contact UNO!™ customer support by tapping "Settings" in the top-left corner of the screen, then tapping "Help" and "Contact Us". You can also visit the official website at https://www.letsplayuno.com/ or the Facebook page at https://www.facebook.com/UNOnow/.
-
Q: How can I report a bug or a cheater in UNO!™?
-
A: You can report a bug or a cheater in UNO!™ by pressing the "Report" button in the top-right corner of the screen during or after a match. You can also contact UNO!™ customer support by following the steps above.
-
Q: How can I join or create a club in UNO!™?
-
A: You can join or create a club in UNO!™. You can search for existing clubs by their name or ID, or create your own club by tapping the "+" icon in the top-right corner of the screen.
-
Q: How can I get more cards, coins, diamonds, tokens and other rewards in UNO!™?
-
A: You can get more cards, coins, diamonds, tokens and other rewards in UNO!™ by playing matches, completing challenges, placing high on the leaderboards, taking part in tournaments and events, joining or creating clubs, sending or receiving gifts, watching ads, or making in-app purchases.
        -
        -
        \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Bola De Dragn Explosin Furiosa 2 Apk.md b/spaces/Benson/text-generation/Examples/Descargar Bola De Dragn Explosin Furiosa 2 Apk.md deleted file mode 100644 index a177240136ef6e01bb42de165b9e06368a4b8242..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Bola De Dragn Explosin Furiosa 2 Apk.md +++ /dev/null @@ -1,55 +0,0 @@ - -
- Benefits of listening to Happiness by Rex Orange County | This section should highlight the advantages of downloading the song rather than streaming it online. It should also mention some of the positive effects of listening to the song, such as improved mood, stress relief, etc. | | H2: How to download Happiness by Rex Orange County legally and safely? | - Platforms that offer legal and safe downloads
- Steps to download Happiness by Rex Orange County from each platform
- Tips to avoid malware and viruses when downloading music | This section should provide a list of platforms that let users download the song legally and safely, such as Spotify, Apple Music, YouTube Music, etc. It should also provide a step-by-step guide on how to download the song from each platform, plus some tips on avoiding malware and viruses when downloading music, such as using antivirus software, checking the file extension, etc. | | | H2: How to enjoy Happiness by Rex Orange County after downloading it? | - Ways to play the song offline
- Ways to share the song with others
- Ways to support the artist | This section should suggest some ways to enjoy the song after downloading it, such as playing it offline on various devices, sharing it with friends and family, creating playlists, etc. It should also encourage users to support the artist by following him on social media, buying his merchandise, attending his concerts, etc. | | H2: Conclusion | N/A | This section should summarize the main points of the article and end with a call to action, such as inviting users to comment, share or subscribe. | Table 2: Article with HTML formatting

How to Download Happiness by Rex Orange County
-
If you are looking for a way to download Happiness by Rex Orange County and enjoy it offline, you have come to the right place. In this article, we will show you how to download the song legally and safely from various platforms, and how to get the most out of it after downloading it.
-
What is Happiness by Rex Orange County?
-
Happiness by Rex Orange County is a song written and performed by Alexander James O'Connor, better known by his stage name Rex Orange County. He is a British singer-songwriter and multi-instrumentalist who rose to fame after collaborating with Tyler, The Creator on his album Flower Boy.
-
download dragon ball furious blast 2 apk
Download: https://bltlly.com/2v6Kif
-
Happiness is the tenth and final track on Apricot Princess, his second studio album. The song is a piano ballad with radiant strings that showcases his soulful vocals and emotional delivery. The lyrics are about his relationship with his girlfriend Thea Morgan-Murrell, who also appears in the song's music video.
-
The song explores themes of love, commitment, insecurity and hope. He expresses his doubts about whether she will still love him when he grows old and forgets things, but also his gratitude for having her in his life. He also wishes her happiness and encourages her to be herself.
-
Why download Happiness by Rex Orange County?
-
Benefits of downloading music
-
Downloading music has many benefits compared to streaming it online. Some of them are:
-
• You can listen to your favorite songs anytime, anywhere, without depending on an internet connection or a data plan.
• You can save money on streaming subscriptions or data charges.
• You can avoid annoying ads or interruptions that can ruin your listening experience.
• You can have more control over your music library and playlists.
• You can support your favorite artists by buying their music instead of streaming it for free.
-
Listening to Happiness by Rex Orange County.
-
Tips to avoid malware and viruses when downloading music
-
Downloading music can also expose you to some risks, such as malware and viruses that can damage your device or steal your personal information. Here are some tips to avoid malware and viruses when downloading music:
-
• Use a reliable and trusted platform that offers legal and safe downloads, such as the ones we mentioned above.
• Use reliable, up-to-date antivirus software that can scan for and remove any malicious files or programs from your device.
• Check the file extension and the size of the download before opening it. Avoid opening files that have unusual extensions or that are far too large or too small for a song.
• Read the reviews and ratings of the song and the platform before downloading it. Avoid downloading songs that have negative or suspicious comments from other users.
• Do not click on any pop-ups, ads or links that appear while downloading music. They can redirect you to malicious websites or download unwanted programs or files onto your device.
-
How to enjoy Happiness by Rex Orange County after downloading it?
-
Once you have downloaded Happiness by Rex Orange County, you can enjoy it in many ways. Here are some suggestions:
-
Ways to play the song offline
-
You can play the song offline on various devices, such as your smartphone, tablet, laptop, desktop or mp3 player. You can also use headphones, speakers or earbuds to improve the sound quality and the experience, and adjust the volume, playback speed or equalizer settings to suit your preferences.
-
Ways to share the song with others
-
Ways to support the artist
-
        Puedes apoyar a Rex Orange County siguiéndolo en sus cuentas de redes sociales, como Instagram, Twitter, YouTube. También puede visitar su sitio web oficial None: - self._compare_key = key - self._defining_class = defining_class - - def __hash__(self) -> int: - return hash(self._compare_key) - - def __lt__(self, other: Any) -> bool: - return self._compare(other, operator.__lt__) - - def __le__(self, other: Any) -> bool: - return self._compare(other, operator.__le__) - - def __gt__(self, other: Any) -> bool: - return self._compare(other, operator.__gt__) - - def __ge__(self, other: Any) -> bool: - return self._compare(other, operator.__ge__) - - def __eq__(self, other: Any) -> bool: - return self._compare(other, operator.__eq__) - - def _compare(self, other: Any, method: Callable[[Any, Any], bool]) -> bool: - if not isinstance(other, self._defining_class): - return NotImplemented - - return method(self._compare_key, other._compare_key) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/version.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/version.py deleted file mode 100644 index c7c8bb6ff4f8ed84e466a66cac6b953b901626ea..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/version.py +++ /dev/null @@ -1,739 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2017 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -""" -Implementation of a flexible versioning scheme providing support for PEP-440, -setuptools-compatible and semantic versioning. -""" - -import logging -import re - -from .compat import string_types -from .util import parse_requirement - -__all__ = ['NormalizedVersion', 'NormalizedMatcher', - 'LegacyVersion', 'LegacyMatcher', - 'SemanticVersion', 'SemanticMatcher', - 'UnsupportedVersionError', 'get_scheme'] - -logger = logging.getLogger(__name__) - - -class UnsupportedVersionError(ValueError): - """This is an unsupported version.""" - pass - - -class Version(object): - def __init__(self, s): - self._string = s = s.strip() - self._parts = parts = self.parse(s) - assert isinstance(parts, tuple) - assert len(parts) > 0 - - def parse(self, s): - raise NotImplementedError('please implement in a subclass') - - def _check_compatible(self, other): - if type(self) != type(other): - raise TypeError('cannot compare %r and %r' % (self, other)) - - def __eq__(self, other): - self._check_compatible(other) - return self._parts == other._parts - - def __ne__(self, other): - return not self.__eq__(other) - - def __lt__(self, other): - self._check_compatible(other) - return self._parts < other._parts - - def __gt__(self, other): - return not (self.__lt__(other) or self.__eq__(other)) - - def __le__(self, other): - return self.__lt__(other) or self.__eq__(other) - - def __ge__(self, other): - return self.__gt__(other) or self.__eq__(other) - - # See http://docs.python.org/reference/datamodel#object.__hash__ - def __hash__(self): - return hash(self._parts) - - def __repr__(self): - return "%s('%s')" % (self.__class__.__name__, self._string) - - def __str__(self): - return self._string - - @property - def is_prerelease(self): - raise NotImplementedError('Please implement in subclasses.') - - -class Matcher(object): - version_class = None - - # value is either a callable or the name of a method - _operators = { - '<': lambda v, c, p: v < c, - '>': lambda v, c, p: v > c, - '<=': lambda v, c, p: v == c or v < c, - '>=': lambda v, c, p: v == c or v > c, - '==': 
lambda v, c, p: v == c, - '===': lambda v, c, p: v == c, - # by default, compatible => >=. - '~=': lambda v, c, p: v == c or v > c, - '!=': lambda v, c, p: v != c, - } - - # this is a method only to support alternative implementations - # via overriding - def parse_requirement(self, s): - return parse_requirement(s) - - def __init__(self, s): - if self.version_class is None: - raise ValueError('Please specify a version class') - self._string = s = s.strip() - r = self.parse_requirement(s) - if not r: - raise ValueError('Not valid: %r' % s) - self.name = r.name - self.key = self.name.lower() # for case-insensitive comparisons - clist = [] - if r.constraints: - # import pdb; pdb.set_trace() - for op, s in r.constraints: - if s.endswith('.*'): - if op not in ('==', '!='): - raise ValueError('\'.*\' not allowed for ' - '%r constraints' % op) - # Could be a partial version (e.g. for '2.*') which - # won't parse as a version, so keep it as a string - vn, prefix = s[:-2], True - # Just to check that vn is a valid version - self.version_class(vn) - else: - # Should parse as a version, so we can create an - # instance for the comparison - vn, prefix = self.version_class(s), False - clist.append((op, vn, prefix)) - self._parts = tuple(clist) - - def match(self, version): - """ - Check if the provided version matches the constraints. - - :param version: The version to match against this instance. - :type version: String or :class:`Version` instance. - """ - if isinstance(version, string_types): - version = self.version_class(version) - for operator, constraint, prefix in self._parts: - f = self._operators.get(operator) - if isinstance(f, string_types): - f = getattr(self, f) - if not f: - msg = ('%r not implemented ' - 'for %s' % (operator, self.__class__.__name__)) - raise NotImplementedError(msg) - if not f(version, constraint, prefix): - return False - return True - - @property - def exact_version(self): - result = None - if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='): - result = self._parts[0][1] - return result - - def _check_compatible(self, other): - if type(self) != type(other) or self.name != other.name: - raise TypeError('cannot compare %s and %s' % (self, other)) - - def __eq__(self, other): - self._check_compatible(other) - return self.key == other.key and self._parts == other._parts - - def __ne__(self, other): - return not self.__eq__(other) - - # See http://docs.python.org/reference/datamodel#object.__hash__ - def __hash__(self): - return hash(self.key) + hash(self._parts) - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self._string) - - def __str__(self): - return self._string - - -PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?' - r'(\.(post)(\d+))?(\.(dev)(\d+))?' 
- r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$') - - -def _pep_440_key(s): - s = s.strip() - m = PEP440_VERSION_RE.match(s) - if not m: - raise UnsupportedVersionError('Not a valid version: %s' % s) - groups = m.groups() - nums = tuple(int(v) for v in groups[1].split('.')) - while len(nums) > 1 and nums[-1] == 0: - nums = nums[:-1] - - if not groups[0]: - epoch = 0 - else: - epoch = int(groups[0][:-1]) - pre = groups[4:6] - post = groups[7:9] - dev = groups[10:12] - local = groups[13] - if pre == (None, None): - pre = () - else: - pre = pre[0], int(pre[1]) - if post == (None, None): - post = () - else: - post = post[0], int(post[1]) - if dev == (None, None): - dev = () - else: - dev = dev[0], int(dev[1]) - if local is None: - local = () - else: - parts = [] - for part in local.split('.'): - # to ensure that numeric compares as > lexicographic, avoid - # comparing them directly, but encode a tuple which ensures - # correct sorting - if part.isdigit(): - part = (1, int(part)) - else: - part = (0, part) - parts.append(part) - local = tuple(parts) - if not pre: - # either before pre-release, or final release and after - if not post and dev: - # before pre-release - pre = ('a', -1) # to sort before a0 - else: - pre = ('z',) # to sort after all pre-releases - # now look at the state of post and dev. - if not post: - post = ('_',) # sort before 'a' - if not dev: - dev = ('final',) - - #print('%s -> %s' % (s, m.groups())) - return epoch, nums, pre, post, dev, local - - -_normalized_key = _pep_440_key - - -class NormalizedVersion(Version): - """A rational version. - - Good: - 1.2 # equivalent to "1.2.0" - 1.2.0 - 1.2a1 - 1.2.3a2 - 1.2.3b1 - 1.2.3c1 - 1.2.3.4 - TODO: fill this out - - Bad: - 1 # minimum two numbers - 1.2a # release level must have a release serial - 1.2.3b - """ - def parse(self, s): - result = _normalized_key(s) - # _normalized_key loses trailing zeroes in the release - # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0 - # However, PEP 440 prefix matching needs it: for example, - # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0). - m = PEP440_VERSION_RE.match(s) # must succeed - groups = m.groups() - self._release_clause = tuple(int(v) for v in groups[1].split('.')) - return result - - PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev']) - - @property - def is_prerelease(self): - return any(t[0] in self.PREREL_TAGS for t in self._parts if t) - - -def _match_prefix(x, y): - x = str(x) - y = str(y) - if x == y: - return True - if not x.startswith(y): - return False - n = len(y) - return x[n] == '.' - - -class NormalizedMatcher(Matcher): - version_class = NormalizedVersion - - # value is either a callable or the name of a method - _operators = { - '~=': '_match_compatible', - '<': '_match_lt', - '>': '_match_gt', - '<=': '_match_le', - '>=': '_match_ge', - '==': '_match_eq', - '===': '_match_arbitrary', - '!=': '_match_ne', - } - - def _adjust_local(self, version, constraint, prefix): - if prefix: - strip_local = '+' not in constraint and version._parts[-1] - else: - # both constraint and version are - # NormalizedVersion instances. - # If constraint does not have a local component, - # ensure the version doesn't, either. 
- strip_local = not constraint._parts[-1] and version._parts[-1] - if strip_local: - s = version._string.split('+', 1)[0] - version = self.version_class(s) - return version, constraint - - def _match_lt(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - if version >= constraint: - return False - release_clause = constraint._release_clause - pfx = '.'.join([str(i) for i in release_clause]) - return not _match_prefix(version, pfx) - - def _match_gt(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - if version <= constraint: - return False - release_clause = constraint._release_clause - pfx = '.'.join([str(i) for i in release_clause]) - return not _match_prefix(version, pfx) - - def _match_le(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - return version <= constraint - - def _match_ge(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - return version >= constraint - - def _match_eq(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - if not prefix: - result = (version == constraint) - else: - result = _match_prefix(version, constraint) - return result - - def _match_arbitrary(self, version, constraint, prefix): - return str(version) == str(constraint) - - def _match_ne(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - if not prefix: - result = (version != constraint) - else: - result = not _match_prefix(version, constraint) - return result - - def _match_compatible(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - if version == constraint: - return True - if version < constraint: - return False -# if not prefix: -# return True - release_clause = constraint._release_clause - if len(release_clause) > 1: - release_clause = release_clause[:-1] - pfx = '.'.join([str(i) for i in release_clause]) - return _match_prefix(version, pfx) - -_REPLACEMENTS = ( - (re.compile('[.+-]$'), ''), # remove trailing puncts - (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start - (re.compile('^[.-]'), ''), # remove leading puncts - (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses - (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion) - (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion) - (re.compile('[.]{2,}'), '.'), # multiple runs of '.' - (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha - (re.compile(r'\b(pre-alpha|prealpha)\b'), - 'pre.alpha'), # standardise - (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses -) - -_SUFFIX_REPLACEMENTS = ( - (re.compile('^[:~._+-]+'), ''), # remove leading puncts - (re.compile('[,*")([\\]]'), ''), # remove unwanted chars - (re.compile('[~:+_ -]'), '.'), # replace illegal chars - (re.compile('[.]{2,}'), '.'), # multiple runs of '.' - (re.compile(r'\.$'), ''), # trailing '.' -) - -_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)') - - -def _suggest_semantic_version(s): - """ - Try to suggest a semantic form for a version for which - _suggest_normalized_version couldn't come up with anything. - """ - result = s.strip().lower() - for pat, repl in _REPLACEMENTS: - result = pat.sub(repl, result) - if not result: - result = '0.0.0' - - # Now look for numeric prefix, and separate it out from - # the rest. 
- #import pdb; pdb.set_trace() - m = _NUMERIC_PREFIX.match(result) - if not m: - prefix = '0.0.0' - suffix = result - else: - prefix = m.groups()[0].split('.') - prefix = [int(i) for i in prefix] - while len(prefix) < 3: - prefix.append(0) - if len(prefix) == 3: - suffix = result[m.end():] - else: - suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():] - prefix = prefix[:3] - prefix = '.'.join([str(i) for i in prefix]) - suffix = suffix.strip() - if suffix: - #import pdb; pdb.set_trace() - # massage the suffix. - for pat, repl in _SUFFIX_REPLACEMENTS: - suffix = pat.sub(repl, suffix) - - if not suffix: - result = prefix - else: - sep = '-' if 'dev' in suffix else '+' - result = prefix + sep + suffix - if not is_semver(result): - result = None - return result - - -def _suggest_normalized_version(s): - """Suggest a normalized version close to the given version string. - - If you have a version string that isn't rational (i.e. NormalizedVersion - doesn't like it) then you might be able to get an equivalent (or close) - rational version from this function. - - This does a number of simple normalizations to the given string, based - on observation of versions currently in use on PyPI. Given a dump of - those version during PyCon 2009, 4287 of them: - - 2312 (53.93%) match NormalizedVersion without change - with the automatic suggestion - - 3474 (81.04%) match when using this suggestion method - - @param s {str} An irrational version string. - @returns A rational version string, or None, if couldn't determine one. - """ - try: - _normalized_key(s) - return s # already rational - except UnsupportedVersionError: - pass - - rs = s.lower() - - # part of this could use maketrans - for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'), - ('beta', 'b'), ('rc', 'c'), ('-final', ''), - ('-pre', 'c'), - ('-release', ''), ('.release', ''), ('-stable', ''), - ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''), - ('final', '')): - rs = rs.replace(orig, repl) - - # if something ends with dev or pre, we add a 0 - rs = re.sub(r"pre$", r"pre0", rs) - rs = re.sub(r"dev$", r"dev0", rs) - - # if we have something like "b-2" or "a.2" at the end of the - # version, that is probably beta, alpha, etc - # let's remove the dash or dot - rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs) - - # 1.0-dev-r371 -> 1.0.dev371 - # 0.1-dev-r79 -> 0.1.dev79 - rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs) - - # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1 - rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs) - - # Clean: v0.3, v1.0 - if rs.startswith('v'): - rs = rs[1:] - - # Clean leading '0's on numbers. - #TODO: unintended side-effect on, e.g., "2003.05.09" - # PyPI stats: 77 (~2%) better - rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) - - # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers - # zero. 
- # PyPI stats: 245 (7.56%) better - rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) - - # the 'dev-rNNN' tag is a dev tag - rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) - - # clean the - when used as a pre delimiter - rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) - - # a terminal "dev" or "devel" can be changed into ".dev0" - rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) - - # a terminal "dev" can be changed into ".dev0" - rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) - - # a terminal "final" or "stable" can be removed - rs = re.sub(r"(final|stable)$", "", rs) - - # The 'r' and the '-' tags are post release tags - # 0.4a1.r10 -> 0.4a1.post10 - # 0.9.33-17222 -> 0.9.33.post17222 - # 0.9.33-r17222 -> 0.9.33.post17222 - rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) - - # Clean 'r' instead of 'dev' usage: - # 0.9.33+r17222 -> 0.9.33.dev17222 - # 1.0dev123 -> 1.0.dev123 - # 1.0.git123 -> 1.0.dev123 - # 1.0.bzr123 -> 1.0.dev123 - # 0.1a0dev.123 -> 0.1a0.dev123 - # PyPI stats: ~150 (~4%) better - rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) - - # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: - # 0.2.pre1 -> 0.2c1 - # 0.2-c1 -> 0.2c1 - # 1.0preview123 -> 1.0c123 - # PyPI stats: ~21 (0.62%) better - rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) - - # Tcl/Tk uses "px" for their post release markers - rs = re.sub(r"p(\d+)$", r".post\1", rs) - - try: - _normalized_key(rs) - except UnsupportedVersionError: - rs = None - return rs - -# -# Legacy version processing (distribute-compatible) -# - -_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) -_VERSION_REPLACE = { - 'pre': 'c', - 'preview': 'c', - '-': 'final-', - 'rc': 'c', - 'dev': '@', - '': None, - '.': None, -} - - -def _legacy_key(s): - def get_parts(s): - result = [] - for p in _VERSION_PART.split(s.lower()): - p = _VERSION_REPLACE.get(p, p) - if p: - if '0' <= p[:1] <= '9': - p = p.zfill(8) - else: - p = '*' + p - result.append(p) - result.append('*final') - return result - - result = [] - for p in get_parts(s): - if p.startswith('*'): - if p < '*final': - while result and result[-1] == '*final-': - result.pop() - while result and result[-1] == '00000000': - result.pop() - result.append(p) - return tuple(result) - - -class LegacyVersion(Version): - def parse(self, s): - return _legacy_key(s) - - @property - def is_prerelease(self): - result = False - for x in self._parts: - if (isinstance(x, string_types) and x.startswith('*') and - x < '*final'): - result = True - break - return result - - -class LegacyMatcher(Matcher): - version_class = LegacyVersion - - _operators = dict(Matcher._operators) - _operators['~='] = '_match_compatible' - - numeric_re = re.compile(r'^(\d+(\.\d+)*)') - - def _match_compatible(self, version, constraint, prefix): - if version < constraint: - return False - m = self.numeric_re.match(str(constraint)) - if not m: - logger.warning('Cannot compute compatible match for version %s ' - ' and constraint %s', version, constraint) - return True - s = m.groups()[0] - if '.' in s: - s = s.rsplit('.', 1)[0] - return _match_prefix(version, s) - -# -# Semantic versioning -# - -_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)' - r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?' 
- r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I) - - -def is_semver(s): - return _SEMVER_RE.match(s) - - -def _semantic_key(s): - def make_tuple(s, absent): - if s is None: - result = (absent,) - else: - parts = s[1:].split('.') - # We can't compare ints and strings on Python 3, so fudge it - # by zero-filling numeric values so simulate a numeric comparison - result = tuple([p.zfill(8) if p.isdigit() else p for p in parts]) - return result - - m = is_semver(s) - if not m: - raise UnsupportedVersionError(s) - groups = m.groups() - major, minor, patch = [int(i) for i in groups[:3]] - # choose the '|' and '*' so that versions sort correctly - pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*') - return (major, minor, patch), pre, build - - -class SemanticVersion(Version): - def parse(self, s): - return _semantic_key(s) - - @property - def is_prerelease(self): - return self._parts[1][0] != '|' - - -class SemanticMatcher(Matcher): - version_class = SemanticVersion - - -class VersionScheme(object): - def __init__(self, key, matcher, suggester=None): - self.key = key - self.matcher = matcher - self.suggester = suggester - - def is_valid_version(self, s): - try: - self.matcher.version_class(s) - result = True - except UnsupportedVersionError: - result = False - return result - - def is_valid_matcher(self, s): - try: - self.matcher(s) - result = True - except UnsupportedVersionError: - result = False - return result - - def is_valid_constraint_list(self, s): - """ - Used for processing some metadata fields - """ - # See issue #140. Be tolerant of a single trailing comma. - if s.endswith(','): - s = s[:-1] - return self.is_valid_matcher('dummy_name (%s)' % s) - - def suggest(self, s): - if self.suggester is None: - result = None - else: - result = self.suggester(s) - return result - -_SCHEMES = { - 'normalized': VersionScheme(_normalized_key, NormalizedMatcher, - _suggest_normalized_version), - 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s), - 'semantic': VersionScheme(_semantic_key, SemanticMatcher, - _suggest_semantic_version), -} - -_SCHEMES['default'] = _SCHEMES['normalized'] - - -def get_scheme(name): - if name not in _SCHEMES: - raise ValueError('unknown scheme name: %r' % name) - return _SCHEMES[name] diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/progress_bar.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/progress_bar.py deleted file mode 100644 index 67361df2e49d48dd56c91e291ba92553e9afe344..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/progress_bar.py +++ /dev/null @@ -1,224 +0,0 @@ -import math -from functools import lru_cache -from time import monotonic -from typing import Iterable, List, Optional - -from .color import Color, blend_rgb -from .color_triplet import ColorTriplet -from .console import Console, ConsoleOptions, RenderResult -from .jupyter import JupyterMixin -from .measure import Measurement -from .segment import Segment -from .style import Style, StyleType - -# Number of characters before 'pulse' animation repeats -PULSE_SIZE = 20 - - -class ProgressBar(JupyterMixin): - """Renders a (progress) bar. Used by rich.progress. - - Args: - total (float, optional): Number of steps in the bar. Defaults to 100. Set to None to render a pulsing animation. - completed (float, optional): Number of steps completed. Defaults to 0. - width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None. 
- pulse (bool, optional): Enable pulse effect. Defaults to False. Will pulse if a None total was passed. - style (StyleType, optional): Style for the bar background. Defaults to "bar.back". - complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". - finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished". - pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". - animation_time (Optional[float], optional): Time in seconds to use for animation, or None to use system time. - """ - - def __init__( - self, - total: Optional[float] = 100.0, - completed: float = 0, - width: Optional[int] = None, - pulse: bool = False, - style: StyleType = "bar.back", - complete_style: StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - animation_time: Optional[float] = None, - ): - self.total = total - self.completed = completed - self.width = width - self.pulse = pulse - self.style = style - self.complete_style = complete_style - self.finished_style = finished_style - self.pulse_style = pulse_style - self.animation_time = animation_time - - self._pulse_segments: Optional[List[Segment]] = None - - def __repr__(self) -> str: - return f"" - - @property - def percentage_completed(self) -> Optional[float]: - """Calculate percentage complete.""" - if self.total is None: - return None - completed = (self.completed / self.total) * 100.0 - completed = min(100, max(0.0, completed)) - return completed - - @lru_cache(maxsize=16) - def _get_pulse_segments( - self, - fore_style: Style, - back_style: Style, - color_system: str, - no_color: bool, - ascii: bool = False, - ) -> List[Segment]: - """Get a list of segments to render a pulse animation. - - Returns: - List[Segment]: A list of segments, one segment per character. - """ - bar = "-" if ascii else "━" - segments: List[Segment] = [] - if color_system not in ("standard", "eight_bit", "truecolor") or no_color: - segments += [Segment(bar, fore_style)] * (PULSE_SIZE // 2) - segments += [Segment(" " if no_color else bar, back_style)] * ( - PULSE_SIZE - (PULSE_SIZE // 2) - ) - return segments - - append = segments.append - fore_color = ( - fore_style.color.get_truecolor() - if fore_style.color - else ColorTriplet(255, 0, 255) - ) - back_color = ( - back_style.color.get_truecolor() - if back_style.color - else ColorTriplet(0, 0, 0) - ) - cos = math.cos - pi = math.pi - _Segment = Segment - _Style = Style - from_triplet = Color.from_triplet - - for index in range(PULSE_SIZE): - position = index / PULSE_SIZE - fade = 0.5 + cos((position * pi * 2)) / 2.0 - color = blend_rgb(fore_color, back_color, cross_fade=fade) - append(_Segment(bar, _Style(color=from_triplet(color)))) - return segments - - def update(self, completed: float, total: Optional[float] = None) -> None: - """Update progress with new values. - - Args: - completed (float): Number of steps completed. - total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None. - """ - self.completed = completed - self.total = total if total is not None else self.total - - def _render_pulse( - self, console: Console, width: int, ascii: bool = False - ) -> Iterable[Segment]: - """Renders the pulse animation. - - Args: - console (Console): Console instance. - width (int): Width in characters of pulse animation. 
- - Returns: - RenderResult: [description] - - Yields: - Iterator[Segment]: Segments to render pulse - """ - fore_style = console.get_style(self.pulse_style, default="white") - back_style = console.get_style(self.style, default="black") - - pulse_segments = self._get_pulse_segments( - fore_style, back_style, console.color_system, console.no_color, ascii=ascii - ) - segment_count = len(pulse_segments) - current_time = ( - monotonic() if self.animation_time is None else self.animation_time - ) - segments = pulse_segments * (int(width / segment_count) + 2) - offset = int(-current_time * 15) % segment_count - segments = segments[offset : offset + width] - yield from segments - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - - width = min(self.width or options.max_width, options.max_width) - ascii = options.legacy_windows or options.ascii_only - should_pulse = self.pulse or self.total is None - if should_pulse: - yield from self._render_pulse(console, width, ascii=ascii) - return - - completed: Optional[float] = ( - min(self.total, max(0, self.completed)) if self.total is not None else None - ) - - bar = "-" if ascii else "━" - half_bar_right = " " if ascii else "╸" - half_bar_left = " " if ascii else "╺" - complete_halves = ( - int(width * 2 * completed / self.total) - if self.total and completed is not None - else width * 2 - ) - bar_count = complete_halves // 2 - half_bar_count = complete_halves % 2 - style = console.get_style(self.style) - is_finished = self.total is None or self.completed >= self.total - complete_style = console.get_style( - self.finished_style if is_finished else self.complete_style - ) - _Segment = Segment - if bar_count: - yield _Segment(bar * bar_count, complete_style) - if half_bar_count: - yield _Segment(half_bar_right * half_bar_count, complete_style) - - if not console.no_color: - remaining_bars = width - bar_count - half_bar_count - if remaining_bars and console.color_system is not None: - if not half_bar_count and bar_count: - yield _Segment(half_bar_left, style) - remaining_bars -= 1 - if remaining_bars: - yield _Segment(bar * remaining_bars, style) - - def __rich_measure__( - self, console: Console, options: ConsoleOptions - ) -> Measurement: - return ( - Measurement(self.width, self.width) - if self.width is not None - else Measurement(4, options.max_width) - ) - - -if __name__ == "__main__": # pragma: no cover - console = Console() - bar = ProgressBar(width=50, total=100) - - import time - - console.show_cursor(False) - for n in range(0, 101, 1): - bar.update(n) - console.print(bar) - console.file.write("\r") - time.sleep(0.05) - console.show_cursor(True) - console.print() diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/futures.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/futures.py deleted file mode 100644 index 39e071fb6023eb3c17d1fdd91f030f3c15fa7b2f..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/futures.py +++ /dev/null @@ -1,606 +0,0 @@ -# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. 
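The pulse animation in the rich `ProgressBar` code above boils down to a cosine cross-fade between two colours plus a time-based rotation of the resulting palette. A dependency-free sketch of that fade follows; the colour values are the magenta/black fallbacks seen in the snippet, treated here as arbitrary placeholders rather than rich's rendering pipeline:

```python
import math

def pulse_palette(fore=(255, 0, 255), back=(0, 0, 0), size=20):
    """Blend `fore` into `back` with a cosine fade, one RGB tuple per character."""
    palette = []
    for index in range(size):
        position = index / size
        # fade oscillates smoothly between 1.0 (pure fore) and 0.0 (pure back)
        fade = 0.5 + math.cos(position * math.pi * 2) / 2.0
        palette.append(tuple(
            int(f * fade + b * (1 - fade)) for f, b in zip(fore, back)
        ))
    return palette

# Animating is then just rotating this fixed palette by a clock-derived offset,
# as the original does: offset = int(-current_time * 15) % len(palette)
print(pulse_palette()[:5])
```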
See the License for the specific -# language governing permissions and limitations under the License. -import copy -import logging -import sys -import threading -from collections import namedtuple -from concurrent import futures - -from s3transfer.compat import MAXINT -from s3transfer.exceptions import CancelledError, TransferNotDoneError -from s3transfer.utils import FunctionContainer, TaskSemaphore - -logger = logging.getLogger(__name__) - - -class BaseTransferFuture: - @property - def meta(self): - """The metadata associated to the TransferFuture""" - raise NotImplementedError('meta') - - def done(self): - """Determines if a TransferFuture has completed - - :returns: True if completed. False, otherwise. - """ - raise NotImplementedError('done()') - - def result(self): - """Waits until TransferFuture is done and returns the result - - If the TransferFuture succeeded, it will return the result. If the - TransferFuture failed, it will raise the exception associated to the - failure. - """ - raise NotImplementedError('result()') - - def cancel(self): - """Cancels the request associated with the TransferFuture""" - raise NotImplementedError('cancel()') - - -class BaseTransferMeta: - @property - def call_args(self): - """The call args used in the transfer request""" - raise NotImplementedError('call_args') - - @property - def transfer_id(self): - """The unique id of the transfer""" - raise NotImplementedError('transfer_id') - - @property - def user_context(self): - """A dictionary that requesters can store data in""" - raise NotImplementedError('user_context') - - -class TransferFuture(BaseTransferFuture): - def __init__(self, meta=None, coordinator=None): - """The future associated to a submitted transfer request - - :type meta: TransferMeta - :param meta: The metadata associated to the request. This object - is visible to the requester. - - :type coordinator: TransferCoordinator - :param coordinator: The coordinator associated to the request. This - object is not visible to the requester. - """ - self._meta = meta - if meta is None: - self._meta = TransferMeta() - - self._coordinator = coordinator - if coordinator is None: - self._coordinator = TransferCoordinator() - - @property - def meta(self): - return self._meta - - def done(self): - return self._coordinator.done() - - def result(self): - try: - # Usually the result() method blocks until the transfer is done, - # however if a KeyboardInterrupt is raised we want want to exit - # out of this and propagate the exception. - return self._coordinator.result() - except KeyboardInterrupt as e: - self.cancel() - raise e - - def cancel(self): - self._coordinator.cancel() - - def set_exception(self, exception): - """Sets the exception on the future.""" - if not self.done(): - raise TransferNotDoneError( - 'set_exception can only be called once the transfer is ' - 'complete.' 
- ) - self._coordinator.set_exception(exception, override=True) - - -class TransferMeta(BaseTransferMeta): - """Holds metadata about the TransferFuture""" - - def __init__(self, call_args=None, transfer_id=None): - self._call_args = call_args - self._transfer_id = transfer_id - self._size = None - self._user_context = {} - - @property - def call_args(self): - """The call args used in the transfer request""" - return self._call_args - - @property - def transfer_id(self): - """The unique id of the transfer""" - return self._transfer_id - - @property - def size(self): - """The size of the transfer request if known""" - return self._size - - @property - def user_context(self): - """A dictionary that requesters can store data in""" - return self._user_context - - def provide_transfer_size(self, size): - """A method to provide the size of a transfer request - - By providing this value, the TransferManager will not try to - call HeadObject or use the use OS to determine the size of the - transfer. - """ - self._size = size - - -class TransferCoordinator: - """A helper class for managing TransferFuture""" - - def __init__(self, transfer_id=None): - self.transfer_id = transfer_id - self._status = 'not-started' - self._result = None - self._exception = None - self._associated_futures = set() - self._failure_cleanups = [] - self._done_callbacks = [] - self._done_event = threading.Event() - self._lock = threading.Lock() - self._associated_futures_lock = threading.Lock() - self._done_callbacks_lock = threading.Lock() - self._failure_cleanups_lock = threading.Lock() - - def __repr__(self): - return '{}(transfer_id={})'.format( - self.__class__.__name__, self.transfer_id - ) - - @property - def exception(self): - return self._exception - - @property - def associated_futures(self): - """The list of futures associated to the inprogress TransferFuture - - Once the transfer finishes this list becomes empty as the transfer - is considered done and there should be no running futures left. - """ - with self._associated_futures_lock: - # We return a copy of the list because we do not want to - # processing the returned list while another thread is adding - # more futures to the actual list. - return copy.copy(self._associated_futures) - - @property - def failure_cleanups(self): - """The list of callbacks to call when the TransferFuture fails""" - return self._failure_cleanups - - @property - def status(self): - """The status of the TransferFuture - - The currently supported states are: - * not-started - Has yet to start. If in this state, a transfer - can be canceled immediately and nothing will happen. - * queued - SubmissionTask is about to submit tasks - * running - Is inprogress. In-progress as of now means that - the SubmissionTask that runs the transfer is being executed. So - there is no guarantee any transfer requests had been made to - S3 if this state is reached. - * cancelled - Was cancelled - * failed - An exception other than CancelledError was thrown - * success - No exceptions were thrown and is done. - """ - return self._status - - def set_result(self, result): - """Set a result for the TransferFuture - - Implies that the TransferFuture succeeded. This will always set a - result because it is invoked on the final task where there is only - ever one final task and it is ran at the very end of a transfer - process. So if a result is being set for this final task, the transfer - succeeded even if something came a long and canceled the transfer - on the final task. 
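Underneath the bookkeeping, `TransferCoordinator` resolves `result()` with a simple pattern: a `threading.Event` is set when the transfer finishes, and the waiter then either re-raises the stored exception or returns the stored result. A stripped-down illustration of that pattern (a hypothetical `MiniFuture`, not s3transfer's actual class):

```python
import threading

class MiniFuture:
    """Illustrative 'done event' future: producers set a result or exception, consumers wait."""

    def __init__(self):
        self._done = threading.Event()
        self._lock = threading.Lock()
        self._result = None
        self._exception = None

    def set_result(self, value):
        with self._lock:
            self._result, self._exception = value, None
        self._done.set()

    def set_exception(self, exc):
        with self._lock:
            self._exception = exc
        self._done.set()

    def result(self, timeout=None):
        # Block until a producer thread announces completion.
        self._done.wait(timeout)
        if self._exception:
            raise self._exception
        return self._result

fut = MiniFuture()
threading.Thread(target=lambda: fut.set_result(42)).start()
print(fut.result())  # 42
```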
- """ - with self._lock: - self._exception = None - self._result = result - self._status = 'success' - - def set_exception(self, exception, override=False): - """Set an exception for the TransferFuture - - Implies the TransferFuture failed. - - :param exception: The exception that cause the transfer to fail. - :param override: If True, override any existing state. - """ - with self._lock: - if not self.done() or override: - self._exception = exception - self._status = 'failed' - - def result(self): - """Waits until TransferFuture is done and returns the result - - If the TransferFuture succeeded, it will return the result. If the - TransferFuture failed, it will raise the exception associated to the - failure. - """ - # Doing a wait() with no timeout cannot be interrupted in python2 but - # can be interrupted in python3 so we just wait with the largest - # possible value integer value, which is on the scale of billions of - # years... - self._done_event.wait(MAXINT) - - # Once done waiting, raise an exception if present or return the - # final result. - if self._exception: - raise self._exception - return self._result - - def cancel(self, msg='', exc_type=CancelledError): - """Cancels the TransferFuture - - :param msg: The message to attach to the cancellation - :param exc_type: The type of exception to set for the cancellation - """ - with self._lock: - if not self.done(): - should_announce_done = False - logger.debug('%s cancel(%s) called', self, msg) - self._exception = exc_type(msg) - if self._status == 'not-started': - should_announce_done = True - self._status = 'cancelled' - if should_announce_done: - self.announce_done() - - def set_status_to_queued(self): - """Sets the TransferFutrue's status to running""" - self._transition_to_non_done_state('queued') - - def set_status_to_running(self): - """Sets the TransferFuture's status to running""" - self._transition_to_non_done_state('running') - - def _transition_to_non_done_state(self, desired_state): - with self._lock: - if self.done(): - raise RuntimeError( - 'Unable to transition from done state %s to non-done ' - 'state %s.' % (self.status, desired_state) - ) - self._status = desired_state - - def submit(self, executor, task, tag=None): - """Submits a task to a provided executor - - :type executor: s3transfer.futures.BoundedExecutor - :param executor: The executor to submit the callable to - - :type task: s3transfer.tasks.Task - :param task: The task to submit to the executor - - :type tag: s3transfer.futures.TaskTag - :param tag: A tag to associate to the submitted task - - :rtype: concurrent.futures.Future - :returns: A future representing the submitted task - """ - logger.debug( - "Submitting task {} to executor {} for transfer request: {}.".format( - task, executor, self.transfer_id - ) - ) - future = executor.submit(task, tag=tag) - # Add this created future to the list of associated future just - # in case it is needed during cleanups. - self.add_associated_future(future) - future.add_done_callback( - FunctionContainer(self.remove_associated_future, future) - ) - return future - - def done(self): - """Determines if a TransferFuture has completed - - :returns: False if status is equal to 'failed', 'cancelled', or - 'success'. 
True, otherwise - """ - return self.status in ['failed', 'cancelled', 'success'] - - def add_associated_future(self, future): - """Adds a future to be associated with the TransferFuture""" - with self._associated_futures_lock: - self._associated_futures.add(future) - - def remove_associated_future(self, future): - """Removes a future's association to the TransferFuture""" - with self._associated_futures_lock: - self._associated_futures.remove(future) - - def add_done_callback(self, function, *args, **kwargs): - """Add a done callback to be invoked when transfer is done""" - with self._done_callbacks_lock: - self._done_callbacks.append( - FunctionContainer(function, *args, **kwargs) - ) - - def add_failure_cleanup(self, function, *args, **kwargs): - """Adds a callback to call upon failure""" - with self._failure_cleanups_lock: - self._failure_cleanups.append( - FunctionContainer(function, *args, **kwargs) - ) - - def announce_done(self): - """Announce that future is done running and run associated callbacks - - This will run any failure cleanups if the transfer failed if not - they have not been run, allows the result() to be unblocked, and will - run any done callbacks associated to the TransferFuture if they have - not already been ran. - """ - if self.status != 'success': - self._run_failure_cleanups() - self._done_event.set() - self._run_done_callbacks() - - def _run_done_callbacks(self): - # Run the callbacks and remove the callbacks from the internal - # list so they do not get ran again if done is announced more than - # once. - with self._done_callbacks_lock: - self._run_callbacks(self._done_callbacks) - self._done_callbacks = [] - - def _run_failure_cleanups(self): - # Run the cleanup callbacks and remove the callbacks from the internal - # list so they do not get ran again if done is announced more than - # once. - with self._failure_cleanups_lock: - self._run_callbacks(self.failure_cleanups) - self._failure_cleanups = [] - - def _run_callbacks(self, callbacks): - for callback in callbacks: - self._run_callback(callback) - - def _run_callback(self, callback): - try: - callback() - # We do not want a callback interrupting the process, especially - # in the failure cleanups. So log and catch, the exception. - except Exception: - logger.debug("Exception raised in %s." % callback, exc_info=True) - - -class BoundedExecutor: - EXECUTOR_CLS = futures.ThreadPoolExecutor - - def __init__( - self, max_size, max_num_threads, tag_semaphores=None, executor_cls=None - ): - """An executor implementation that has a maximum queued up tasks - - The executor will block if the number of tasks that have been - submitted and is currently working on is past its maximum. - - :params max_size: The maximum number of inflight futures. An inflight - future means that the task is either queued up or is currently - being executed. A size of None or 0 means that the executor will - have no bound in terms of the number of inflight futures. - - :params max_num_threads: The maximum number of threads the executor - uses. - - :type tag_semaphores: dict - :params tag_semaphores: A dictionary where the key is the name of the - tag and the value is the semaphore to use when limiting the - number of tasks the executor is processing at a time. - - :type executor_cls: BaseExecutor - :param underlying_executor_cls: The executor class that - get bounded by this executor. If None is provided, the - concurrent.futures.ThreadPoolExecutor class is used. 
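The contract `BoundedExecutor` describes above, blocking `submit()` once `max_size` futures are in flight and releasing a slot from a done callback, can be sketched with only the standard library. This is a simplified stand-in for s3transfer's semaphore-per-tag machinery, not its actual implementation:

```python
import threading
from concurrent.futures import ThreadPoolExecutor

class TinyBoundedExecutor:
    """Illustrative only: block submit() while `max_size` futures are still in flight."""

    def __init__(self, max_size, max_workers):
        self._semaphore = threading.BoundedSemaphore(max_size)
        self._executor = ThreadPoolExecutor(max_workers=max_workers)

    def submit(self, fn, *args, **kwargs):
        self._semaphore.acquire()  # blocks when the in-flight limit is reached
        future = self._executor.submit(fn, *args, **kwargs)
        # Release the slot as soon as the task finishes, whether it succeeded or failed.
        future.add_done_callback(lambda _f: self._semaphore.release())
        return future

    def shutdown(self, wait=True):
        self._executor.shutdown(wait)

pool = TinyBoundedExecutor(max_size=2, max_workers=2)
futures = [pool.submit(pow, 2, n) for n in range(5)]
print([f.result() for f in futures])  # [1, 2, 4, 8, 16]
pool.shutdown()
```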
- """ - self._max_num_threads = max_num_threads - if executor_cls is None: - executor_cls = self.EXECUTOR_CLS - self._executor = executor_cls(max_workers=self._max_num_threads) - self._semaphore = TaskSemaphore(max_size) - self._tag_semaphores = tag_semaphores - - def submit(self, task, tag=None, block=True): - """Submit a task to complete - - :type task: s3transfer.tasks.Task - :param task: The task to run __call__ on - - - :type tag: s3transfer.futures.TaskTag - :param tag: An optional tag to associate to the task. This - is used to override which semaphore to use. - - :type block: boolean - :param block: True if to wait till it is possible to submit a task. - False, if not to wait and raise an error if not able to submit - a task. - - :returns: The future associated to the submitted task - """ - semaphore = self._semaphore - # If a tag was provided, use the semaphore associated to that - # tag. - if tag: - semaphore = self._tag_semaphores[tag] - - # Call acquire on the semaphore. - acquire_token = semaphore.acquire(task.transfer_id, block) - # Create a callback to invoke when task is done in order to call - # release on the semaphore. - release_callback = FunctionContainer( - semaphore.release, task.transfer_id, acquire_token - ) - # Submit the task to the underlying executor. - future = ExecutorFuture(self._executor.submit(task)) - # Add the Semaphore.release() callback to the future such that - # it is invoked once the future completes. - future.add_done_callback(release_callback) - return future - - def shutdown(self, wait=True): - self._executor.shutdown(wait) - - -class ExecutorFuture: - def __init__(self, future): - """A future returned from the executor - - Currently, it is just a wrapper around a concurrent.futures.Future. - However, this can eventually grow to implement the needed functionality - of concurrent.futures.Future if we move off of the library and not - affect the rest of the codebase. - - :type future: concurrent.futures.Future - :param future: The underlying future - """ - self._future = future - - def result(self): - return self._future.result() - - def add_done_callback(self, fn): - """Adds a callback to be completed once future is done - - :param fn: A callable that takes no arguments. Note that is different - than concurrent.futures.Future.add_done_callback that requires - a single argument for the future. - """ - # The done callback for concurrent.futures.Future will always pass a - # the future in as the only argument. So we need to create the - # proper signature wrapper that will invoke the callback provided. 
- def done_callback(future_passed_to_callback): - return fn() - - self._future.add_done_callback(done_callback) - - def done(self): - return self._future.done() - - -class BaseExecutor: - """Base Executor class implementation needed to work with s3transfer""" - - def __init__(self, max_workers=None): - pass - - def submit(self, fn, *args, **kwargs): - raise NotImplementedError('submit()') - - def shutdown(self, wait=True): - raise NotImplementedError('shutdown()') - - -class NonThreadedExecutor(BaseExecutor): - """A drop-in replacement non-threaded version of ThreadPoolExecutor""" - - def submit(self, fn, *args, **kwargs): - future = NonThreadedExecutorFuture() - try: - result = fn(*args, **kwargs) - future.set_result(result) - except Exception: - e, tb = sys.exc_info()[1:] - logger.debug( - 'Setting exception for %s to %s with traceback %s', - future, - e, - tb, - ) - future.set_exception_info(e, tb) - return future - - def shutdown(self, wait=True): - pass - - -class NonThreadedExecutorFuture: - """The Future returned from NonThreadedExecutor - - Note that this future is **not** thread-safe as it is being used - from the context of a non-threaded environment. - """ - - def __init__(self): - self._result = None - self._exception = None - self._traceback = None - self._done = False - self._done_callbacks = [] - - def set_result(self, result): - self._result = result - self._set_done() - - def set_exception_info(self, exception, traceback): - self._exception = exception - self._traceback = traceback - self._set_done() - - def result(self, timeout=None): - if self._exception: - raise self._exception.with_traceback(self._traceback) - return self._result - - def _set_done(self): - self._done = True - for done_callback in self._done_callbacks: - self._invoke_done_callback(done_callback) - self._done_callbacks = [] - - def _invoke_done_callback(self, done_callback): - return done_callback(self) - - def done(self): - return self._done - - def add_done_callback(self, fn): - if self._done: - self._invoke_done_callback(fn) - else: - self._done_callbacks.append(fn) - - -TaskTag = namedtuple('TaskTag', ['name']) - -IN_MEMORY_UPLOAD_TAG = TaskTag('in_memory_upload') -IN_MEMORY_DOWNLOAD_TAG = TaskTag('in_memory_download') diff --git a/spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/clip_old.py b/spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/clip_old.py deleted file mode 100644 index 4532ca932a36f46321c43c76599a5ae765b82b7e..0000000000000000000000000000000000000000 --- a/spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/clip_old.py +++ /dev/null @@ -1,140 +0,0 @@ -import hashlib -import os -import urllib -import warnings -from typing import Union, List - -import torch -from PIL import Image -from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize -from tqdm import tqdm - -from CLIP.model import build_model -from CLIP.simple_tokenizer import SimpleTokenizer as _Tokenizer - -__all__ = ["available_models", "load", "tokenize"] -_tokenizer = _Tokenizer() - -_MODELS = { - "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", - "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", -} - - -def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")): - os.makedirs(root, exist_ok=True) - filename = os.path.basename(url) - - expected_sha256 = url.split("/")[-2] - download_target = os.path.join(root, filename) - - if 
os.path.exists(download_target) and not os.path.isfile(download_target): - raise RuntimeError(f"{download_target} exists and is not a regular file") - - if os.path.isfile(download_target): - if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: - return download_target - else: - warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") - - with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: - with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop: - while True: - buffer = source.read(8192) - if not buffer: - break - - output.write(buffer) - loop.update(len(buffer)) - - if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: - raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") - - return download_target - - -def available_models(): - return list(_MODELS.keys()) - - -def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True): - if name not in _MODELS: - raise RuntimeError(f"Model {name} not found; available models = {available_models()}") - - model_path = _download(_MODELS[name]) - model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() - n_px = model.input_resolution.item() - - transform = Compose([ - Resize(n_px, interpolation=Image.BICUBIC), - CenterCrop(n_px), - lambda image: image.convert("RGB"), - ToTensor(), - Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), - ]) - - if not jit: - print("get Model.....") - model = build_model(model.state_dict()).to(device) - return model, transform - - # patch the device names - device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) - device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] - - def patch_device(module): - graphs = [module.graph] if hasattr(module, "graph") else [] - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("prim::Constant"): - if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): - node.copyAttributes(device_node) - - model.apply(patch_device) - patch_device(model.encode_image) - patch_device(model.encode_text) - - # patch dtype to float32 on CPU - if device == "cpu": - float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) - float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] - float_node = float_input.node() - - def patch_float(module): - graphs = [module.graph] if hasattr(module, "graph") else [] - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("aten::to"): - inputs = list(node.inputs()) - for i in [1, 2]: # dtype can be the second or third argument to aten::to() - if inputs[i].node()["value"] == 5: - inputs[i].node().copyAttributes(float_node) - - model.apply(patch_float) - patch_float(model.encode_image) - patch_float(model.encode_text) - - model.float() - - return model, transform - - -def tokenize(texts: Union[str, List[str]], context_length: int = 77): - if isinstance(texts, str): - texts = [texts] - - sot_token = _tokenizer.encoder["<|startoftext|>"] - eot_token = _tokenizer.encoder["<|endoftext|>"] - all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in 
texts] - result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) - - for i, tokens in enumerate(all_tokens): - if len(tokens) > context_length: - raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") - result[i, :len(tokens)] = torch.tensor(tokens) - - return result diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/internal/decompose.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/internal/decompose.h deleted file mode 100644 index e949f202485e7356dd1296c258a26bdd28e40840..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/internal/decompose.h +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -namespace thrust -{ -namespace system -{ -namespace detail -{ -namespace internal -{ - - template - class index_range - { - public: - typedef IndexType index_type; - - __host__ __device__ - index_range(index_type begin, index_type end) : m_begin(begin), m_end(end) {} - - __host__ __device__ - index_type begin(void) const { return m_begin; } - - __host__ __device__ - index_type end(void) const { return m_end; } - - __host__ __device__ - index_type size(void) const { return m_end - m_begin; } - - private: - index_type m_begin; - index_type m_end; - }; - - template - class uniform_decomposition - { - public: - typedef IndexType index_type; - typedef index_range range_type; - - __host__ __device__ - uniform_decomposition(index_type N, index_type granularity, index_type max_intervals) - : m_N(N), - m_intervals((N + granularity - 1) / granularity), - m_threshold(0), - m_small_interval(granularity), - m_large_interval(0) - { - if(m_intervals > max_intervals) - { - m_small_interval = granularity * (m_intervals / max_intervals); - m_large_interval = m_small_interval + granularity; - m_threshold = m_intervals % max_intervals; - m_intervals = max_intervals; - } - } - - __host__ __device__ - index_range operator[](const index_type& i) const - { - if (i < m_threshold) - { - index_type begin = m_large_interval * i; - index_type end = begin + m_large_interval; - return range_type(begin, end); - } - else - { - index_type begin = m_large_interval * m_threshold + m_small_interval * (i - m_threshold); - index_type end = (begin + m_small_interval < m_N) ? 
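The `tokenize` helper in the deleted `clip_old.py` above wraps each string in start/end-of-text tokens, rejects anything longer than the context length, and left-aligns the ids into a zero-padded `LongTensor`. The padding step in isolation looks roughly like the sketch below, with dummy ids standing in for the BPE tokenizer output:

```python
import torch

def pad_token_batch(token_lists, context_length=77):
    """Left-align variable-length token lists into a zero-padded (N, context_length) tensor."""
    result = torch.zeros(len(token_lists), context_length, dtype=torch.long)
    for i, tokens in enumerate(token_lists):
        if len(tokens) > context_length:
            raise RuntimeError(f"sequence {i} is too long for context length {context_length}")
        result[i, :len(tokens)] = torch.tensor(tokens)
    return result

# Dummy token ids standing in for "<sot> ... <eot>" sequences of different lengths.
batch = pad_token_batch([[1, 320, 1125, 2], [1, 512, 2]])
print(batch.shape)  # torch.Size([2, 77])
```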
begin + m_small_interval : m_N; - return range_type(begin, end); - } - } - - __host__ __device__ - index_type size(void) const - { - return m_intervals; - } - - private: - - index_type m_N; - index_type m_intervals; - index_type m_threshold; - index_type m_small_interval; - index_type m_large_interval; - }; - - -} // end namespace internal -} // end namespace detail -} // end namespace system -} // end namespace thrust - diff --git a/spaces/CVPR/regionclip-demo/detectron2/evaluation/testing.py b/spaces/CVPR/regionclip-demo/detectron2/evaluation/testing.py deleted file mode 100644 index 9e5ae625bb0593fc20739dd3ea549157e4df4f3d..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/evaluation/testing.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import numpy as np -import pprint -import sys -from collections.abc import Mapping - - -def print_csv_format(results): - """ - Print main metrics in a format similar to Detectron, - so that they are easy to copypaste into a spreadsheet. - - Args: - results (OrderedDict[dict]): task_name -> {metric -> score} - unordered dict can also be printed, but in arbitrary order - """ - assert isinstance(results, Mapping) or not len(results), results - logger = logging.getLogger(__name__) - for task, res in results.items(): - if isinstance(res, Mapping): - # Don't print "AP-category" metrics since they are usually not tracked. - important_res = [(k, v) for k, v in res.items() if "-" not in k] - logger.info("copypaste: Task: {}".format(task)) - logger.info("copypaste: " + ",".join([k[0] for k in important_res])) - logger.info("copypaste: " + ",".join(["{0:.4f}".format(k[1]) for k in important_res])) - else: - logger.info(f"copypaste: {task}={res}") - - -def verify_results(cfg, results): - """ - Args: - results (OrderedDict[dict]): task_name -> {metric -> score} - - Returns: - bool: whether the verification succeeds or not - """ - expected_results = cfg.TEST.EXPECTED_RESULTS - if not len(expected_results): - return True - - ok = True - for task, metric, expected, tolerance in expected_results: - actual = results[task].get(metric, None) - if actual is None: - ok = False - continue - if not np.isfinite(actual): - ok = False - continue - diff = abs(actual - expected) - if diff > tolerance: - ok = False - - logger = logging.getLogger(__name__) - if not ok: - logger.error("Result verification failed!") - logger.error("Expected Results: " + str(expected_results)) - logger.error("Actual Results: " + pprint.pformat(results)) - - sys.exit(1) - else: - logger.info("Results verification passed.") - return ok - - -def flatten_results_dict(results): - """ - Expand a hierarchical dict of scalars into a flat dict of scalars. - If results[k1][k2][k3] = v, the returned dict will have the entry - {"k1/k2/k3": v}. 
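`flatten_results_dict`, described here, recursively joins nested keys with "/" so that hierarchical metric dicts become flat scalar maps. A self-contained illustration of that behaviour; the function name and metric values below are made up for the example:

```python
def flatten(results, prefix=""):
    """Expand {'bbox': {'AP': 39.4}} into {'bbox/AP': 39.4}, recursing through nested dicts."""
    flat = {}
    for key, value in results.items():
        full_key = f"{prefix}/{key}" if prefix else key
        if isinstance(value, dict):
            flat.update(flatten(value, full_key))
        else:
            flat[full_key] = value
    return flat

print(flatten({"bbox": {"AP": 39.4, "AP50": 61.0}, "segm": {"AP": 35.2}}))
# {'bbox/AP': 39.4, 'bbox/AP50': 61.0, 'segm/AP': 35.2}
```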
- - Args: - results (dict): - """ - r = {} - for k, v in results.items(): - if isinstance(v, Mapping): - v = flatten_results_dict(v) - for kk, vv in v.items(): - r[k + "/" + kk] = vv - else: - r[k] = v - return r diff --git a/spaces/CVPR/regionclip-demo/detectron2/modeling/text_encoder/registry.py b/spaces/CVPR/regionclip-demo/detectron2/modeling/text_encoder/registry.py deleted file mode 100644 index 8991272a6e2294ea86eee338cf61d87e4123f724..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/modeling/text_encoder/registry.py +++ /dev/null @@ -1,18 +0,0 @@ -_lang_encoders = {} - - -def register_lang_encoder(fn): - module_name_split = fn.__module__.split('.') - model_name = module_name_split[-1] - - _lang_encoders[model_name] = fn - - return fn - - -def lang_encoders(model_name): - return _lang_encoders[model_name] - - -def is_lang_encoder(model_name): - return model_name in _lang_encoders diff --git a/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/longcode/prod_cons.h b/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/longcode/prod_cons.h deleted file mode 100644 index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000 --- a/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/longcode/prod_cons.h +++ /dev/null @@ -1,433 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libipc/def.h" - -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" -#include "libipc/utility/log.h" -#include "libipc/utility/utility.h" - -namespace ipc { - -//////////////////////////////////////////////////////////////// -/// producer-consumer implementation -//////////////////////////////////////////////////////////////// - -template -struct prod_cons_impl; - -template <> -struct prod_cons_impl> { - - template - struct elem_t { - std::aligned_storage_t data_ {}; - }; - - alignas(cache_line_size) std::atomic rd_; // read index - alignas(cache_line_size) std::atomic wt_; // write index - - constexpr circ::u2_t cursor() const noexcept { - return 0; - } - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed)); - if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) { - return false; // full - } - std::forward(f)(&(elems[cur_wt].data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - /** - * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'. - * So we could just disconnect all connections of receiver, and return false. 
- */ - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(~static_cast(0u)); - return false; - } - - template - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed)); - if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::forward(f)(&(elems[cur_rd].data_)); - std::forward(out)(true); - rd_.fetch_add(1, std::memory_order_release); - return true; - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - if (circ::index_of(cur_rd) == - circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - using flag_t = std::uint64_t; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - circ::u2_t cur_ct, nxt_ct; - for (unsigned k = 0;;) { - cur_ct = ct_.load(std::memory_order_relaxed); - if (circ::index_of(nxt_ct = cur_ct + 1) == - circ::index_of(rd_.load(std::memory_order_acquire))) { - return false; // full - } - if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - auto* el = elems + circ::index_of(cur_ct); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - while (1) { - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if (cur_ct != wt_.load(std::memory_order_relaxed)) { - return true; - } - if ((~cac_ct) != cur_ct) { - return true; - } - if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) { - return true; - } - wt_.store(nxt_ct, std::memory_order_release); - cur_ct = nxt_ct; - nxt_ct = cur_ct + 1; - el = elems + circ::index_of(cur_ct); - } - return true; - } - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - auto cur_wt = wt_.load(std::memory_order_acquire); - auto id_rd = circ::index_of(cur_rd); - auto id_wt = circ::index_of(cur_wt); - if (id_rd == id_wt) { - auto* el = elems + id_wt; - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if ((~cac_ct) != cur_wt) { - return false; // empty - } - if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) { - wt_.store(cur_wt + 1, std::memory_order_release); - } - k = 0; - } - else { - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, 
std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - - enum : rc_t { - ep_mask = 0x00000000ffffffffull, - ep_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - }; - - alignas(cache_line_size) std::atomic wt_; // write index - alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer - - circ::u2_t cursor() const noexcept { - return wt_.load(std::memory_order_acquire); - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) { - return false; // has not finished yet - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - epoch_ += ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) { - if (cur == cursor()) return false; // acquire - auto* el = elems + circ::index_of(cur++); - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & ep_mask) == 0) { - std::forward(out)(true); - return true; - } - auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id()); - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)((nxt_rc & ep_mask) == 0); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - using flag_t = std::uint64_t; - - enum : rc_t { - rc_mask = 0x00000000ffffffffull, - ep_mask = 0x00ffffffffffffffull, - ep_incr = 0x0100000000000000ull, - ic_mask = 0xff000000ffffffffull, - ic_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - alignas(cache_line_size) 
std::atomic epoch_ { 0 }; - - circ::u2_t cursor() const noexcept { - return ct_.load(std::memory_order_acquire); - } - - constexpr static rc_t inc_rc(rc_t rc) noexcept { - return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask); - } - - constexpr static rc_t inc_mask(rc_t rc) noexcept { - return inc_rc(rc) & ~rc_mask; - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.load(std::memory_order_acquire); - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_relaxed); - circ::cc_t rem_cc = cur_rc & rc_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) { - return false; // has not finished yet - } - else if (!rem_cc) { - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if ((cur_fl != cur_ct) && cur_fl) { - return false; // full - } - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) && - epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & rc_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) { - if (epoch == epoch_.load(std::memory_order_acquire)) { - break; - } - else if (push(wrapper, std::forward(f), elems)) { - return true; - } - epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) { - auto* el = elems + circ::index_of(cur); - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if (cur_fl != ~static_cast(cur)) { - return false; // empty - } - ++cur; - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & rc_mask) == 0) { - std::forward(out)(true); - el->f_ct_.store(cur + N - 1, 
std::memory_order_release); - return true; - } - auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id()); - bool last_one = false; - if ((last_one = (nxt_rc & rc_mask) == 0)) { - el->f_ct_.store(cur + N - 1, std::memory_order_release); - } - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)(last_one); - return true; - } - ipc::yield(k); - } - } -}; - -} // namespace ipc diff --git a/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/encoders/__init__.py b/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/encoders/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/DEBO-PROJECT/DEBO-V1/bots/debate_bot.py b/spaces/DEBO-PROJECT/DEBO-V1/bots/debate_bot.py deleted file mode 100644 index 9c6d5b055e68664a8fc2e9e83c522881d4b727bb..0000000000000000000000000000000000000000 --- a/spaces/DEBO-PROJECT/DEBO-V1/bots/debate_bot.py +++ /dev/null @@ -1,27 +0,0 @@ -import re -import random -from langchain.prompts import PromptTemplate -from modules.gpt_modules import gpt_call - -from .normal_debate import nomal_debator -from .one_to_one_debate import one_to_one_debator - - -############################################# -# Debate bot setting -############################################# -def debate_bot(prompt, history="", debate_subject="", bot_role="", history_num=0): - - if bot_role == "토론": - #bot_response = nomal_debator(prompt, history, debate_subject, bot_role, history_num) - bot_response = one_to_one_debator(prompt, history, debate_subject, bot_role, history_num) - elif bot_role == "주제 정의": - pass - elif bot_role == "POI 연습": - pass - elif bot_role == "역할 추천": - pass - elif bot_role == "주장 비판": - pass - else: - print("bot_role error") \ No newline at end of file diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/evaluation/iou.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/evaluation/iou.py deleted file mode 100644 index 0e4004302f50b9a55561be617d80051b55e0ff44..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/evaluation/iou.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -@date: 2021/6/29 -@description: -The method with "_floorplan" suffix is only for comparison, which is used for calculation in LED2-net. -However, the floorplan is affected by show_radius. Setting too large will result in the decrease of accuracy, -and setting too small will result in the failure of calculation beyond the range. -""" -import numpy as np -from shapely.geometry import Polygon - - -def calc_inter_area(dt_xz, gt_xz): - """ - :param dt_xz: Prediction boundaries can also be corners, format: [[x1, z1], [x2, z2], ...] - :param gt_xz: Ground truth boundaries can also be corners, format: [[x1, z1], [x2, z2], ...] - :return: - """ - dt_polygon = Polygon(dt_xz) - gt_polygon = Polygon(gt_xz) - - dt_area = dt_polygon.area - gt_area = gt_polygon.area - inter_area = dt_polygon.intersection(gt_polygon).area - return dt_area, gt_area, inter_area - - -def calc_IoU_2D(dt_xz, gt_xz): - """ - :param dt_xz: Prediction boundaries can also be corners, format: [[x1, z1], [x2, z2], ...] - :param gt_xz: Ground truth boundaries can also be corners, format: [[x1, z1], [x2, z2], ...] 
- :return: - """ - dt_area, gt_area, inter_area = calc_inter_area(dt_xz, gt_xz) - iou_2d = inter_area / (gt_area + dt_area - inter_area) - return iou_2d - - -def calc_IoU_3D(dt_xz, gt_xz, dt_height, gt_height): - """ - :param dt_xz: Prediction boundaries can also be corners, format: [[x1, z1], [x2, z2], ...] - :param gt_xz: Ground truth boundaries can also be corners, format: [[x1, z1], [x2, z2], ...] - :param dt_height: - :param gt_height: - :return: - """ - dt_area, gt_area, inter_area = calc_inter_area(dt_xz, gt_xz) - dt_volume = dt_area * dt_height - gt_volume = gt_area * gt_height - inter_volume = inter_area * min(dt_height, gt_height) - iou_3d = inter_volume / (dt_volume + gt_volume - inter_volume) - return iou_3d - - -def calc_IoU(dt_xz, gt_xz, dt_height, gt_height): - """ - :param dt_xz: Prediction boundaries can also be corners, format: [[x1, z1], [x2, z2], ...] - :param gt_xz: Ground truth boundaries can also be corners, format: [[x1, z1], [x2, z2], ...] - :param dt_height: - :param gt_height: - :return: - """ - dt_area, gt_area, inter_area = calc_inter_area(dt_xz, gt_xz) - iou_2d = inter_area / (gt_area + dt_area - inter_area) - - dt_volume = dt_area * dt_height - gt_volume = gt_area * gt_height - inter_volume = inter_area * min(dt_height, gt_height) - iou_3d = inter_volume / (dt_volume + gt_volume - inter_volume) - - return iou_2d, iou_3d - - -def calc_Iou_height(dt_height, gt_height): - return min(dt_height, gt_height) / max(dt_height, gt_height) - - -# the following is for testing only -def calc_inter_area_floorplan(dt_floorplan, gt_floorplan): - intersect = np.sum(np.logical_and(dt_floorplan, gt_floorplan)) - dt_area = np.sum(dt_floorplan) - gt_area = np.sum(gt_floorplan) - return dt_area, gt_area, intersect - - -def calc_IoU_2D_floorplan(dt_floorplan, gt_floorplan): - dt_area, gt_area, inter_area = calc_inter_area_floorplan(dt_floorplan, gt_floorplan) - iou_2d = inter_area / (gt_area + dt_area - inter_area) - return iou_2d - - -def calc_IoU_3D_floorplan(dt_floorplan, gt_floorplan, dt_height, gt_height): - dt_area, gt_area, inter_area = calc_inter_area_floorplan(dt_floorplan, gt_floorplan) - dt_volume = dt_area * dt_height - gt_volume = gt_area * gt_height - inter_volume = inter_area * min(dt_height, gt_height) - iou_3d = inter_volume / (dt_volume + gt_volume - inter_volume) - return iou_3d - - -def calc_IoU_floorplan(dt_floorplan, gt_floorplan, dt_height, gt_height): - dt_area, gt_area, inter_area = calc_inter_area_floorplan(dt_floorplan, gt_floorplan) - iou_2d = inter_area / (gt_area + dt_area - inter_area) - - dt_volume = dt_area * dt_height - gt_volume = gt_area * gt_height - inter_volume = inter_area * min(dt_height, gt_height) - iou_3d = inter_volume / (dt_volume + gt_volume - inter_volume) - return iou_2d, iou_3d - - -if __name__ == '__main__': - from visualization.floorplan import draw_floorplan, draw_iou_floorplan - from visualization.boundary import draw_boundaries, corners2boundaries - from utils.conversion import uv2xyz - from utils.height import height2ratio - - # dummy data - dt_floor_corners = np.array([[0.2, 0.7], - [0.4, 0.7], - [0.6, 0.7], - [0.8, 0.7]]) - dt_height = 2.8 - - gt_floor_corners = np.array([[0.3, 0.7], - [0.5, 0.7], - [0.7, 0.7], - [0.9, 0.7]]) - gt_height = 3.2 - - dt_xz = uv2xyz(dt_floor_corners)[..., ::2] - gt_xz = uv2xyz(gt_floor_corners)[..., ::2] - - dt_floorplan = draw_floorplan(dt_xz, show=False, show_radius=1) - gt_floorplan = draw_floorplan(gt_xz, show=False, show_radius=1) - # dt_floorplan = draw_floorplan(dt_xz, show=False, 
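The 2D IoU above reduces to three shapely operations: two polygon areas and one intersection area. A standalone sanity check with two arbitrarily chosen overlapping squares:

```python
from shapely.geometry import Polygon

# Two axis-aligned 2x2 squares that overlap in a 1x2 strip.
dt = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
gt = Polygon([(1, 0), (3, 0), (3, 2), (1, 2)])

inter = dt.intersection(gt).area              # 2.0
iou_2d = inter / (dt.area + gt.area - inter)  # 2 / (4 + 4 - 2)
print(round(iou_2d, 3))                       # 0.333
```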
show_radius=2) - # gt_floorplan = draw_floorplan(gt_xz, show=False, show_radius=2) - - iou_2d, iou_3d = calc_IoU_floorplan(dt_floorplan, gt_floorplan, dt_height, gt_height) - print('use floor plan image:', iou_2d, iou_3d) - - iou_2d, iou_3d = calc_IoU(dt_xz, gt_xz, dt_height, gt_height) - print('use floor plan polygon:', iou_2d, iou_3d) - - draw_iou_floorplan(dt_xz, gt_xz, show=True, iou_2d=iou_2d, iou_3d=iou_3d) - pano_bd = draw_boundaries(np.zeros([512, 1024, 3]), corners_list=[dt_floor_corners], - boundary_color=[0, 0, 1], ratio=height2ratio(dt_height), draw_corners=False) - pano_bd = draw_boundaries(pano_bd, corners_list=[gt_floor_corners], - boundary_color=[0, 1, 0], ratio=height2ratio(gt_height), show=True, draw_corners=False) diff --git a/spaces/Datasculptor/MusicGen/audiocraft/models/builders.py b/spaces/Datasculptor/MusicGen/audiocraft/models/builders.py deleted file mode 100644 index 77ee5f96fea2e3c9e475fe961bc1a5ee473ed8eb..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/MusicGen/audiocraft/models/builders.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -All the functions to build the relevant models and modules -from the Hydra config. -""" - -import typing as tp -import warnings - -import audiocraft -import omegaconf -import torch - -from .encodec import CompressionModel, EncodecModel, FlattenedCompressionModel # noqa -from .lm import LMModel -from ..modules.codebooks_patterns import ( - CodebooksPatternProvider, - DelayedPatternProvider, - ParallelPatternProvider, - UnrolledPatternProvider, - VALLEPattern, - MusicLMPattern, -) -from ..modules.conditioners import ( - BaseConditioner, - ConditioningProvider, - LUTConditioner, - T5Conditioner, - ConditionFuser, - ChromaStemConditioner, -) -from .. import quantization as qt -from ..utils.utils import dict_from_config - - -def get_quantizer(quantizer: str, cfg: omegaconf.DictConfig, dimension: int) -> qt.BaseQuantizer: - klass = { - 'no_quant': qt.DummyQuantizer, - 'rvq': qt.ResidualVectorQuantizer - }[quantizer] - kwargs = dict_from_config(getattr(cfg, quantizer)) - if quantizer != 'no_quant': - kwargs['dimension'] = dimension - return klass(**kwargs) - - -def get_encodec_autoencoder(encoder_name: str, cfg: omegaconf.DictConfig): - if encoder_name == 'seanet': - kwargs = dict_from_config(getattr(cfg, 'seanet')) - encoder_override_kwargs = kwargs.pop('encoder') - decoder_override_kwargs = kwargs.pop('decoder') - encoder_kwargs = {**kwargs, **encoder_override_kwargs} - decoder_kwargs = {**kwargs, **decoder_override_kwargs} - encoder = audiocraft.modules.SEANetEncoder(**encoder_kwargs) - decoder = audiocraft.modules.SEANetDecoder(**decoder_kwargs) - return encoder, decoder - else: - raise KeyError(f'Unexpected compression model {cfg.compression_model}') - - -def get_compression_model(cfg: omegaconf.DictConfig) -> CompressionModel: - """Instantiate a compression model. 
- """ - if cfg.compression_model == 'encodec': - kwargs = dict_from_config(getattr(cfg, 'encodec')) - encoder_name = kwargs.pop('autoencoder') - quantizer_name = kwargs.pop('quantizer') - encoder, decoder = get_encodec_autoencoder(encoder_name, cfg) - quantizer = get_quantizer(quantizer_name, cfg, encoder.dimension) - frame_rate = kwargs['sample_rate'] // encoder.hop_length - renormalize = kwargs.pop('renormalize', None) - renorm = kwargs.pop('renorm') - if renormalize is None: - renormalize = renorm is not None - warnings.warn("You are using a deprecated EnCodec model. Please migrate to new renormalization.") - return EncodecModel(encoder, decoder, quantizer, - frame_rate=frame_rate, renormalize=renormalize, **kwargs).to(cfg.device) - else: - raise KeyError(f'Unexpected compression model {cfg.compression_model}') - - -def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel: - """Instantiate a transformer LM. - """ - if cfg.lm_model == 'transformer_lm': - kwargs = dict_from_config(getattr(cfg, 'transformer_lm')) - n_q = kwargs['n_q'] - q_modeling = kwargs.pop('q_modeling', None) - codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern') - attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout')) - cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance')) - cfg_prob, cfg_coef = cls_free_guidance["training_dropout"], cls_free_guidance["inference_coef"] - fuser = get_condition_fuser(cfg) - condition_provider = get_conditioner_provider(kwargs["dim"], cfg).to(cfg.device) - if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programatically - kwargs['cross_attention'] = True - if codebooks_pattern_cfg.modeling is None: - assert q_modeling is not None, \ - 'LM model should either have a codebook pattern defined or transformer_lm.q_modeling' - codebooks_pattern_cfg = omegaconf.OmegaConf.create( - {'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}} - ) - pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg) - return LMModel( - pattern_provider=pattern_provider, - condition_provider=condition_provider, - fuser=fuser, - cfg_dropout=cfg_prob, - cfg_coef=cfg_coef, - attribute_dropout=attribute_dropout, - dtype=getattr(torch, cfg.dtype), - device=cfg.device, - **kwargs - ).to(cfg.device) - else: - raise KeyError(f'Unexpected LM model {cfg.lm_model}') - - -def get_conditioner_provider(output_dim: int, cfg: omegaconf.DictConfig) -> ConditioningProvider: - """Instantiate a conditioning model. 
- """ - device = cfg.device - duration = cfg.dataset.segment_duration - cfg = getattr(cfg, "conditioners") - cfg = omegaconf.OmegaConf.create({}) if cfg is None else cfg - conditioners: tp.Dict[str, BaseConditioner] = {} - with omegaconf.open_dict(cfg): - condition_provider_args = cfg.pop('args', {}) - for cond, cond_cfg in cfg.items(): - model_type = cond_cfg["model"] - model_args = cond_cfg[model_type] - if model_type == "t5": - conditioners[str(cond)] = T5Conditioner(output_dim=output_dim, device=device, **model_args) - elif model_type == "lut": - conditioners[str(cond)] = LUTConditioner(output_dim=output_dim, **model_args) - elif model_type == "chroma_stem": - model_args.pop('cache_path', None) - conditioners[str(cond)] = ChromaStemConditioner( - output_dim=output_dim, - duration=duration, - device=device, - **model_args - ) - else: - raise ValueError(f"unrecognized conditioning model: {model_type}") - conditioner = ConditioningProvider(conditioners, device=device, **condition_provider_args) - return conditioner - - -def get_condition_fuser(cfg: omegaconf.DictConfig) -> ConditionFuser: - """Instantiate a condition fuser object. - """ - fuser_cfg = getattr(cfg, "fuser") - fuser_methods = ["sum", "cross", "prepend", "input_interpolate"] - fuse2cond = {k: fuser_cfg[k] for k in fuser_methods} - kwargs = {k: v for k, v in fuser_cfg.items() if k not in fuser_methods} - fuser = ConditionFuser(fuse2cond=fuse2cond, **kwargs) - return fuser - - -def get_codebooks_pattern_provider(n_q: int, cfg: omegaconf.DictConfig) -> CodebooksPatternProvider: - """Instantiate a codebooks pattern provider object. - """ - pattern_providers = { - 'parallel': ParallelPatternProvider, - 'delay': DelayedPatternProvider, - 'unroll': UnrolledPatternProvider, - 'valle': VALLEPattern, - 'musiclm': MusicLMPattern, - } - name = cfg.modeling - kwargs = dict_from_config(cfg.get(name)) if hasattr(cfg, name) else {} - klass = pattern_providers[name] - return klass(n_q, **kwargs) - - -def get_debug_compression_model(device='cpu'): - """Instantiate a debug compression model to be used for unit tests. - """ - seanet_kwargs = { - 'n_filters': 4, - 'n_residual_layers': 1, - 'dimension': 32, - 'ratios': [10, 8, 16] # 25 Hz at 32kHz - } - encoder = audiocraft.modules.SEANetEncoder(**seanet_kwargs) - decoder = audiocraft.modules.SEANetDecoder(**seanet_kwargs) - quantizer = qt.ResidualVectorQuantizer(dimension=32, bins=400, n_q=4) - init_x = torch.randn(8, 32, 128) - quantizer(init_x, 1) # initialize kmeans etc. - compression_model = EncodecModel( - encoder, decoder, quantizer, - frame_rate=25, sample_rate=32000, channels=1).to(device) - return compression_model.eval() - - -def get_debug_lm_model(device='cpu'): - """Instantiate a debug LM to be used for unit tests. 
- """ - pattern = DelayedPatternProvider(n_q=4) - dim = 16 - providers = { - 'description': LUTConditioner(n_bins=128, dim=dim, output_dim=dim, tokenizer="whitespace"), - } - condition_provider = ConditioningProvider(providers) - fuser = ConditionFuser( - {'cross': ['description'], 'prepend': [], - 'sum': [], 'input_interpolate': []}) - lm = LMModel( - pattern, condition_provider, fuser, - n_q=4, card=400, dim=dim, num_heads=4, custom=True, num_layers=2, - cross_attention=True, causal=True) - return lm.to(device).eval() diff --git a/spaces/Deci/DeciCoder-Demo/README.md b/spaces/Deci/DeciCoder-Demo/README.md deleted file mode 100644 index 798306e6b0f5c702be48cffb5d9f9d8767d3a05f..0000000000000000000000000000000000000000 --- a/spaces/Deci/DeciCoder-Demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: DeciCoder Demo -emoji: 🏎️ -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DeepLabCut/MegaDetector_DeepLabCut/viz_utils.py b/spaces/DeepLabCut/MegaDetector_DeepLabCut/viz_utils.py deleted file mode 100644 index ea282f2adf28826040b442c9f7d8c1baa3b56bb5..0000000000000000000000000000000000000000 --- a/spaces/DeepLabCut/MegaDetector_DeepLabCut/viz_utils.py +++ /dev/null @@ -1,192 +0,0 @@ -import json -import numpy as np - -from matplotlib import cm -import matplotlib -from PIL import Image, ImageColor, ImageFont, ImageDraw -import numpy as np -import pdb -from datetime import date -today = date.today() -FONTS = {'amiko': "fonts/Amiko-Regular.ttf", - 'nature': "fonts/LoveNature.otf", - 'painter':"fonts/PainterDecorator.otf", - 'animals': "fonts/UncialAnimals.ttf", - 'zen': "fonts/ZEN.TTF"} - -######################################### -# Draw keypoints on image -def draw_keypoints_on_image(image, - keypoints, - map_label_id_to_str, - flag_show_str_labels, - use_normalized_coordinates=True, - font_style='amiko', - font_size=8, - keypt_color="#ff0000", - marker_size=2, - ): - """Draws keypoints on an image. - Modified from: - https://www.programcreek.com/python/?code=fjchange%2Fobject_centric_VAD%2Fobject_centric_VAD-master%2Fobject_detection%2Futils%2Fvisualization_utils.py - Args: - image: a PIL.Image object. - keypoints: a numpy array with shape [num_keypoints, 2]. - map_label_id_to_str: dict with keys=label number and values= label string - flag_show_str_labels: boolean to select whether or not to show string labels - color: color to draw the keypoints with. Default is red. - radius: keypoint radius. Default value is 2. - use_normalized_coordinates: if True (default), treat keypoint values as - relative to the image. Otherwise treat them as absolute. 
- - - """ - # get a drawing context - draw = ImageDraw.Draw(image,"RGBA") - - im_width, im_height = image.size - keypoints_x = [k[0] for k in keypoints] - keypoints_y = [k[1] for k in keypoints] - alpha = [k[2] for k in keypoints] - norm = matplotlib.colors.Normalize(vmin=0, vmax=255) - - names_for_color = [i for i in map_label_id_to_str.keys()] - colores = np.linspace(0, 255, num=len(names_for_color),dtype= int) - - # adjust keypoints coords if required - if use_normalized_coordinates: - keypoints_x = tuple([im_width * x for x in keypoints_x]) - keypoints_y = tuple([im_height * y for y in keypoints_y]) - - #cmap = matplotlib.cm.get_cmap('hsv') - cmap2 = matplotlib.cm.get_cmap('Greys') - # draw ellipses around keypoints - for i, (keypoint_x, keypoint_y) in enumerate(zip(keypoints_x, keypoints_y)): - round_fill = list(cm.viridis(norm(colores[i]),bytes=True))#[round(num*255) for num in list(cmap(i))[:3]] #check! - if np.isnan(alpha[i]) == False : - round_fill[3] = round(alpha[i] *255) - if np.isnan(keypoint_x).any(): - continue - #print(round_fill) - #round_outline = [round(num*255) for num in list(cmap2(alpha[i]))[:3]] - draw.ellipse([(keypoint_x - marker_size, keypoint_y - marker_size), - (keypoint_x + marker_size, keypoint_y + marker_size)], - fill=tuple(round_fill), outline= 'black', width=1) #fill and outline: [0,255] - - # add string labels around keypoints - if flag_show_str_labels: - font = ImageFont.truetype(FONTS[font_style], - font_size) - draw.text((keypoint_x + marker_size, keypoint_y + marker_size),#(0.5*im_width, 0.5*im_height), #------- - map_label_id_to_str[i], - ImageColor.getcolor(keypt_color, "RGB"), # rgb # - font=font) - -######################################### -# Draw bboxes on image -def draw_bbox_w_text(img, - results, - font_style='amiko', - font_size=8): #TODO: select color too? 
- #pdb.set_trace() - bbxyxy = results - w, h = bbxyxy[2], bbxyxy[3] - shape = [(bbxyxy[0], bbxyxy[1]), (w , h)] - imgR = ImageDraw.Draw(img) - imgR.rectangle(shape, outline ="red",width=5) ##bb for animal - - confidence = bbxyxy[4] - string_bb = 'animal ' + str(round(confidence, 2)) - font = ImageFont.truetype(FONTS[font_style], font_size) - - text_size = font.getsize(string_bb) # (h,w) - position = (bbxyxy[0],bbxyxy[1] - text_size[1] -2 ) - left, top, right, bottom = imgR.textbbox(position, string_bb, font=font) - imgR.rectangle((left, top-5, right+5, bottom+5), fill="red") - imgR.text((bbxyxy[0] + 3 ,bbxyxy[1] - text_size[1] -2 ), string_bb, font=font, fill="black") - - return imgR - -########################################### -def save_results_as_json(md_results, dlc_outputs, map_dlc_label_id_to_str, thr,model,mega_model_input, path_to_output_file = 'download_predictions.json'): - - """ - Output detections as json file - - """ - # initialise dict to save to json - info = {} - info['date'] = str(today) - info['MD_model'] = str(mega_model_input) - # info from megaDetector - info['file']= md_results.files[0] - number_bb = len(md_results.xyxy[0].tolist()) - info['number_of_bb'] = number_bb - # info from DLC - number_bb_thr = len(dlc_outputs) - labels = [n for n in map_dlc_label_id_to_str.values()] - - # create list of bboxes above th - new_index = [] - for i in range(number_bb): - corner_x1,corner_y1,corner_x2,corner_y2,confidence, _ = md_results.xyxy[0].tolist()[i] - - if confidence > thr: - new_index.append(i) - - # define aux dict for every bounding box above threshold - for i in range(number_bb_thr): - aux={} - # MD output - corner_x1,corner_y1,corner_x2,corner_y2,confidence, _ = md_results.xyxy[0].tolist()[new_index[i]] - aux['corner_1'] = (corner_x1,corner_y1) - aux['corner_2'] = (corner_x2,corner_y2) - aux['predict MD'] = md_results.names[0] - aux['confidence MD'] = confidence - - # DLC output - info['dlc_model'] = model - kypts = [] - for s in dlc_outputs[i]: - aux1 = [] - for j in s: - aux1.append(float(j)) - - kypts.append(aux1) - aux['dlc_pred'] = dict(zip(labels,kypts)) - info['bb_' + str(new_index[i]) ]=aux - - # save dict as json - with open(path_to_output_file, 'w') as f: - json.dump(info, f, indent=1) - print('Output file saved at {}'.format(path_to_output_file)) - - return path_to_output_file - - -def save_results_only_dlc(dlc_outputs,map_label_id_to_str,model,output_file = 'dowload_predictions_dlc.json'): - - """ - write json dlc output - """ - info = {} - info['date'] = str(today) - labels = [n for n in map_label_id_to_str.values()] - info['dlc_model'] = model - kypts = [] - for s in dlc_outputs: - aux1 = [] - for j in s: - aux1.append(float(j)) - - kypts.append(aux1) - info['dlc_pred'] = dict(zip(labels,kypts)) - - with open(output_file, 'w') as f: - json.dump(info, f, indent=1) - print('Output file saved at {}'.format(output_file)) - - return output_file - - -########################################### \ No newline at end of file diff --git a/spaces/Deepaksiwania12/Face-Landmark-Detection/app.py b/spaces/Deepaksiwania12/Face-Landmark-Detection/app.py deleted file mode 100644 index 3703e2db0009fea1686d779101b431c47248e5e9..0000000000000000000000000000000000000000 --- a/spaces/Deepaksiwania12/Face-Landmark-Detection/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr - -def greet(name): - return "Hello " + name + "!!" 
- -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/ops/conv2d_gradfix.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/ops/conv2d_gradfix.py deleted file mode 100644 index 8fd6a8dd22661d42899c7dba5047b3a466eecfc7..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/ops/conv2d_gradfix.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom replacement for `torch.nn.functional.conv2d` that supports -arbitrarily high order gradients with zero performance penalty.""" - -import warnings -import contextlib -import torch - -# pylint: disable=redefined-builtin -# pylint: disable=arguments-differ -# pylint: disable=protected-access - -# ---------------------------------------------------------------------------- - -# Enable the custom op by setting this to true. -enabled = False -# Forcefully disable computation of gradients with respect to the weights. -weight_gradients_disabled = False - - -@contextlib.contextmanager -def no_weight_gradients(): - global weight_gradients_disabled - old = weight_gradients_disabled - weight_gradients_disabled = True - yield - weight_gradients_disabled = old - -# ---------------------------------------------------------------------------- - - -def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): - if _should_use_custom_op(input): - return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias) - return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) - - -def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1): - if _should_use_custom_op(input): - return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias) - return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation) - -# ---------------------------------------------------------------------------- - - -def _should_use_custom_op(input): - assert isinstance(input, torch.Tensor) - if (not enabled) or (not torch.backends.cudnn.enabled): - return False - if input.device.type != 'cuda': - return False - if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']): - return True - warnings.warn( - f'conv2d_gradfix not supported on PyTorch {torch.__version__}. 
Falling back to torch.nn.functional.conv2d().') - return False - - -def _tuple_of_ints(xs, ndim): - xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim - assert len(xs) == ndim - assert all(isinstance(x, int) for x in xs) - return xs - -# ---------------------------------------------------------------------------- - - -_conv2d_gradfix_cache = dict() - - -def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups): - # Parse arguments. - ndim = 2 - weight_shape = tuple(weight_shape) - stride = _tuple_of_ints(stride, ndim) - padding = _tuple_of_ints(padding, ndim) - output_padding = _tuple_of_ints(output_padding, ndim) - dilation = _tuple_of_ints(dilation, ndim) - - # Lookup from cache. - key = (transpose, weight_shape, stride, padding, - output_padding, dilation, groups) - if key in _conv2d_gradfix_cache: - return _conv2d_gradfix_cache[key] - - # Validate arguments. - assert groups >= 1 - assert len(weight_shape) == ndim + 2 - assert all(stride[i] >= 1 for i in range(ndim)) - assert all(padding[i] >= 0 for i in range(ndim)) - assert all(dilation[i] >= 0 for i in range(ndim)) - if not transpose: - assert all(output_padding[i] == 0 for i in range(ndim)) - else: # transpose - assert all(0 <= output_padding[i] < max( - stride[i], dilation[i]) for i in range(ndim)) - - # Helpers. - common_kwargs = dict(stride=stride, padding=padding, - dilation=dilation, groups=groups) - - def calc_output_padding(input_shape, output_shape): - if transpose: - return [0, 0] - return [ - input_shape[i + 2] - - (output_shape[i + 2] - 1) * stride[i] - - (1 - 2 * padding[i]) - - dilation[i] * (weight_shape[i + 2] - 1) - for i in range(ndim) - ] - - # Forward & backward. - class Conv2d(torch.autograd.Function): - @staticmethod - def forward(ctx, input, weight, bias): - assert weight.shape == weight_shape - if not transpose: - output = torch.nn.functional.conv2d( - input=input, weight=weight, bias=bias, **common_kwargs) - else: # transpose - output = torch.nn.functional.conv_transpose2d( - input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs) - ctx.save_for_backward(input, weight) - return output - - @staticmethod - def backward(ctx, grad_output): - input, weight = ctx.saved_tensors - grad_input = None - grad_weight = None - grad_bias = None - - if ctx.needs_input_grad[0]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape) - grad_input = _conv2d_gradfix(transpose=( - not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None) - assert grad_input.shape == input.shape - - if ctx.needs_input_grad[1] and not weight_gradients_disabled: - grad_weight = Conv2dGradWeight.apply(grad_output, input) - assert grad_weight.shape == weight_shape - - if ctx.needs_input_grad[2]: - grad_bias = grad_output.sum([0, 2, 3]) - - return grad_input, grad_weight, grad_bias - - # Gradient with respect to the weights. 
- class Conv2dGradWeight(torch.autograd.Function): - @staticmethod - def forward(ctx, grad_output, input): - op = torch._C._jit_get_operation( - 'aten::cudnn_convolution_backward_weight' if not transpose else 'aten::cudnn_convolution_transpose_backward_weight') - flags = [torch.backends.cudnn.benchmark, - torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32] - grad_weight = op(weight_shape, grad_output, input, - padding, stride, dilation, groups, *flags) - assert grad_weight.shape == weight_shape - ctx.save_for_backward(grad_output, input) - return grad_weight - - @staticmethod - def backward(ctx, grad2_grad_weight): - grad_output, input = ctx.saved_tensors - grad2_grad_output = None - grad2_input = None - - if ctx.needs_input_grad[0]: - grad2_grad_output = Conv2d.apply( - input, grad2_grad_weight, None) - assert grad2_grad_output.shape == grad_output.shape - - if ctx.needs_input_grad[1]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape) - grad2_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, - output_padding=p, **common_kwargs).apply(grad_output, grad2_grad_weight, None) - assert grad2_input.shape == input.shape - - return grad2_grad_output, grad2_input - - _conv2d_gradfix_cache[key] = Conv2d - return Conv2d - -# ---------------------------------------------------------------------------- diff --git a/spaces/DragGan/DragGan/stylegan_human/pti/pti_models/e4e/encoders/model_irse.py b/spaces/DragGan/DragGan/stylegan_human/pti/pti_models/e4e/encoders/model_irse.py deleted file mode 100644 index 976ce2c61104efdc6b0015d895830346dd01bc10..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/pti/pti_models/e4e/encoders/model_irse.py +++ /dev/null @@ -1,84 +0,0 @@ -from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module -from encoder4editing.models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm - -""" -Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) -""" - - -class Backbone(Module): - def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True): - super(Backbone, self).__init__() - assert input_size in [112, 224], "input_size should be 112 or 224" - assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152" - assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se" - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - unit_module = bottleneck_IR_SE - self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - if input_size == 112: - self.output_layer = Sequential(BatchNorm2d(512), - Dropout(drop_ratio), - Flatten(), - Linear(512 * 7 * 7, 512), - BatchNorm1d(512, affine=affine)) - else: - self.output_layer = Sequential(BatchNorm2d(512), - Dropout(drop_ratio), - Flatten(), - Linear(512 * 14 * 14, 512), - BatchNorm1d(512, affine=affine)) - - modules = [] - for block in blocks: - for bottleneck in block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - def forward(self, x): - x = self.input_layer(x) - x = self.body(x) - x = self.output_layer(x) - return l2_norm(x) - - -def IR_50(input_size): - """Constructs a ir-50 model.""" - model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def 
IR_101(input_size): - """Constructs a ir-101 model.""" - model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def IR_152(input_size): - """Constructs a ir-152 model.""" - model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def IR_SE_50(input_size): - """Constructs a ir_se-50 model.""" - model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False) - return model - - -def IR_SE_101(input_size): - """Constructs a ir_se-101 model.""" - model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False) - return model - - -def IR_SE_152(input_size): - """Constructs a ir_se-152 model.""" - model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False) - return model diff --git a/spaces/EronSamez/RVC_HFmeu/demucs/wav.py b/spaces/EronSamez/RVC_HFmeu/demucs/wav.py deleted file mode 100644 index a65c3b2ba5aacb1fcab3753f1f85ff7b8db7fc11..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/demucs/wav.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from collections import OrderedDict -import hashlib -import math -import json -from pathlib import Path - -import julius -import torch as th -from torch import distributed -import torchaudio as ta -from torch.nn import functional as F - -from .audio import convert_audio_channels -from .compressed import get_musdb_tracks - -MIXTURE = "mixture" -EXT = ".wav" - - -def _track_metadata(track, sources): - track_length = None - track_samplerate = None - for source in sources + [MIXTURE]: - file = track / f"{source}{EXT}" - info = ta.info(str(file)) - length = info.num_frames - if track_length is None: - track_length = length - track_samplerate = info.sample_rate - elif track_length != length: - raise ValueError( - f"Invalid length for file {file}: " - f"expecting {track_length} but got {length}.") - elif info.sample_rate != track_samplerate: - raise ValueError( - f"Invalid sample rate for file {file}: " - f"expecting {track_samplerate} but got {info.sample_rate}.") - if source == MIXTURE: - wav, _ = ta.load(str(file)) - wav = wav.mean(0) - mean = wav.mean().item() - std = wav.std().item() - - return {"length": length, "mean": mean, "std": std, "samplerate": track_samplerate} - - -def _build_metadata(path, sources): - meta = {} - path = Path(path) - for file in path.iterdir(): - meta[file.name] = _track_metadata(file, sources) - return meta - - -class Wavset: - def __init__( - self, - root, metadata, sources, - length=None, stride=None, normalize=True, - samplerate=44100, channels=2): - """ - Waveset (or mp3 set for that matter). Can be used to train - with arbitrary sources. Each track should be one folder inside of `path`. - The folder should contain files named `{source}.{ext}`. - Files will be grouped according to `sources` (each source is a list of - filenames). - - Sample rate and channels will be converted on the fly. - - `length` is the sample size to extract (in samples, not duration). - `stride` is how many samples to move by between each example. 
- """ - self.root = Path(root) - self.metadata = OrderedDict(metadata) - self.length = length - self.stride = stride or length - self.normalize = normalize - self.sources = sources - self.channels = channels - self.samplerate = samplerate - self.num_examples = [] - for name, meta in self.metadata.items(): - track_length = int(self.samplerate * meta['length'] / meta['samplerate']) - if length is None or track_length < length: - examples = 1 - else: - examples = int(math.ceil((track_length - self.length) / self.stride) + 1) - self.num_examples.append(examples) - - def __len__(self): - return sum(self.num_examples) - - def get_file(self, name, source): - return self.root / name / f"{source}{EXT}" - - def __getitem__(self, index): - for name, examples in zip(self.metadata, self.num_examples): - if index >= examples: - index -= examples - continue - meta = self.metadata[name] - num_frames = -1 - offset = 0 - if self.length is not None: - offset = int(math.ceil( - meta['samplerate'] * self.stride * index / self.samplerate)) - num_frames = int(math.ceil( - meta['samplerate'] * self.length / self.samplerate)) - wavs = [] - for source in self.sources: - file = self.get_file(name, source) - wav, _ = ta.load(str(file), frame_offset=offset, num_frames=num_frames) - wav = convert_audio_channels(wav, self.channels) - wavs.append(wav) - - example = th.stack(wavs) - example = julius.resample_frac(example, meta['samplerate'], self.samplerate) - if self.normalize: - example = (example - meta['mean']) / meta['std'] - if self.length: - example = example[..., :self.length] - example = F.pad(example, (0, self.length - example.shape[-1])) - return example - - -def get_wav_datasets(args, samples, sources): - sig = hashlib.sha1(str(args.wav).encode()).hexdigest()[:8] - metadata_file = args.metadata / (sig + ".json") - train_path = args.wav / "train" - valid_path = args.wav / "valid" - if not metadata_file.is_file() and args.rank == 0: - train = _build_metadata(train_path, sources) - valid = _build_metadata(valid_path, sources) - json.dump([train, valid], open(metadata_file, "w")) - if args.world_size > 1: - distributed.barrier() - train, valid = json.load(open(metadata_file)) - train_set = Wavset(train_path, train, sources, - length=samples, stride=args.data_stride, - samplerate=args.samplerate, channels=args.audio_channels, - normalize=args.norm_wav) - valid_set = Wavset(valid_path, valid, [MIXTURE] + sources, - samplerate=args.samplerate, channels=args.audio_channels, - normalize=args.norm_wav) - return train_set, valid_set - - -def get_musdb_wav_datasets(args, samples, sources): - metadata_file = args.metadata / "musdb_wav.json" - root = args.musdb / "train" - if not metadata_file.is_file() and args.rank == 0: - metadata = _build_metadata(root, sources) - json.dump(metadata, open(metadata_file, "w")) - if args.world_size > 1: - distributed.barrier() - metadata = json.load(open(metadata_file)) - - train_tracks = get_musdb_tracks(args.musdb, is_wav=True, subsets=["train"], split="train") - metadata_train = {name: meta for name, meta in metadata.items() if name in train_tracks} - metadata_valid = {name: meta for name, meta in metadata.items() if name not in train_tracks} - train_set = Wavset(root, metadata_train, sources, - length=samples, stride=args.data_stride, - samplerate=args.samplerate, channels=args.audio_channels, - normalize=args.norm_wav) - valid_set = Wavset(root, metadata_valid, [MIXTURE] + sources, - samplerate=args.samplerate, channels=args.audio_channels, - normalize=args.norm_wav) - return 
train_set, valid_set diff --git a/spaces/FFZG-cleopatra/latvian-twitter-sentiment-classifier/model.py b/spaces/FFZG-cleopatra/latvian-twitter-sentiment-classifier/model.py deleted file mode 100644 index e8735106e733524e046821c733601b1394676ee3..0000000000000000000000000000000000000000 --- a/spaces/FFZG-cleopatra/latvian-twitter-sentiment-classifier/model.py +++ /dev/null @@ -1,36 +0,0 @@ -import config -import transformers -import torch.nn as nn - - -class BERTBaseUncased(nn.Module): - def __init__(self): - super(BERTBaseUncased, self).__init__() - self.bert = transformers.BertModel.from_pretrained(config.BERT_PATH) - - self.bert_drop = nn.Dropout(0.3) - - self.out = nn.Linear(768, 3) - # self.out = nn.Linear(256, 3) - - nn.init.xavier_uniform_(self.out.weight) - - def forward(self, ids, mask, token_type_ids): - _, o2 = self.bert( - ids, - attention_mask=mask, - token_type_ids=token_type_ids - ) - bo = self.bert_drop(o2) - # bo = self.tanh(self.fc(bo)) # to be commented if original - output = self.out(bo) - return output - - def extract_features(self, ids, mask, token_type_ids): - _, o2 = self.bert( - ids, - attention_mask=mask, - token_type_ids=token_type_ids - ) - bo = self.bert_drop(o2) - return bo \ No newline at end of file diff --git "a/spaces/GaenKoki/voicevox/docs/VOICEVOX\351\237\263\345\243\260\345\220\210\346\210\220\343\202\250\343\203\263\343\202\270\343\203\263\343\201\250\343\201\256\351\200\243\346\220\272.md" "b/spaces/GaenKoki/voicevox/docs/VOICEVOX\351\237\263\345\243\260\345\220\210\346\210\220\343\202\250\343\203\263\343\202\270\343\203\263\343\201\250\343\201\256\351\200\243\346\220\272.md" deleted file mode 100644 index 540173be1b280ce5c3593b8aed02fd42ef633f65..0000000000000000000000000000000000000000 --- "a/spaces/GaenKoki/voicevox/docs/VOICEVOX\351\237\263\345\243\260\345\220\210\346\210\220\343\202\250\343\203\263\343\202\270\343\203\263\343\201\250\343\201\256\351\200\243\346\220\272.md" +++ /dev/null @@ -1,7 +0,0 @@ -メモ書き程度ですが、どういう方針で開発を進めているかを紹介します。 - -- バージョンが上がっても、`/audio_query`で返ってくる値をそのまま`/synthesis`に POST すれば音声合成できるようにする予定です - - `AudioQuery`のパラメータは増えますが、なるべくデフォルト値で以前と変わらない音声が生成されるようにします -- バージョン 0.7 から音声スタイルが実装されました。スタイルの情報は`/speakers`から取得できます - - スタイルの情報にある`style_id`を`speaker`に指定することで、今まで通り音声合成ができます - - style_id の指定先が speaker なのは互換性のためです diff --git a/spaces/GipAdonimus/Real-Time-Voice-Cloning/encoder_train.py b/spaces/GipAdonimus/Real-Time-Voice-Cloning/encoder_train.py deleted file mode 100644 index b8740a894d615aadfe529cb36068fc8e3496125f..0000000000000000000000000000000000000000 --- a/spaces/GipAdonimus/Real-Time-Voice-Cloning/encoder_train.py +++ /dev/null @@ -1,47 +0,0 @@ -from utils.argutils import print_args -from encoder.train import train -from pathlib import Path -import argparse - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Trains the speaker encoder. You must have run encoder_preprocess.py first.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - parser.add_argument("run_id", type=str, help= \ - "Name for this model instance. If a model state from the same run ID was previously " - "saved, the training will restart from there. Pass -f to overwrite saved states and " - "restart from scratch.") - parser.add_argument("clean_data_root", type=Path, help= \ - "Path to the output directory of encoder_preprocess.py. 
If you left the default " - "output directory when preprocessing, it should be /SV2TTS/encoder/.") - parser.add_argument("-m", "--models_dir", type=Path, default="encoder/saved_models/", help=\ - "Path to the output directory that will contain the saved model weights, as well as " - "backups of those weights and plots generated during training.") - parser.add_argument("-v", "--vis_every", type=int, default=10, help= \ - "Number of steps between updates of the loss and the plots.") - parser.add_argument("-u", "--umap_every", type=int, default=100, help= \ - "Number of steps between updates of the umap projection. Set to 0 to never update the " - "projections.") - parser.add_argument("-s", "--save_every", type=int, default=500, help= \ - "Number of steps between updates of the model on the disk. Set to 0 to never save the " - "model.") - parser.add_argument("-b", "--backup_every", type=int, default=7500, help= \ - "Number of steps between backups of the model. Set to 0 to never make backups of the " - "model.") - parser.add_argument("-f", "--force_restart", action="store_true", help= \ - "Do not load any saved model.") - parser.add_argument("--visdom_server", type=str, default="http://localhost") - parser.add_argument("--no_visdom", action="store_true", help= \ - "Disable visdom.") - args = parser.parse_args() - - # Process the arguments - args.models_dir.mkdir(exist_ok=True) - - # Run the training - print_args(args, parser) - train(**vars(args)) - \ No newline at end of file diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py deleted file mode 100644 index c4e86387e3ce4aad3dd68d7613160fced4d3785b..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py', - '../_base_/default_runtime.py' -] -model = dict( - roi_head=dict( - bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15))) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=15) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py deleted file mode 100644 index e87d21a4e6a241f5af892eb11aa82e2c6012a31c..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_2x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_64x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 31e5943216f19a87a2f1e6f666efead573f72626..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py 
+++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_64x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py deleted file mode 100644 index 7866acebea689e7a863a836c326b1407de733fe8..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', - '../_base_/default_runtime.py' -] -model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -# actual epoch = 3 * 3 = 9 -lr_config = dict(policy='step', step=[3]) -# runtime settings -runner = dict( - type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/coder/yolo_bbox_coder.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/coder/yolo_bbox_coder.py deleted file mode 100644 index d6d0e82ac780820952938d8751ac9776ea31588a..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/coder/yolo_bbox_coder.py +++ /dev/null @@ -1,89 +0,0 @@ -import mmcv -import torch - -from ..builder import BBOX_CODERS -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class YOLOBBoxCoder(BaseBBoxCoder): - """YOLO BBox coder. - - Following `YOLO `_, this coder divide - image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh). - cx, cy in [0., 1.], denotes relative center position w.r.t the center of - bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`. - - Args: - eps (float): Min value of cx, cy when encoding. - """ - - def __init__(self, eps=1e-6): - super(BaseBBoxCoder, self).__init__() - self.eps = eps - - @mmcv.jit(coderize=True) - def encode(self, bboxes, gt_bboxes, stride): - """Get box regression transformation deltas that can be used to - transform the ``bboxes`` into the ``gt_bboxes``. - - Args: - bboxes (torch.Tensor): Source boxes, e.g., anchors. - gt_bboxes (torch.Tensor): Target of the transformation, e.g., - ground-truth boxes. - stride (torch.Tensor | int): Stride of bboxes. 
- - Returns: - torch.Tensor: Box transformation deltas - """ - - assert bboxes.size(0) == gt_bboxes.size(0) - assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 - x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5 - y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5 - w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0] - h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1] - x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5 - y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5 - w = bboxes[..., 2] - bboxes[..., 0] - h = bboxes[..., 3] - bboxes[..., 1] - w_target = torch.log((w_gt / w).clamp(min=self.eps)) - h_target = torch.log((h_gt / h).clamp(min=self.eps)) - x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp( - self.eps, 1 - self.eps) - y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp( - self.eps, 1 - self.eps) - encoded_bboxes = torch.stack( - [x_center_target, y_center_target, w_target, h_target], dim=-1) - return encoded_bboxes - - @mmcv.jit(coderize=True) - def decode(self, bboxes, pred_bboxes, stride): - """Apply transformation `pred_bboxes` to `boxes`. - - Args: - boxes (torch.Tensor): Basic boxes, e.g. anchors. - pred_bboxes (torch.Tensor): Encoded boxes with shape - stride (torch.Tensor | int): Strides of bboxes. - - Returns: - torch.Tensor: Decoded boxes. - """ - assert pred_bboxes.size(0) == bboxes.size(0) - assert pred_bboxes.size(-1) == bboxes.size(-1) == 4 - x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5 - y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5 - w = bboxes[..., 2] - bboxes[..., 0] - h = bboxes[..., 3] - bboxes[..., 1] - # Get outputs x, y - x_center_pred = (pred_bboxes[..., 0] - 0.5) * stride + x_center - y_center_pred = (pred_bboxes[..., 1] - 0.5) * stride + y_center - w_pred = torch.exp(pred_bboxes[..., 2]) * w - h_pred = torch.exp(pred_bboxes[..., 3]) * h - - decoded_bboxes = torch.stack( - (x_center_pred - w_pred / 2, y_center_pred - h_pred / 2, - x_center_pred + w_pred / 2, y_center_pred + h_pred / 2), - dim=-1) - - return decoded_bboxes diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/custom.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/custom.py deleted file mode 100644 index 1a2351c217f43d32178053dfc682a2b241f9a3f1..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/custom.py +++ /dev/null @@ -1,323 +0,0 @@ -import os.path as osp -import warnings -from collections import OrderedDict - -import mmcv -import numpy as np -from mmcv.utils import print_log -from torch.utils.data import Dataset - -from mmdet.core import eval_map, eval_recalls -from .builder import DATASETS -from .pipelines import Compose - - -@DATASETS.register_module() -class CustomDataset(Dataset): - """Custom dataset for detection. - - The annotation format is shown as follows. The `ann` field is optional for - testing. - - .. code-block:: none - - [ - { - 'filename': 'a.jpg', - 'width': 1280, - 'height': 720, - 'ann': { - 'bboxes': (n, 4) in (x1, y1, x2, y2) order. - 'labels': (n, ), - 'bboxes_ignore': (k, 4), (optional field) - 'labels_ignore': (k, 4) (optional field) - } - }, - ... - ] - - Args: - ann_file (str): Annotation file path. - pipeline (list[dict]): Processing pipeline. - classes (str | Sequence[str], optional): Specify classes to load. - If is None, ``cls.CLASSES`` will be used. Default: None. - data_root (str, optional): Data root for ``ann_file``, - ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified. 
- test_mode (bool, optional): If set True, annotation will not be loaded. - filter_empty_gt (bool, optional): If set true, images without bounding - boxes of the dataset's classes will be filtered out. This option - only works when `test_mode=False`, i.e., we never filter images - during tests. - """ - - CLASSES = None - - def __init__(self, - ann_file, - pipeline, - classes=None, - data_root=None, - img_prefix='', - seg_prefix=None, - proposal_file=None, - test_mode=False, - filter_empty_gt=True): - self.ann_file = ann_file - self.data_root = data_root - self.img_prefix = img_prefix - self.seg_prefix = seg_prefix - self.proposal_file = proposal_file - self.test_mode = test_mode - self.filter_empty_gt = filter_empty_gt - self.CLASSES = self.get_classes(classes) - - # join paths if data_root is specified - if self.data_root is not None: - if not osp.isabs(self.ann_file): - self.ann_file = osp.join(self.data_root, self.ann_file) - if not (self.img_prefix is None or osp.isabs(self.img_prefix)): - self.img_prefix = osp.join(self.data_root, self.img_prefix) - if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)): - self.seg_prefix = osp.join(self.data_root, self.seg_prefix) - if not (self.proposal_file is None - or osp.isabs(self.proposal_file)): - self.proposal_file = osp.join(self.data_root, - self.proposal_file) - # load annotations (and proposals) - self.data_infos = self.load_annotations(self.ann_file) - - if self.proposal_file is not None: - self.proposals = self.load_proposals(self.proposal_file) - else: - self.proposals = None - - # filter images too small and containing no annotations - if not test_mode: - valid_inds = self._filter_imgs() - self.data_infos = [self.data_infos[i] for i in valid_inds] - if self.proposals is not None: - self.proposals = [self.proposals[i] for i in valid_inds] - # set group flag for the sampler - self._set_group_flag() - - # processing pipeline - self.pipeline = Compose(pipeline) - - def __len__(self): - """Total number of samples of data.""" - return len(self.data_infos) - - def load_annotations(self, ann_file): - """Load annotation from annotation file.""" - return mmcv.load(ann_file) - - def load_proposals(self, proposal_file): - """Load proposal from proposal file.""" - return mmcv.load(proposal_file) - - def get_ann_info(self, idx): - """Get annotation by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - - return self.data_infos[idx]['ann'] - - def get_cat_ids(self, idx): - """Get category ids by index. - - Args: - idx (int): Index of data. - - Returns: - list[int]: All categories in the image of specified index. - """ - - return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist() - - def pre_pipeline(self, results): - """Prepare results dict for pipeline.""" - results['img_prefix'] = self.img_prefix - results['seg_prefix'] = self.seg_prefix - results['proposal_file'] = self.proposal_file - results['bbox_fields'] = [] - results['mask_fields'] = [] - results['seg_fields'] = [] - - def _filter_imgs(self, min_size=32): - """Filter images too small.""" - if self.filter_empty_gt: - warnings.warn( - 'CustomDataset does not support filtering empty gt images.') - valid_inds = [] - for i, img_info in enumerate(self.data_infos): - if min(img_info['width'], img_info['height']) >= min_size: - valid_inds.append(i) - return valid_inds - - def _set_group_flag(self): - """Set flag according to image aspect ratio. 
- - Images with aspect ratio greater than 1 will be set as group 1, - otherwise group 0. - """ - self.flag = np.zeros(len(self), dtype=np.uint8) - for i in range(len(self)): - img_info = self.data_infos[i] - if img_info['width'] / img_info['height'] > 1: - self.flag[i] = 1 - - def _rand_another(self, idx): - """Get another random index from the same group as the given index.""" - pool = np.where(self.flag == self.flag[idx])[0] - return np.random.choice(pool) - - def __getitem__(self, idx): - """Get training/test data after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Training/test data (with annotation if `test_mode` is set \ - True). - """ - - if self.test_mode: - return self.prepare_test_img(idx) - while True: - data = self.prepare_train_img(idx) - if data is None: - idx = self._rand_another(idx) - continue - return data - - def prepare_train_img(self, idx): - """Get training data and annotations after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Training data and annotation after pipeline with new keys \ - introduced by pipeline. - """ - - img_info = self.data_infos[idx] - ann_info = self.get_ann_info(idx) - results = dict(img_info=img_info, ann_info=ann_info) - if self.proposals is not None: - results['proposals'] = self.proposals[idx] - self.pre_pipeline(results) - return self.pipeline(results) - - def prepare_test_img(self, idx): - """Get testing data after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Testing data after pipeline with new keys introduced by \ - pipeline. - """ - - img_info = self.data_infos[idx] - results = dict(img_info=img_info) - if self.proposals is not None: - results['proposals'] = self.proposals[idx] - self.pre_pipeline(results) - return self.pipeline(results) - - @classmethod - def get_classes(cls, classes=None): - """Get class names of current dataset. - - Args: - classes (Sequence[str] | str | None): If classes is None, use - default CLASSES defined by builtin dataset. If classes is a - string, take it as a file name. The file contains the name of - classes where each line contains one class name. If classes is - a tuple or list, override the CLASSES defined by the dataset. - - Returns: - tuple[str] or list[str]: Names of categories of the dataset. - """ - if classes is None: - return cls.CLASSES - - if isinstance(classes, str): - # take it as a file path - class_names = mmcv.list_from_file(classes) - elif isinstance(classes, (tuple, list)): - class_names = classes - else: - raise ValueError(f'Unsupported type {type(classes)} of classes.') - - return class_names - - def format_results(self, results, **kwargs): - """Place holder to format result to dataset specific output.""" - - def evaluate(self, - results, - metric='mAP', - logger=None, - proposal_nums=(100, 300, 1000), - iou_thr=0.5, - scale_ranges=None): - """Evaluate the dataset. - - Args: - results (list): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. - logger (logging.Logger | None | str): Logger used for printing - related information during evaluation. Default: None. - proposal_nums (Sequence[int]): Proposal number used for evaluating - recalls, such as recall@100, recall@1000. - Default: (100, 300, 1000). - iou_thr (float | list[float]): IoU threshold. Default: 0.5. - scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP. - Default: None. 
- """ - - if not isinstance(metric, str): - assert len(metric) == 1 - metric = metric[0] - allowed_metrics = ['mAP', 'recall'] - if metric not in allowed_metrics: - raise KeyError(f'metric {metric} is not supported') - annotations = [self.get_ann_info(i) for i in range(len(self))] - eval_results = OrderedDict() - iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr - if metric == 'mAP': - assert isinstance(iou_thrs, list) - mean_aps = [] - for iou_thr in iou_thrs: - print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}') - mean_ap, _ = eval_map( - results, - annotations, - scale_ranges=scale_ranges, - iou_thr=iou_thr, - dataset=self.CLASSES, - logger=logger) - mean_aps.append(mean_ap) - eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3) - eval_results['mAP'] = sum(mean_aps) / len(mean_aps) - elif metric == 'recall': - gt_bboxes = [ann['bboxes'] for ann in annotations] - recalls = eval_recalls( - gt_bboxes, results, proposal_nums, iou_thr, logger=logger) - for i, num in enumerate(proposal_nums): - for j, iou in enumerate(iou_thrs): - eval_results[f'recall@{num}@{iou}'] = recalls[i, j] - if recalls.shape[1] > 1: - ar = recalls.mean(axis=1) - for i, num in enumerate(proposal_nums): - eval_results[f'AR@{num}'] = ar[i] - return eval_results diff --git a/spaces/Grezz/generate_human_motion/pyrender/pyrender/node.py b/spaces/Grezz/generate_human_motion/pyrender/pyrender/node.py deleted file mode 100644 index 1f37f7856cc732a37dc58253022a7c331489493e..0000000000000000000000000000000000000000 --- a/spaces/Grezz/generate_human_motion/pyrender/pyrender/node.py +++ /dev/null @@ -1,263 +0,0 @@ -"""Nodes, conforming to the glTF 2.0 standards as specified in -https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-node - -Author: Matthew Matl -""" -import numpy as np - -import trimesh.transformations as transformations - -from .camera import Camera -from .mesh import Mesh -from .light import Light - - -class Node(object): - """A node in the node hierarchy. - - Parameters - ---------- - name : str, optional - The user-defined name of this object. - camera : :class:`Camera`, optional - The camera in this node. - children : list of :class:`Node` - The children of this node. - skin : int, optional - The index of the skin referenced by this node. - matrix : (4,4) float, optional - A floating-point 4x4 transformation matrix. - mesh : :class:`Mesh`, optional - The mesh in this node. - rotation : (4,) float, optional - The node's unit quaternion in the order (x, y, z, w), where - w is the scalar. - scale : (3,) float, optional - The node's non-uniform scale, given as the scaling factors along the x, - y, and z axes. - translation : (3,) float, optional - The node's translation along the x, y, and z axes. - weights : (n,) float - The weights of the instantiated Morph Target. Number of elements must - match number of Morph Targets of used mesh. - light : :class:`Light`, optional - The light in this node. 
- """ - - def __init__(self, - name=None, - camera=None, - children=None, - skin=None, - matrix=None, - mesh=None, - rotation=None, - scale=None, - translation=None, - weights=None, - light=None): - # Set defaults - if children is None: - children = [] - - self._matrix = None - self._scale = None - self._rotation = None - self._translation = None - if matrix is None: - if rotation is None: - rotation = np.array([0.0, 0.0, 0.0, 1.0]) - if translation is None: - translation = np.zeros(3) - if scale is None: - scale = np.ones(3) - self.rotation = rotation - self.translation = translation - self.scale = scale - else: - self.matrix = matrix - - self.name = name - self.camera = camera - self.children = children - self.skin = skin - self.mesh = mesh - self.weights = weights - self.light = light - - @property - def name(self): - """str : The user-defined name of this object. - """ - return self._name - - @name.setter - def name(self, value): - if value is not None: - value = str(value) - self._name = value - - @property - def camera(self): - """:class:`Camera` : The camera in this node. - """ - return self._camera - - @camera.setter - def camera(self, value): - if value is not None and not isinstance(value, Camera): - raise TypeError('Value must be a camera') - self._camera = value - - @property - def children(self): - """list of :class:`Node` : The children of this node. - """ - return self._children - - @children.setter - def children(self, value): - self._children = value - - @property - def skin(self): - """int : The skin index for this node. - """ - return self._skin - - @skin.setter - def skin(self, value): - self._skin = value - - @property - def mesh(self): - """:class:`Mesh` : The mesh in this node. - """ - return self._mesh - - @mesh.setter - def mesh(self, value): - if value is not None and not isinstance(value, Mesh): - raise TypeError('Value must be a mesh') - self._mesh = value - - @property - def light(self): - """:class:`Light` : The light in this node. - """ - return self._light - - @light.setter - def light(self, value): - if value is not None and not isinstance(value, Light): - raise TypeError('Value must be a light') - self._light = value - - @property - def rotation(self): - """(4,) float : The xyzw quaternion for this node. - """ - return self._rotation - - @rotation.setter - def rotation(self, value): - value = np.asanyarray(value) - if value.shape != (4,): - raise ValueError('Quaternion must be a (4,) vector') - if np.abs(np.linalg.norm(value) - 1.0) > 1e-3: - raise ValueError('Quaternion must have norm == 1.0') - self._rotation = value - self._matrix = None - - @property - def translation(self): - """(3,) float : The translation for this node. - """ - return self._translation - - @translation.setter - def translation(self, value): - value = np.asanyarray(value) - if value.shape != (3,): - raise ValueError('Translation must be a (3,) vector') - self._translation = value - self._matrix = None - - @property - def scale(self): - """(3,) float : The scale for this node. - """ - return self._scale - - @scale.setter - def scale(self, value): - value = np.asanyarray(value) - if value.shape != (3,): - raise ValueError('Scale must be a (3,) vector') - self._scale = value - self._matrix = None - - @property - def matrix(self): - """(4,4) float : The homogenous transform matrix for this node. - - Note that this matrix's elements are not settable, - it's just a copy of the internal matrix. You can set the whole - matrix, but not an individual element. 
- """ - if self._matrix is None: - self._matrix = self._m_from_tqs( - self.translation, self.rotation, self.scale - ) - return self._matrix.copy() - - @matrix.setter - def matrix(self, value): - value = np.asanyarray(value) - if value.shape != (4,4): - raise ValueError('Matrix must be a 4x4 numpy ndarray') - if not np.allclose(value[3,:], np.array([0.0, 0.0, 0.0, 1.0])): - raise ValueError('Bottom row of matrix must be [0,0,0,1]') - self.rotation = Node._q_from_m(value) - self.scale = Node._s_from_m(value) - self.translation = Node._t_from_m(value) - self._matrix = value - - @staticmethod - def _t_from_m(m): - return m[:3,3] - - @staticmethod - def _r_from_m(m): - U = m[:3,:3] - norms = np.linalg.norm(U.T, axis=1) - return U / norms - - @staticmethod - def _q_from_m(m): - M = np.eye(4) - M[:3,:3] = Node._r_from_m(m) - q_wxyz = transformations.quaternion_from_matrix(M) - return np.roll(q_wxyz, -1) - - @staticmethod - def _s_from_m(m): - return np.linalg.norm(m[:3,:3].T, axis=1) - - @staticmethod - def _r_from_q(q): - q_wxyz = np.roll(q, 1) - return transformations.quaternion_matrix(q_wxyz)[:3,:3] - - @staticmethod - def _m_from_tqs(t, q, s): - S = np.eye(4) - S[:3,:3] = np.diag(s) - - R = np.eye(4) - R[:3,:3] = Node._r_from_q(q) - - T = np.eye(4) - T[:3,3] = t - - return T.dot(R.dot(S)) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_backtranslation_dataset.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_backtranslation_dataset.py deleted file mode 100644 index dffc3b49387dfdc046ea23d7db179377040b7cbc..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_backtranslation_dataset.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import unittest - -import tests.utils as test_utils -import torch -from fairseq.data import ( - BacktranslationDataset, - LanguagePairDataset, - TransformEosDataset, -) -from fairseq.sequence_generator import SequenceGenerator - - -class TestBacktranslationDataset(unittest.TestCase): - def setUp(self): - ( - self.tgt_dict, - self.w1, - self.w2, - self.src_tokens, - self.src_lengths, - self.model, - ) = test_utils.sequence_generator_setup() - - dummy_src_samples = self.src_tokens - - self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples) - self.cuda = torch.cuda.is_available() - - def _backtranslation_dataset_helper( - self, - remove_eos_from_input_src, - remove_eos_from_output_src, - ): - tgt_dataset = LanguagePairDataset( - src=self.tgt_dataset, - src_sizes=self.tgt_dataset.sizes, - src_dict=self.tgt_dict, - tgt=None, - tgt_sizes=None, - tgt_dict=None, - ) - - generator = SequenceGenerator( - [self.model], - tgt_dict=self.tgt_dict, - max_len_a=0, - max_len_b=200, - beam_size=2, - unk_penalty=0, - ) - - backtranslation_dataset = BacktranslationDataset( - tgt_dataset=TransformEosDataset( - dataset=tgt_dataset, - eos=self.tgt_dict.eos(), - # remove eos from the input src - remove_eos_from_src=remove_eos_from_input_src, - ), - src_dict=self.tgt_dict, - backtranslation_fn=( - lambda sample: generator.generate([self.model], sample) - ), - output_collater=TransformEosDataset( - dataset=tgt_dataset, - eos=self.tgt_dict.eos(), - # if we remove eos from the input src, then we need to add it - # back to the output tgt - append_eos_to_tgt=remove_eos_from_input_src, - remove_eos_from_src=remove_eos_from_output_src, - ).collater, - cuda=self.cuda, - ) - dataloader = torch.utils.data.DataLoader( - backtranslation_dataset, - batch_size=2, - collate_fn=backtranslation_dataset.collater, - ) - backtranslation_batch_result = next(iter(dataloader)) - - eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2 - - # Note that we sort by src_lengths and add left padding, so actually - # ids will look like: [1, 0] - expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]]) - if remove_eos_from_output_src: - expected_src = expected_src[:, :-1] - expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) - generated_src = backtranslation_batch_result["net_input"]["src_tokens"] - tgt_tokens = backtranslation_batch_result["target"] - - self.assertTensorEqual(expected_src, generated_src) - self.assertTensorEqual(expected_tgt, tgt_tokens) - - def test_backtranslation_dataset_no_eos_in_output_src(self): - self._backtranslation_dataset_helper( - remove_eos_from_input_src=False, - remove_eos_from_output_src=True, - ) - - def test_backtranslation_dataset_with_eos_in_output_src(self): - self._backtranslation_dataset_helper( - remove_eos_from_input_src=False, - remove_eos_from_output_src=False, - ) - - def test_backtranslation_dataset_no_eos_in_input_src(self): - self._backtranslation_dataset_helper( - remove_eos_from_input_src=True, - remove_eos_from_output_src=False, - ) - - def assertTensorEqual(self, t1, t2): - self.assertEqual(t1.size(), t2.size(), "size mismatch") - self.assertEqual(t1.ne(t2).long().sum(), 0) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/learn_joint_bpe_and_vocab.py b/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/learn_joint_bpe_and_vocab.py deleted file mode 100644 index 
d75ff3d3f687c4f4776cc0246b05e3f6765374b2..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/learn_joint_bpe_and_vocab.py +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Author: Rico Sennrich - -"""Use byte pair encoding (BPE) to learn a variable-length encoding of the vocabulary in a text. -This script learns BPE jointly on a concatenation of a list of texts (typically the source and target side of a parallel corpus, -applies the learned operation to each and (optionally) returns the resulting vocabulary of each text. -The vocabulary can be used in apply_bpe.py to avoid producing symbols that are rare or OOV in a training text. - -Reference: -Rico Sennrich, Barry Haddow and Alexandra Birch (2016). Neural Machine Translation of Rare Words with Subword Units. -Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany. -""" - -from __future__ import unicode_literals - -import sys -import os -import inspect -import codecs -import argparse -import tempfile -import warnings -from collections import Counter -from multiprocessing import cpu_count - -#hack to get imports working if running this as a script, or within a package -if __name__ == '__main__': - import learn_bpe - import apply_bpe -else: - from . import learn_bpe - from . import apply_bpe - -# hack for python2/3 compatibility -from io import open -argparse.open = open - -def create_parser(subparsers=None): - - if subparsers: - parser = subparsers.add_parser('learn-joint-bpe-and-vocab', - formatter_class=argparse.RawDescriptionHelpFormatter, - description="learn BPE-based word segmentation") - else: - parser = argparse.ArgumentParser( - formatter_class=argparse.RawDescriptionHelpFormatter, - description="learn BPE-based word segmentation") - - parser.add_argument( - '--input', '-i', type=argparse.FileType('r'), required=True, nargs = '+', - metavar='PATH', - help="Input texts (multiple allowed).") - parser.add_argument( - '--output', '-o', type=argparse.FileType('w'), required=True, - metavar='PATH', - help="Output file for BPE codes.") - parser.add_argument( - '--symbols', '-s', type=int, default=10000, - help="Create this many new symbols (each representing a character n-gram) (default: %(default)s)") - parser.add_argument( - '--separator', type=str, default='@@', metavar='STR', - help="Separator between non-final subword units (default: '%(default)s')") - parser.add_argument( - '--write-vocabulary', type=argparse.FileType('w'), required=True, nargs = '+', default=None, - metavar='PATH', dest='vocab', - help='Write to these vocabulary files after applying BPE. One per input text. Used for filtering in apply_bpe.py') - parser.add_argument( - '--min-frequency', type=int, default=2, metavar='FREQ', - help='Stop if no symbol pair has frequency >= FREQ (default: %(default)s)') - parser.add_argument( - '--total-symbols', '-t', action="store_true", - help="subtract number of characters from the symbols to be generated (so that '--symbols' becomes an estimate for the total number of symbols needed to encode text).") - parser.add_argument( - '--num-workers', type=int, default=1, - help="Number of processors to process texts, only supported in Python3. If -1, set `multiprocessing.cpu_count()`. 
(default: %(default)s)") - parser.add_argument( - '--verbose', '-v', action="store_true", - help="verbose mode.") - - return parser - -def learn_joint_bpe_and_vocab(args): - - if args.vocab and len(args.input) != len(args.vocab): - sys.stderr.write('Error: number of input files and vocabulary files must match\n') - sys.exit(1) - - # read/write files as UTF-8 - args.input = [codecs.open(f.name, encoding='UTF-8') for f in args.input] - args.vocab = [codecs.open(f.name, 'w', encoding='UTF-8') for f in args.vocab] - - # get combined vocabulary of all input texts - full_vocab = Counter() - for f in args.input: - full_vocab += learn_bpe.get_vocabulary(f, num_workers=args.num_workers) - f.seek(0) - - vocab_list = ['{0} {1}'.format(key, freq) for (key, freq) in full_vocab.items()] - - # learn BPE on combined vocabulary - with codecs.open(args.output.name, 'w', encoding='UTF-8') as output: - learn_bpe.learn_bpe(vocab_list, output, args.symbols, args.min_frequency, args.verbose, is_dict=True, total_symbols=args.total_symbols) - - with codecs.open(args.output.name, encoding='UTF-8') as codes: - bpe = apply_bpe.BPE(codes, separator=args.separator) - - # apply BPE to each training corpus and get vocabulary - for train_file, vocab_file in zip(args.input, args.vocab): - - tmp = tempfile.NamedTemporaryFile(delete=False) - tmp.close() - - tmpout = codecs.open(tmp.name, 'w', encoding='UTF-8') - - train_file.seek(0) - bpe.process_lines(train_file.name, tmpout, num_workers=args.num_workers) - - tmpout.close() - tmpin = codecs.open(tmp.name, encoding='UTF-8') - - vocab = learn_bpe.get_vocabulary(tmpin, num_workers=args.num_workers) - tmpin.close() - os.remove(tmp.name) - - for key, freq in sorted(vocab.items(), key=lambda x: x[1], reverse=True): - vocab_file.write("{0} {1}\n".format(key, freq)) - vocab_file.close() - - -if __name__ == '__main__': - - currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) - newdir = os.path.join(currentdir, 'subword_nmt') - if os.path.isdir(newdir): - warnings.simplefilter('default') - warnings.warn( - "this script's location has moved to {0}. This symbolic link will be removed in a future version. Please point to the new location, or install the package and use the command 'subword-nmt'".format(newdir), - DeprecationWarning - ) - - # python 2/3 compatibility - if sys.version_info < (3, 0): - sys.stderr = codecs.getwriter('UTF-8')(sys.stderr) - sys.stdout = codecs.getwriter('UTF-8')(sys.stdout) - sys.stdin = codecs.getreader('UTF-8')(sys.stdin) - else: - sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer) - sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer) - sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer) - - parser = create_parser() - args = parser.parse_args() - - if args.num_workers <= 0: - args.num_workers = cpu_count() - - if sys.version_info < (3, 0): - args.separator = args.separator.decode('UTF-8') - if args.num_workers > 1: - args.num_workers = 1 - warnings.warn("Parallel mode is only supported in Python3. 
Using 1 processor instead.") - - assert(len(args.input) == len(args.vocab)) - - learn_joint_bpe_and_vocab(args) diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.2035cf67.js b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.2035cf67.js deleted file mode 100644 index 93af8d791462494c1add7ebc358d1b0713248752..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.2035cf67.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as q,i as D,s as E,a7 as I,e as S,c as h,a as F,b as y,f as C,m,g as T,a8 as j,l as K,j as g,k,n as P,o as v,F as L,t as O,h as R,a9 as d,P as U,R as V,T as z,I as A,O as G,U as H,V as J,L as M,K as B}from"./index.396f4a72.js";function N(s){let e;return{c(){e=O(s[1])},m(l,t){C(l,e,t)},p(l,t){t&2&&R(e,l[1])},d(l){l&&P(e)}}}function Q(s){let e,l,t,n,u,c,_;return l=new I({props:{show_label:s[3],$$slots:{default:[N]},$$scope:{ctx:s}}}),{c(){e=S("label"),h(l.$$.fragment),t=F(),n=S("input"),y(n,"type","color"),y(n,"class","gr-box-unrounded"),n.disabled=s[2],y(e,"class","block")},m(r,f){C(r,e,f),m(l,e,null),T(e,t),T(e,n),j(n,s[0]),u=!0,c||(_=K(n,"input",s[5]),c=!0)},p(r,[f]){const a={};f&8&&(a.show_label=r[3]),f&258&&(a.$$scope={dirty:f,ctx:r}),l.$set(a),(!u||f&4)&&(n.disabled=r[2]),f&1&&j(n,r[0])},i(r){u||(g(l.$$.fragment,r),u=!0)},o(r){k(l.$$.fragment,r),u=!1},d(r){r&&P(e),v(l),c=!1,_()}}}function W(s,e,l){let{value:t="#000000"}=e,{style:n={}}=e,{label:u}=e,{disabled:c=!1}=e,{show_label:_=!0}=e;const r=L();function f(i){r("change",i)}function a(){t=this.value,l(0,t)}return s.$$set=i=>{"value"in i&&l(0,t=i.value),"style"in i&&l(4,n=i.style),"label"in i&&l(1,u=i.label),"disabled"in i&&l(2,c=i.disabled),"show_label"in i&&l(3,_=i.show_label)},s.$$.update=()=>{s.$$.dirty&1,s.$$.dirty&1&&f(t)},[t,u,c,_,n,a]}class X extends q{constructor(e){super(),D(this,e,W,Q,E,{value:0,style:4,label:1,disabled:2,show_label:3})}}function Y(s){let e,l,t,n,u;const c=[s[6]];let _={};for(let a=0;aG(t,"value",r)),t.$on("change",s[9]),t.$on("submit",s[10]),{c(){h(e.$$.fragment),l=F(),h(t.$$.fragment)},m(a,i){m(e,a,i),C(a,l,i),m(t,a,i),u=!0},p(a,i){const w=i&64?H(c,[J(a[6])]):{};e.$set(w);const b={};i&32&&(b.style=a[5]),i&2&&(b.label=a[1]),i&16&&(b.show_label=a[4]),i&128&&(b.disabled=a[7]==="static"),!n&&i&1&&(n=!0,b.value=a[0],M(()=>n=!1)),t.$set(b)},i(a){u||(g(e.$$.fragment,a),g(t.$$.fragment,a),u=!0)},o(a){k(e.$$.fragment,a),k(t.$$.fragment,a),u=!1},d(a){v(e,a),a&&P(l),v(t,a)}}}function Z(s){let e,l;return e=new U({props:{visible:s[3],elem_id:s[2],disable:typeof s[5].container=="boolean"&&!s[5].container,$$slots:{default:[Y]},$$scope:{ctx:s}}}),{c(){h(e.$$.fragment)},m(t,n){m(e,t,n),l=!0},p(t,[n]){const u={};n&8&&(u.visible=t[3]),n&4&&(u.elem_id=t[2]),n&32&&(u.disable=typeof t[5].container=="boolean"&&!t[5].container),n&2291&&(u.$$scope={dirty:n,ctx:t}),e.$set(u)},i(t){l||(g(e.$$.fragment,t),l=!0)},o(t){k(e.$$.fragment,t),l=!1},d(t){v(e,t)}}}function p(s,e,l){let{label:t="ColorPicker"}=e,{elem_id:n=""}=e,{visible:u=!0}=e,{value:c}=e,{show_label:_}=e,{style:r={}}=e,{loading_status:f}=e,{mode:a}=e;function i(o){c=o,l(0,c)}function w(o){B.call(this,s,o)}function b(o){B.call(this,s,o)}return s.$$set=o=>{"label"in o&&l(1,t=o.label),"elem_id"in o&&l(2,n=o.elem_id),"visible"in o&&l(3,u=o.visible),"value"in o&&l(0,c=o.value),"show_label"in o&&l(4,_=o.show_label),"style"in o&&l(5,r=o.style),"loading_status"in 
o&&l(6,f=o.loading_status),"mode"in o&&l(7,a=o.mode)},[c,t,n,u,_,r,f,a,i,w,b]}class x extends q{constructor(e){super(),D(this,e,p,Z,E,{label:1,elem_id:2,visible:3,value:0,show_label:4,style:5,loading_status:6,mode:7})}get label(){return this.$$.ctx[1]}set label(e){this.$$set({label:e}),d()}get elem_id(){return this.$$.ctx[2]}set elem_id(e){this.$$set({elem_id:e}),d()}get visible(){return this.$$.ctx[3]}set visible(e){this.$$set({visible:e}),d()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),d()}get show_label(){return this.$$.ctx[4]}set show_label(e){this.$$set({show_label:e}),d()}get style(){return this.$$.ctx[5]}set style(e){this.$$set({style:e}),d()}get loading_status(){return this.$$.ctx[6]}set loading_status(e){this.$$set({loading_status:e}),d()}get mode(){return this.$$.ctx[7]}set mode(e){this.$$set({mode:e}),d()}}var ee=x;const te=["static","dynamic"],le=s=>({type:"string",description:"hex color code",example_data:s.value??"#000000"});export{ee as Component,le as document,te as modes}; -//# sourceMappingURL=index.2035cf67.js.map diff --git a/spaces/ICML2022/OFA/fairseq/examples/conv_seq2seq/README.md b/spaces/ICML2022/OFA/fairseq/examples/conv_seq2seq/README.md deleted file mode 100644 index 95fe7e7909a77ee0e50fe31d4b8be38daa8f3be7..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/conv_seq2seq/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Convolutional Sequence to Sequence Learning (Gehring et al., 2017) - -## Pre-trained models - -Description | Dataset | Model | Test set(s) ----|---|---|--- -Convolutional
([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2) | newstest2014: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.newstest2014.tar.bz2), newstest2012/2013: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.ntst1213.tar.bz2)
-Convolutional ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT14 English-German](http://statmt.org/wmt14/translation-task.html#Download) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2) | newstest2014: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-de.newstest2014.tar.bz2)
-Convolutional ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT17 English-German](http://statmt.org/wmt17/translation-task.html#Download) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2) | newstest2014:
        [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.v2.en-de.newstest2014.tar.bz2) - -## Example usage - -See the [translation README](../translation/README.md) for instructions on reproducing results for WMT'14 En-De and -WMT'14 En-Fr using the `fconv_wmt_en_de` and `fconv_wmt_en_fr` model architectures. - -## Citation - -```bibtex -@inproceedings{gehring2017convs2s, - title = {Convolutional Sequence to Sequence Learning}, - author = {Gehring, Jonas, and Auli, Michael and Grangier, David and Yarats, Denis and Dauphin, Yann N}, - booktitle = {Proc. of ICML}, - year = 2017, -} -``` diff --git a/spaces/ICML2022/OFA/fairseq/examples/translation_moe/score.py b/spaces/ICML2022/OFA/fairseq/examples/translation_moe/score.py deleted file mode 100644 index 9a529a985019710ea202cb6bf28ae071c0ce4135..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/translation_moe/score.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Scoring script for computing pairwise BLEU and multi-ref BLEU over a set of -candidate hypotheses. - -See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" -(Shen et al., 2019) `_. -""" - -import argparse -import random -import sys -from itertools import chain - -import numpy as np -from sacrebleu import compute_bleu, corpus_bleu as _corpus_bleu - - -def main(): - parser = argparse.ArgumentParser(sys.argv[0]) - parser.add_argument( - "--sys", nargs="*", default="", metavar="FILE", help="path to system output" - ) - parser.add_argument("--ref", default="", metavar="FILE", help="path to references") - parser.add_argument( - "--output", - default="", - metavar="FILE", - help="print outputs into a pretty format", - ) - args = parser.parse_args() - - if args.sys: - src, tgt, hypos, log_probs = load_sys(args.sys) - print("pairwise BLEU: %.2f" % pairwise(hypos)) - if args.output: - merge(src, tgt, hypos, log_probs, args.output) - - if args.ref: - _, _, refs = load_ref(args.ref) - if args.sys: - multi_ref(refs, hypos) - else: - intra_ref(refs) - - -def dictolist(d): - a = sorted(d.items(), key=lambda i: i[0]) - return [i[1] for i in a] - - -def load_sys(paths): - src, tgt, hypos, log_probs = {}, {}, {}, {} - for path in paths: - with open(path) as f: - for line in f: - line = line.rstrip() - # S: source - # T: target - # D: detokenized system output - if line.startswith(("S-", "T-", "D-")): - i = int(line[line.find("-") + 1 : line.find("\t")]) - if line.startswith("S-"): - src[i] = line.split("\t")[1] - if line.startswith("T-"): - tgt[i] = line.split("\t")[1] - if line.startswith("D-"): - if i not in hypos: - hypos[i] = [] - log_probs[i] = [] - hypos[i].append(line.split("\t")[2]) - log_probs[i].append(float(line.split("\t")[1])) - return dictolist(src), dictolist(tgt), dictolist(hypos), dictolist(log_probs) - - -def load_ref(path): - with open(path) as f: - lines = f.readlines() - src, tgt, refs = [], [], [] - i = 0 - while i < len(lines): - if lines[i].startswith("S-"): - src.append(lines[i].split("\t")[1].rstrip()) - i += 1 - elif lines[i].startswith("T-"): - tgt.append(lines[i].split("\t")[1].rstrip()) - i += 1 - else: - a = [] - while i < len(lines) and lines[i].startswith("R"): - a.append(lines[i].split("\t")[1].rstrip()) - i += 1 - refs.append(a) - return src, tgt, refs - - -def merge(src, tgt, hypos, log_probs, 
path): - with open(path, "w") as f: - for s, t, hs, lps in zip(src, tgt, hypos, log_probs): - f.write(s + "\n") - f.write(t + "\n") - f.write("\n") - for h, lp in zip(hs, lps): - f.write("\t%f\t%s\n" % (lp, h.strip())) - f.write("------------------------------------------------------\n") - - -def corpus_bleu(sys_stream, ref_streams): - bleu = _corpus_bleu(sys_stream, ref_streams, tokenize="none") - return bleu.score - - -def sentence_bleu(hypothesis, reference): - bleu = _corpus_bleu(hypothesis, reference) - for i in range(1, 4): - bleu.counts[i] += 1 - bleu.totals[i] += 1 - bleu = compute_bleu( - bleu.counts, - bleu.totals, - bleu.sys_len, - bleu.ref_len, - smooth_method="exp", - ) - return bleu.score - - -def pairwise(sents): - _ref, _hypo = [], [] - for s in sents: - for i in range(len(s)): - for j in range(len(s)): - if i != j: - _ref.append(s[i]) - _hypo.append(s[j]) - return corpus_bleu(_hypo, [_ref]) - - -def multi_ref(refs, hypos): - _ref, _hypo = [], [] - ref_cnt = 0 - assert len(refs) == len(hypos) - - # count number of refs covered - for rs, hs in zip(refs, hypos): - a = set() - for h in hs: - s = [sentence_bleu(h, r) for r in rs] - j = np.argmax(s) - _ref.append(rs[j]) - _hypo.append(h) - best = [k for k in range(len(rs)) if s[k] == s[j]] - a.add(random.choice(best)) - ref_cnt += len(a) - print("#refs covered: %.2f" % (ref_cnt / len(refs))) - - # transpose refs and hypos - refs = list(zip(*refs)) - hypos = list(zip(*hypos)) - - # compute multi-ref corpus BLEU (leave-one-out to be comparable to intra_ref) - k = len(hypos) - m = len(refs) - flat_hypos = [hypos[j][i] for i in range(len(hypos[0])) for j in range(k)] - duplicated_refs = [[ref for ref in refs_i for _ in range(k)] for refs_i in refs] - loo_bleus = [] - for held_out_ref in range(m): - remaining_refs = ( - duplicated_refs[:held_out_ref] + duplicated_refs[held_out_ref + 1 :] - ) - assert len(remaining_refs) == m - 1 - loo_bleus.append(corpus_bleu(flat_hypos, remaining_refs)) - print("average multi-reference BLEU (leave-one-out): %.2f" % np.mean(loo_bleus)) - - -def intra_ref(refs): - print("ref pairwise BLEU: %.2f" % pairwise(refs)) - refs = list(zip(*refs)) - m = len(refs) - concat_h = [] - concat_rest = [[] for j in range(m - 1)] - for i, h in enumerate(refs): - rest = refs[:i] + refs[i + 1 :] - concat_h.append(h) - for j in range(m - 1): - concat_rest[j].extend(rest[j]) - concat_h = list(chain.from_iterable(concat_h)) - bleu = corpus_bleu(concat_h, concat_rest) - print("multi-reference BLEU (leave-one-out): %.2f" % bleu) - - -if __name__ == "__main__": - main() diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/dataclass/initialize.py b/spaces/ICML2022/OFA/fairseq/fairseq/dataclass/initialize.py deleted file mode 100644 index 8f6cbafb805b293611e2175721132078123b81d0..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/dataclass/initialize.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-"""isort:skip_file""" - -import logging -from hydra.core.config_store import ConfigStore -from fairseq.dataclass.configs import FairseqConfig -from omegaconf import DictConfig, OmegaConf - - -logger = logging.getLogger(__name__) - - -def hydra_init(cfg_name="config") -> None: - - cs = ConfigStore.instance() - cs.store(name=f"{cfg_name}", node=FairseqConfig) - - for k in FairseqConfig.__dataclass_fields__: - v = FairseqConfig.__dataclass_fields__[k].default - try: - cs.store(name=k, node=v) - except BaseException: - logger.error(f"{k} - {v}") - raise - - -def add_defaults(cfg: DictConfig) -> None: - """This function adds default values that are stored in dataclasses that hydra doesn't know about """ - - from fairseq.registry import REGISTRIES - from fairseq.tasks import TASK_DATACLASS_REGISTRY - from fairseq.models import ARCH_MODEL_NAME_REGISTRY, MODEL_DATACLASS_REGISTRY - from fairseq.dataclass.utils import merge_with_parent - from typing import Any - - OmegaConf.set_struct(cfg, False) - - for k, v in FairseqConfig.__dataclass_fields__.items(): - field_cfg = cfg.get(k) - if field_cfg is not None and v.type == Any: - dc = None - - if isinstance(field_cfg, str): - field_cfg = DictConfig({"_name": field_cfg}) - field_cfg.__dict__["_parent"] = field_cfg.__dict__["_parent"] - - name = getattr(field_cfg, "_name", None) - - if k == "task": - dc = TASK_DATACLASS_REGISTRY.get(name) - elif k == "model": - name = ARCH_MODEL_NAME_REGISTRY.get(name, name) - dc = MODEL_DATACLASS_REGISTRY.get(name) - elif k in REGISTRIES: - dc = REGISTRIES[k]["dataclass_registry"].get(name) - - if dc is not None: - cfg[k] = merge_with_parent(dc, field_cfg) diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/models/edvr_model.py b/spaces/Iceclear/StableSR/StableSR/basicsr/models/edvr_model.py deleted file mode 100644 index 9bdbf7b94fe3f06c76fbf2a4941621f64e0003e7..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/basicsr/models/edvr_model.py +++ /dev/null @@ -1,62 +0,0 @@ -from basicsr.utils import get_root_logger -from basicsr.utils.registry import MODEL_REGISTRY -from .video_base_model import VideoBaseModel - - -@MODEL_REGISTRY.register() -class EDVRModel(VideoBaseModel): - """EDVR Model. - - Paper: EDVR: Video Restoration with Enhanced Deformable Convolutional Networks. 
# noqa: E501 - """ - - def __init__(self, opt): - super(EDVRModel, self).__init__(opt) - if self.is_train: - self.train_tsa_iter = opt['train'].get('tsa_iter') - - def setup_optimizers(self): - train_opt = self.opt['train'] - dcn_lr_mul = train_opt.get('dcn_lr_mul', 1) - logger = get_root_logger() - logger.info(f'Multiple the learning rate for dcn with {dcn_lr_mul}.') - if dcn_lr_mul == 1: - optim_params = self.net_g.parameters() - else: # separate dcn params and normal params for different lr - normal_params = [] - dcn_params = [] - for name, param in self.net_g.named_parameters(): - if 'dcn' in name: - dcn_params.append(param) - else: - normal_params.append(param) - optim_params = [ - { # add normal params first - 'params': normal_params, - 'lr': train_opt['optim_g']['lr'] - }, - { - 'params': dcn_params, - 'lr': train_opt['optim_g']['lr'] * dcn_lr_mul - }, - ] - - optim_type = train_opt['optim_g'].pop('type') - self.optimizer_g = self.get_optimizer(optim_type, optim_params, **train_opt['optim_g']) - self.optimizers.append(self.optimizer_g) - - def optimize_parameters(self, current_iter): - if self.train_tsa_iter: - if current_iter == 1: - logger = get_root_logger() - logger.info(f'Only train TSA module for {self.train_tsa_iter} iters.') - for name, param in self.net_g.named_parameters(): - if 'fusion' not in name: - param.requires_grad = False - elif current_iter == self.train_tsa_iter: - logger = get_root_logger() - logger.warning('Train all the parameters.') - for param in self.net_g.parameters(): - param.requires_grad = True - - super(EDVRModel, self).optimize_parameters(current_iter) diff --git a/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/model.py b/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/model.py deleted file mode 100644 index 8ae94c06bfb48f1cc189de8fcf1050d69c8993c3..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/model.py +++ /dev/null @@ -1,1103 +0,0 @@ -# pytorch_diffusion + derived encoder decoder -import math -import torch -import torch.nn as nn -import numpy as np -from einops import rearrange - -from ldm.util import instantiate_from_config -from ldm.modules.attention import LinearAttention - -from basicsr.archs.arch_util import default_init_weights, make_layer, pixel_unshuffle -from basicsr.archs.rrdbnet_arch import RRDB - -try: - import xformers - import xformers.ops - XFORMERS_IS_AVAILBLE = True -except: - XFORMERS_IS_AVAILBLE = False - -def calc_mean_std(feat, eps=1e-5): - """Calculate mean and std for adaptive_instance_normalization. - Args: - feat (Tensor): 4D tensor. - eps (float): A small value added to the variance to avoid - divide-by-zero. Default: 1e-5. - """ - size = feat.size() - assert len(size) == 4, 'The input feature should be 4D tensor.' - b, c = size[:2] - feat_var = feat.view(b, c, -1).var(dim=2) + eps - feat_std = feat_var.sqrt().view(b, c, 1, 1) - feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1) - return feat_mean, feat_std - -def adaptive_instance_normalization(content_feat, style_feat): - """Adaptive instance normalization. - Adjust the reference features to have the similar color and illuminations - as those in the degradate features. - Args: - content_feat (Tensor): The reference feature. - style_feat (Tensor): The degradate features. 
- """ - size = content_feat.size() - style_mean, style_std = calc_mean_std(style_feat) - content_mean, content_std = calc_mean_std(content_feat) - normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size) - return normalized_feat * style_std.expand(size) + style_mean.expand(size) - -def get_timestep_embedding(timesteps, embedding_dim): - """ - This matches the implementation in Denoising Diffusion Probabilistic Models: - From Fairseq. - Build sinusoidal embeddings. - This matches the implementation in tensor2tensor, but differs slightly - from the description in Section 3.5 of "Attention Is All You Need". - """ - assert len(timesteps.shape) == 1 - - half_dim = embedding_dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) - emb = emb.to(device=timesteps.device) - emb = timesteps.float()[:, None] * emb[None, :] - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) - if embedding_dim % 2 == 1: # zero pad - emb = torch.nn.functional.pad(emb, (0,1,0,0)) - return emb - - -def nonlinearity(x): - # swish - return x*torch.sigmoid(x) - - -def Normalize(in_channels, num_groups=32): - return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) - - -class Upsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") - if self.with_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=2, - padding=0) - - def forward(self, x): - if self.with_conv: - pad = (0,1,0,1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - else: - x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) - return x - - -class ResnetBlock(nn.Module): - def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, - dropout, temb_channels=512): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - - self.norm1 = Normalize(in_channels) - self.conv1 = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if temb_channels > 0: - self.temb_proj = torch.nn.Linear(temb_channels, - out_channels) - self.norm2 = Normalize(out_channels) - self.dropout = torch.nn.Dropout(dropout) - self.conv2 = torch.nn.Conv2d(out_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - self.conv_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - else: - self.nin_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x, temb): - h = x - h = self.norm1(h) - h = nonlinearity(h) - h = self.conv1(h) - - if temb is not None: - h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] - - h = self.norm2(h) - h = nonlinearity(h) - h = self.dropout(h) - h 
= self.conv2(h) - - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - x = self.conv_shortcut(x) - else: - x = self.nin_shortcut(x) - - return x+h - - -class LinAttnBlock(LinearAttention): - """to match AttnBlock usage""" - def __init__(self, in_channels): - super().__init__(dim=in_channels, heads=1, dim_head=in_channels) - - -class AttnBlock(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = q.reshape(b,c,h*w) - q = q.permute(0,2,1) # b,hw,c - k = k.reshape(b,c,h*w) # b,c,hw - w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = v.reshape(b,c,h*w) - w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) - h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] - h_ = h_.reshape(b,c,h,w) - - h_ = self.proj_out(h_) - - return x+h_ - -class MemoryEfficientAttnBlock(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.attention_op: Optional[Any] = None - - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q, k, v = map( - lambda t:t.reshape(b, t.shape[1], t.shape[2]*t.shape[3], 1) - .squeeze(3) - .permute(0,2,1) - .contiguous(), - (q, k, v), - ) - - # actually compute the attention, what we cannot get enough of - out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, scale=(int(c)**(-0.5)), op=self.attention_op) - - h_ = ( - out.permute(0,2,1) - .unsqueeze(3) - .reshape(b, c, h, w) - ) - - h_ = self.proj_out(h_) - - return x+h_ - - -def make_attn(in_channels, attn_type="vanilla"): - assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' - print(f"making attention of type '{attn_type}' with {in_channels} in_channels") - if attn_type == "vanilla": - if XFORMERS_IS_AVAILBLE: - return MemoryEfficientAttnBlock(in_channels) - else: - return AttnBlock(in_channels) - elif attn_type == "none": - return nn.Identity(in_channels) - else: - return LinAttnBlock(in_channels) - - -class Model(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = 
ch - self.temb_ch = self.ch*4 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - self.use_timestep = use_timestep - if self.use_timestep: - # timestep embedding - self.temb = nn.Module() - self.temb.dense = nn.ModuleList([ - torch.nn.Linear(self.ch, - self.temb_ch), - torch.nn.Linear(self.temb_ch, - self.temb_ch), - ]) - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - skip_in = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - if i_block == self.num_res_blocks: - skip_in = ch*in_ch_mult[i_level] - block.append(ResnetBlock(in_channels=block_in+skip_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x, t=None, context=None): - #assert x.shape[2] == x.shape[3] == self.resolution - if context is not None: - # assume aligned context, cat along channel axis - x = torch.cat((x, context), dim=1) - if self.use_timestep: - # timestep embedding - assert t is not None - temb = get_timestep_embedding(t, self.ch) - temb = self.temb.dense[0](temb) - temb = nonlinearity(temb) - temb = self.temb.dense[1](temb) - else: - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level 
in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block]( - torch.cat([h, hs.pop()], dim=1), temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - def get_last_layer(self): - return self.conv_out.weight - - -class Encoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", - **ignore_kwargs): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.in_ch_mult = in_ch_mult - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - 2*z_channels if double_z else z_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x, return_fea=False): - # timestep embedding - temb = None - - # downsampling - hs = [self.conv_in(x)] - fea_list = [] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if return_fea: - if i_level==1 or i_level==2: - fea_list.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - - if return_fea: - return h, fea_list - - return h - -class Decoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, - attn_type="vanilla", **ignorekwargs): - super().__init__() - if use_linear_attn: attn_type = "linear" - 
self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - self.give_pre_end = give_pre_end - self.tanh_out = tanh_out - - # compute in_ch_mult, block_in and curr_res at lowest res - in_ch_mult = (1,)+tuple(ch_mult) - block_in = ch*ch_mult[self.num_resolutions-1] - curr_res = resolution // 2**(self.num_resolutions-1) - self.z_shape = (1,z_channels,curr_res,curr_res) - print("Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape))) - - # z to block_in - self.conv_in = torch.nn.Conv2d(z_channels, - block_in, - kernel_size=3, - stride=1, - padding=1) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, z): - #assert z.shape[1:] == self.z_shape[1:] - self.last_z_shape = z.shape - - # timestep embedding - temb = None - - # z to block_in - h = self.conv_in(z) - - # middle - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block](h, temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - if self.give_pre_end: - return h - - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - if self.tanh_out: - h = torch.tanh(h) - return h - -class Decoder_Mix(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, - attn_type="vanilla", num_fuse_block=2, fusion_w=1.0, **ignorekwargs): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - self.give_pre_end = give_pre_end - self.tanh_out = tanh_out - self.fusion_w = fusion_w - - # compute in_ch_mult, block_in and curr_res at lowest res - in_ch_mult = (1,)+tuple(ch_mult) - block_in = ch*ch_mult[self.num_resolutions-1] - curr_res = resolution // 2**(self.num_resolutions-1) - self.z_shape = (1,z_channels,curr_res,curr_res) - 
print("Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape))) - - # z to block_in - self.conv_in = torch.nn.Conv2d(z_channels, - block_in, - kernel_size=3, - stride=1, - padding=1) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - - if i_level != self.num_resolutions-1: - if i_level != 0: - fuse_layer = Fuse_sft_block_RRDB(in_ch=block_out, out_ch=block_out, num_block=num_fuse_block) - setattr(self, 'fusion_layer_{}'.format(i_level), fuse_layer) - - for i_block in range(self.num_res_blocks+1): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, z, enc_fea): - #assert z.shape[1:] == self.z_shape[1:] - self.last_z_shape = z.shape - - # timestep embedding - temb = None - - # z to block_in - h = self.conv_in(z) - - # middle - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block](h, temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - - if i_level != self.num_resolutions-1 and i_level != 0: - cur_fuse_layer = getattr(self, 'fusion_layer_{}'.format(i_level)) - h = cur_fuse_layer(enc_fea[i_level-1], h, self.fusion_w) - - if i_level != 0: - h = self.up[i_level].upsample(h) - # end - if self.give_pre_end: - return h - - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - if self.tanh_out: - h = torch.tanh(h) - return h - -class ResBlock(nn.Module): - def __init__(self, in_channels, out_channels=None): - super(ResBlock, self).__init__() - self.in_channels = in_channels - self.out_channels = in_channels if out_channels is None else out_channels - self.norm1 = Normalize(in_channels) - self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) - self.norm2 = Normalize(out_channels) - self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) - if self.in_channels != self.out_channels: - self.conv_out = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) - - def forward(self, x_in): - x = x_in - x = self.norm1(x) - x = nonlinearity(x) - x = self.conv1(x) - x = self.norm2(x) - x = nonlinearity(x) - x = self.conv2(x) - if self.in_channels != self.out_channels: - x_in = self.conv_out(x_in) - - return x + x_in - -class Fuse_sft_block_RRDB(nn.Module): - def __init__(self, in_ch, out_ch, num_block=1, num_grow_ch=32): - 
super().__init__() - self.encode_enc_1 = ResBlock(2*in_ch, in_ch) - self.encode_enc_2 = make_layer(RRDB, num_block, num_feat=in_ch, num_grow_ch=num_grow_ch) - self.encode_enc_3 = ResBlock(in_ch, out_ch) - - def forward(self, enc_feat, dec_feat, w=1): - enc_feat = self.encode_enc_1(torch.cat([enc_feat, dec_feat], dim=1)) - enc_feat = self.encode_enc_2(enc_feat) - enc_feat = self.encode_enc_3(enc_feat) - residual = w * enc_feat - out = dec_feat + residual - return out - -class SimpleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, *args, **kwargs): - super().__init__() - self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), - ResnetBlock(in_channels=in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=2 * in_channels, - out_channels=4 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=4 * in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - nn.Conv2d(2*in_channels, in_channels, 1), - Upsample(in_channels, with_conv=True)]) - # end - self.norm_out = Normalize(in_channels) - self.conv_out = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - for i, layer in enumerate(self.model): - if i in [1,2,3]: - x = layer(x, None) - else: - x = layer(x) - - h = self.norm_out(x) - h = nonlinearity(h) - x = self.conv_out(h) - return x - - -class UpsampleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, - ch_mult=(2,2), dropout=0.0): - super().__init__() - # upsampling - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - block_in = in_channels - curr_res = resolution // 2 ** (self.num_resolutions - 1) - self.res_blocks = nn.ModuleList() - self.upsample_blocks = nn.ModuleList() - for i_level in range(self.num_resolutions): - res_block = [] - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): - res_block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - self.res_blocks.append(nn.ModuleList(res_block)) - if i_level != self.num_resolutions - 1: - self.upsample_blocks.append(Upsample(block_in, True)) - curr_res = curr_res * 2 - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - # upsampling - h = x - for k, i_level in enumerate(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks + 1): - h = self.res_blocks[i_level][i_block](h, None) - if i_level != self.num_resolutions - 1: - h = self.upsample_blocks[k](h) - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class LatentRescaler(nn.Module): - def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): - super().__init__() - # residual block, interpolate, residual block - self.factor = factor - self.conv_in = nn.Conv2d(in_channels, - mid_channels, - kernel_size=3, - stride=1, - padding=1) - self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0) for _ in range(depth)]) - if XFORMERS_IS_AVAILBLE: - self.attn = MemoryEfficientAttnBlock(mid_channels) - else: - self.attn = AttnBlock(mid_channels) - self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, - 
out_channels=mid_channels, - temb_channels=0, - dropout=0.0) for _ in range(depth)]) - - self.conv_out = nn.Conv2d(mid_channels, - out_channels, - kernel_size=1, - ) - - def forward(self, x): - x = self.conv_in(x) - for block in self.res_block1: - x = block(x, None) - x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) - x = self.attn(x) - for block in self.res_block2: - x = block(x, None) - x = self.conv_out(x) - return x - - -class MergedRescaleEncoder(nn.Module): - def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, - ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): - super().__init__() - intermediate_chn = ch * ch_mult[-1] - self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, - z_channels=intermediate_chn, double_z=False, resolution=resolution, - attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, - out_ch=None) - self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn, - mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth) - - def forward(self, x): - x = self.encoder(x) - x = self.rescaler(x) - return x - - -class MergedRescaleDecoder(nn.Module): - def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8), - dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1): - super().__init__() - tmp_chn = z_channels*ch_mult[-1] - self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout, - resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks, - ch_mult=ch_mult, resolution=resolution, ch=ch) - self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn, - out_channels=tmp_chn, depth=rescale_module_depth) - - def forward(self, x): - x = self.rescaler(x) - x = self.decoder(x) - return x - - -class Upsampler(nn.Module): - def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): - super().__init__() - assert out_size >= in_size - num_blocks = int(np.log2(out_size//in_size))+1 - factor_up = 1.+ (out_size % in_size) - print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}") - self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels, - out_channels=in_channels) - self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2, - attn_resolutions=[], in_channels=None, ch=in_channels, - ch_mult=[ch_mult for _ in range(num_blocks)]) - - def forward(self, x): - x = self.rescaler(x) - x = self.decoder(x) - return x - - -class Resize(nn.Module): - def __init__(self, in_channels=None, learned=False, mode="bilinear"): - super().__init__() - self.with_conv = learned - self.mode = mode - if self.with_conv: - print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode") - raise NotImplementedError() - assert in_channels is not None - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=4, - stride=2, - padding=1) - - def forward(self, x, scale_factor=1.0): - if scale_factor==1.0: - return x - else: - x = torch.nn.functional.interpolate(x, 
mode=self.mode, align_corners=False, scale_factor=scale_factor) - return x - -class FirstStagePostProcessor(nn.Module): - - def __init__(self, ch_mult:list, in_channels, - pretrained_model:nn.Module=None, - reshape=False, - n_channels=None, - dropout=0., - pretrained_config=None): - super().__init__() - if pretrained_config is None: - assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' - self.pretrained_model = pretrained_model - else: - assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' - self.instantiate_pretrained(pretrained_config) - - self.do_reshape = reshape - - if n_channels is None: - n_channels = self.pretrained_model.encoder.ch - - self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) - self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, - stride=1,padding=1) - - blocks = [] - downs = [] - ch_in = n_channels - for m in ch_mult: - blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) - ch_in = m * n_channels - downs.append(Downsample(ch_in, with_conv=False)) - - self.model = nn.ModuleList(blocks) - self.downsampler = nn.ModuleList(downs) - - - def instantiate_pretrained(self, config): - model = instantiate_from_config(config) - self.pretrained_model = model.eval() - # self.pretrained_model.train = False - for param in self.pretrained_model.parameters(): - param.requires_grad = False - - - @torch.no_grad() - def encode_with_pretrained(self,x): - c = self.pretrained_model.encode(x) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - return c - - def forward(self,x): - z_fs = self.encode_with_pretrained(x) - z = self.proj_norm(z_fs) - z = self.proj(z) - z = nonlinearity(z) - - for submodel, downmodel in zip(self.model,self.downsampler): - z = submodel(z,temb=None) - z = downmodel(z) - - if self.do_reshape: - z = rearrange(z,'b c h w -> b (h w) c') - return z diff --git a/spaces/Illumotion/Koboldcpp/examples/train-text-from-scratch/convert-train-checkpoint-to-gguf.py b/spaces/Illumotion/Koboldcpp/examples/train-text-from-scratch/convert-train-checkpoint-to-gguf.py deleted file mode 100644 index 351e7bc2d2a95f2117b574b3d8002c44dfcd4ba3..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/examples/train-text-from-scratch/convert-train-checkpoint-to-gguf.py +++ /dev/null @@ -1,499 +0,0 @@ -#!/usr/bin/env python3 -# train-text-from-scratch checkpoint --> gguf conversion - -import argparse -import os -import struct -import sys -import numpy as np -from pathlib import Path - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / '..' / '..' 
/ 'gguf-py' / 'gguf')) -import gguf - -# gguf constants -LLM_KV_OPTIMIZER_TYPE = "optimizer.type" -LLM_KV_OPTIMIZER_TYPE_ADAM = "adam" -LLM_KV_OPTIMIZER_TYPE_LBFGS = "lbfgs" -LLM_KV_OPTIMIZER_FILE_VERSION = "optimizer.file_version" -LLM_KV_OPTIMIZER_CONVERGENCE_PAST_COUNT = "optimizer.convergence_past_count" -LLM_KV_OPTIMIZER_PARAMETER_COUNT = "optimizer.parameter_count" -LLM_KV_OPTIMIZER_ITERATION_COUNT = "optimizer.iteration_count" -LLM_KV_OPTIMIZER_JUST_INITIALIZED = "optimizer.just_initialized" -LLM_KV_OPTIMIZER_ADAM_BEST_LOSS = "optimizer.adam.best_loss" -LLM_KV_OPTIMIZER_ADAM_PREVIOUS_LOSS = "optimizer.adam.previous_loss" -LLM_KV_OPTIMIZER_ADAM_NO_IMPROVEMENT_COUNT = "optimizer.adam.no_improvement_count" -LLM_KV_OPTIMIZER_LBFGS_APPROX_HESSIAN_COUNT = "optimizer.lbfgs.approx_hessian_count" -LLM_KV_OPTIMIZER_LBFGS_BEST_LOSS = "optimizer.lbfgs.best_loss" -LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_STEP = "optimizer.lbfgs.line_search_step" -LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_J = "optimizer.lbfgs.line_search_j" -LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_K = "optimizer.lbfgs.line_search_k" -LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_END = "optimizer.lbfgs.line_search_end" -LLM_KV_OPTIMIZER_LBFGS_NO_IMPROVEMENT_COUNT = "optimizer.lbfgs.no_improvement_count" - -LLM_TENSOR_OPTIMIZER_ADAM_FIRST_MOMENTS = "optimizer.adam.first_moments" -LLM_TENSOR_OPTIMIZER_ADAM_SECOND_MOMENTS = "optimizer.adam.second_moments" -LLM_TENSOR_OPTIMIZER_ADAM_PAST_LOSS_VALUES = "optimizer.adam.past_loss_values" - -LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_PARAMETERS = "optimizer.lbfgs.current_parameters" -LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_PARAMETERS = "optimizer.lbfgs.previous_parameters" -LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_GRADIENTS = "optimizer.lbfgs.current_gradients" -LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_GRADIENTS = "optimizer.lbfgs.previous_gradients" -LLM_TENSOR_OPTIMIZER_LBFGS_SEARCH_DIRECTION = "optimizer.lbfgs.search_direction" -LLM_TENSOR_OPTIMIZER_LBFGS_PAST_LOSS_VALUES = "optimizer.lbfgs.past_loss_values" -LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_ALPHA = "optimizer.lbfgs.memory_alpha" -LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_YS = "optimizer.lbfgs.memory_ys" -LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_S = "optimizer.lbfgs.memory_s" -LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_Y = "optimizer.lbfgs.memory_y" - -LLM_KV_TRAINING_TYPE_TRAIN_MODEL = "train_model" -LLM_KV_TRAINING_TYPE_FINETUNE_LORA = "finetune_lora" -LLM_KV_TRAINING_TYPE = "training.type" -LLM_KV_TRAINING_FILE_VERSION = "training.file_version" -LLM_KV_TRAINING_ITERATION_COUNT = "training.iteration_count" -LLM_KV_TRAINING_SAMPLE_COUNT = "training.sample_count" -LLM_KV_TRAINING_TOKEN_COUNT = "training.token_count" - -class Tensor: - def __init__(self, dtype='f', ne=None): - if ne is None: - ne = [] - self.dtype = dtype - self.ne = ne - self.nbytes = 0 - if self.dtype == 'f': - if len(self.ne) == 0: - self.nbytes = 0 - else: - self.nbytes = int(np.product(self.ne)) * 4 - else: - raise ValueError(f"Unhandled data type '{self.dtype}'") - - def load(self, data, offset): - nd = struct.unpack(' 0 else []) - - self.lbfgs_x = Tensor('f', [self.nx]) - self.lbfgs_xp = Tensor('f', [self.nx]) - self.lbfgs_g = Tensor('f', [self.nx]) - self.lbfgs_gp = Tensor('f', [self.nx]) - self.lbfgs_d = Tensor('f', [self.nx]) - self.lbfgs_pf = Tensor('f', [self.past] if self.past > 0 else []) - self.lbfgs_lmal = Tensor('f', [self.lbfgs_m]) - self.lbfgs_lmys = Tensor('f', [self.lbfgs_m]) - self.lbfgs_lms = Tensor('f', [self.nx, self.lbfgs_m]) - self.lbfgs_lmy = Tensor('f', [self.nx, self.lbfgs_m]) - - if self.type == 0: - # these 
tensors are stored, but we don't need their data - x = Tensor('f', [self.nx]) - g = Tensor('f', [self.nx]) - g2 = Tensor('f', [self.nx]) - mh = Tensor('f', [self.nx]) - vh = Tensor('f', [self.nx]) - - offset = x.load(data, offset) - offset = g.load(data, offset) - offset = g2.load(data, offset) - offset = self.adam_m.load(data, offset) - offset = self.adam_v.load(data, offset) - offset = mh.load(data, offset) - offset = vh.load(data, offset) - offset = self.adam_pf.load(data, offset) - - self.adam_fx_best = struct.unpack(' 0 else []) - - self.lbfgs_x = Tensor('f', [self.nx]) - self.lbfgs_xp = Tensor('f', [self.nx]) - self.lbfgs_g = Tensor('f', [self.nx]) - self.lbfgs_gp = Tensor('f', [self.nx]) - self.lbfgs_d = Tensor('f', [self.nx]) - self.lbfgs_pf = Tensor('f', [self.past] if self.past > 0 else []) - self.lbfgs_lmal = Tensor('f', [self.lbfgs_m]) - self.lbfgs_lmys = Tensor('f', [self.lbfgs_m]) - self.lbfgs_lms = Tensor('f', [self.nx, self.lbfgs_m]) - self.lbfgs_lmy = Tensor('f', [self.nx, self.lbfgs_m]) - - # forgot to save type in version 1: - # guess self.type from number of remaining bytes - size_type_0 = 12 + sum([t.max_storage_size() for t in - [self.adam_m, self.adam_v] - +([self.adam_pf] if (self.past > 0) else [])]) - size_type_1 = 24 + sum([t.max_storage_size() for t in - [self.lbfgs_x, self.lbfgs_xp, self.lbfgs_g, - self.lbfgs_gp, self.lbfgs_d, self.lbfgs_pf, - self.lbfgs_lmal, self.lbfgs_lmys, - self.lbfgs_lms, self.lbfgs_lmy] - +([self.lbfgs_pf] if (self.past > 0) else [])]) - # due to alignment padding the size might not by exact - # but the difference in size for both types is significant, - # so we can just use whichever is closest - remaining = len(data) - offset - if abs(remaining - size_type_0) < abs(remaining - size_type_1): - self.type = 0 - else: - self.type = 1 - - if self.type == 0: - offset = self.adam_m.load(data, offset) - offset = self.adam_v.load(data, offset) - offset = self.adam_pf.load(data,offset) - - self.adam_fx_best = struct.unpack(' 0: - self.adam_pf.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_ADAM_PAST_LOSS_VALUES) - - elif self.type == 1: - gguf_writer.add_string(LLM_KV_OPTIMIZER_TYPE, LLM_KV_OPTIMIZER_TYPE_LBFGS) - gguf_writer.add_uint32(LLM_KV_OPTIMIZER_LBFGS_APPROX_HESSIAN_COUNT, self.lbfgs_m) - gguf_writer.add_float32(LLM_KV_OPTIMIZER_LBFGS_BEST_LOSS, self.lbfgs_fx_best) - gguf_writer.add_float32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_STEP, self.lbfgs_step) - gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_J, self.lbfgs_j) - gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_K, self.lbfgs_k) - gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_END, self.lbfgs_end) - gguf_writer.add_uint32(LLM_KV_OPTIMIZER_LBFGS_NO_IMPROVEMENT_COUNT, self.lbfgs_n_no_improvement) - - self.lbfgs_x.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_PARAMETERS) - self.lbfgs_xp.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_PARAMETERS) - self.lbfgs_g.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_GRADIENTS) - self.lbfgs_gp.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_GRADIENTS) - self.lbfgs_d.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_SEARCH_DIRECTION) - if self.past > 0: - self.lbfgs_pf.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PAST_LOSS_VALUES) - self.lbfgs_lmal.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_ALPHA) - self.lbfgs_lmys.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_YS) - self.lbfgs_lms.save_gguf(gguf_writer, 
name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_S) - self.lbfgs_lmy.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_Y) - else: - raise ValueError('Unknown optimizer type') - -class ModelParams: - def __init__(self): - pass - - def load(self, data, offset): - self.n_vocab = struct.unpack('Made by Young Ho Shin

        - Email | - Github | - Linkedin -
        \ No newline at end of file diff --git a/spaces/Jasonyoyo/CodeFormer/CodeFormer/facelib/utils/misc.py b/spaces/Jasonyoyo/CodeFormer/CodeFormer/facelib/utils/misc.py deleted file mode 100644 index 52e2c0343f972d5bd5c735c5cfbf8b28bca6dd55..0000000000000000000000000000000000000000 --- a/spaces/Jasonyoyo/CodeFormer/CodeFormer/facelib/utils/misc.py +++ /dev/null @@ -1,174 +0,0 @@ -import cv2 -import os -import os.path as osp -import numpy as np -from PIL import Image -import torch -from torch.hub import download_url_to_file, get_dir -from urllib.parse import urlparse -# from basicsr.utils.download_util import download_file_from_google_drive -# import gdown - - -ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - - -def download_pretrained_models(file_ids, save_path_root): - os.makedirs(save_path_root, exist_ok=True) - - for file_name, file_id in file_ids.items(): - file_url = 'https://drive.google.com/uc?id='+file_id - save_path = osp.abspath(osp.join(save_path_root, file_name)) - if osp.exists(save_path): - user_response = input(f'{file_name} already exist. Do you want to cover it? Y/N\n') - if user_response.lower() == 'y': - print(f'Covering {file_name} to {save_path}') - # gdown.download(file_url, save_path, quiet=False) - # download_file_from_google_drive(file_id, save_path) - elif user_response.lower() == 'n': - print(f'Skipping {file_name}') - else: - raise ValueError('Wrong input. Only accepts Y/N.') - else: - print(f'Downloading {file_name} to {save_path}') - # gdown.download(file_url, save_path, quiet=False) - # download_file_from_google_drive(file_id, save_path) - - -def imwrite(img, file_path, params=None, auto_mkdir=True): - """Write image to file. - - Args: - img (ndarray): Image array to be written. - file_path (str): Image file path. - params (None or list): Same as opencv's :func:`imwrite` interface. - auto_mkdir (bool): If the parent folder of `file_path` does not exist, - whether to create it automatically. - - Returns: - bool: Successful or not. - """ - if auto_mkdir: - dir_name = os.path.abspath(os.path.dirname(file_path)) - os.makedirs(dir_name, exist_ok=True) - return cv2.imwrite(file_path, img, params) - - -def img2tensor(imgs, bgr2rgb=True, float32=True): - """Numpy array to tensor. - - Args: - imgs (list[ndarray] | ndarray): Input images. - bgr2rgb (bool): Whether to change bgr to rgb. - float32 (bool): Whether to change to float32. - - Returns: - list[tensor] | tensor: Tensor images. If returned results only have - one element, just return tensor. 
- """ - - def _totensor(img, bgr2rgb, float32): - if img.shape[2] == 3 and bgr2rgb: - if img.dtype == 'float64': - img = img.astype('float32') - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = torch.from_numpy(img.transpose(2, 0, 1)) - if float32: - img = img.float() - return img - - if isinstance(imgs, list): - return [_totensor(img, bgr2rgb, float32) for img in imgs] - else: - return _totensor(imgs, bgr2rgb, float32) - - -def load_file_from_url(url, model_dir=None, progress=True, file_name=None): - """Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py - """ - if model_dir is None: - hub_dir = get_dir() - model_dir = os.path.join(hub_dir, 'checkpoints') - - os.makedirs(os.path.join(ROOT_DIR, model_dir), exist_ok=True) - - parts = urlparse(url) - filename = os.path.basename(parts.path) - if file_name is not None: - filename = file_name - cached_file = os.path.abspath(os.path.join(ROOT_DIR, model_dir, filename)) - if not os.path.exists(cached_file): - print(f'Downloading: "{url}" to {cached_file}\n') - download_url_to_file(url, cached_file, hash_prefix=None, progress=progress) - return cached_file - - -def scandir(dir_path, suffix=None, recursive=False, full_path=False): - """Scan a directory to find the interested files. - Args: - dir_path (str): Path of the directory. - suffix (str | tuple(str), optional): File suffix that we are - interested in. Default: None. - recursive (bool, optional): If set to True, recursively scan the - directory. Default: False. - full_path (bool, optional): If set to True, include the dir_path. - Default: False. - Returns: - A generator for all the interested files with relative paths. - """ - - if (suffix is not None) and not isinstance(suffix, (str, tuple)): - raise TypeError('"suffix" must be a string or tuple of strings') - - root = dir_path - - def _scandir(dir_path, suffix, recursive): - for entry in os.scandir(dir_path): - if not entry.name.startswith('.') and entry.is_file(): - if full_path: - return_path = entry.path - else: - return_path = osp.relpath(entry.path, root) - - if suffix is None: - yield return_path - elif return_path.endswith(suffix): - yield return_path - else: - if recursive: - yield from _scandir(entry.path, suffix=suffix, recursive=recursive) - else: - continue - - return _scandir(dir_path, suffix=suffix, recursive=recursive) - - -def is_gray(img, threshold=10): - img = Image.fromarray(img) - if len(img.getbands()) == 1: - return True - img1 = np.asarray(img.getchannel(channel=0), dtype=np.int16) - img2 = np.asarray(img.getchannel(channel=1), dtype=np.int16) - img3 = np.asarray(img.getchannel(channel=2), dtype=np.int16) - diff1 = (img1 - img2).var() - diff2 = (img2 - img3).var() - diff3 = (img3 - img1).var() - diff_sum = (diff1 + diff2 + diff3) / 3.0 - if diff_sum <= threshold: - return True - else: - return False - -def rgb2gray(img, out_channel=3): - r, g, b = img[:,:,0], img[:,:,1], img[:,:,2] - gray = 0.2989 * r + 0.5870 * g + 0.1140 * b - if out_channel == 3: - gray = gray[:,:,np.newaxis].repeat(3, axis=2) - return gray - -def bgr2gray(img, out_channel=3): - b, g, r = img[:,:,0], img[:,:,1], img[:,:,2] - gray = 0.2989 * r + 0.5870 * g + 0.1140 * b - if out_channel == 3: - gray = gray[:,:,np.newaxis].repeat(3, axis=2) - return gray diff --git a/spaces/Jeff2323/ai-comic-factory/src/app/layout.tsx b/spaces/Jeff2323/ai-comic-factory/src/app/layout.tsx deleted file mode 100644 index 5c483885eda7b5d2003cc6052f014f0474b9749a..0000000000000000000000000000000000000000 --- 
a/spaces/Jeff2323/ai-comic-factory/src/app/layout.tsx +++ /dev/null @@ -1,24 +0,0 @@ -import './globals.css' -import type { Metadata } from 'next' -import { Inter } from 'next/font/google' - -const inter = Inter({ subsets: ['latin'] }) - -export const metadata: Metadata = { - title: 'AI Comic Factory: generate your own comics! Powered by Hugging Face 🤗', - description: 'Generate comic panels using a LLM + SDXL. Powered by Hugging Face 🤗', -} - -export default function RootLayout({ - children, -}: { - children: React.ReactNode -}) { - return ( - - - {children} - - - ) -} diff --git a/spaces/JosephTK/review-sentiment-analyzer/README.md b/spaces/JosephTK/review-sentiment-analyzer/README.md deleted file mode 100644 index 953e6613a2057f107efc0cfe7b062b6ef974fc3f..0000000000000000000000000000000000000000 --- a/spaces/JosephTK/review-sentiment-analyzer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Review Sentiment Analyzer -emoji: ⚡ -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/infer_pack/modules/F0Predictor/__init__.py b/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/infer_pack/modules/F0Predictor/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/coco_panoptic_metric.py b/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/coco_panoptic_metric.py deleted file mode 100644 index 475e51dbc1979289dff8462bd7178521b6267fdc..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/coco_panoptic_metric.py +++ /dev/null @@ -1,612 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import datetime -import itertools -import os.path as osp -import tempfile -from typing import Dict, Optional, Sequence, Tuple, Union - -import mmcv -import numpy as np -from mmengine.evaluator import BaseMetric -from mmengine.fileio import dump, get_local_path, load -from mmengine.logging import MMLogger, print_log -from terminaltables import AsciiTable - -from mmdet.datasets.api_wrappers import COCOPanoptic -from mmdet.registry import METRICS -from ..functional import (INSTANCE_OFFSET, pq_compute_multi_core, - pq_compute_single_core) - -try: - import panopticapi - from panopticapi.evaluation import VOID, PQStat - from panopticapi.utils import id2rgb, rgb2id -except ImportError: - panopticapi = None - id2rgb = None - rgb2id = None - VOID = None - PQStat = None - - -@METRICS.register_module() -class CocoPanopticMetric(BaseMetric): - """COCO panoptic segmentation evaluation metric. - - Evaluate PQ, SQ RQ for panoptic segmentation tasks. Please refer to - https://cocodataset.org/#panoptic-eval for more details. - - Args: - ann_file (str, optional): Path to the coco format annotation file. - If not specified, ground truth annotations from the dataset will - be converted to coco format. Defaults to None. - seg_prefix (str, optional): Path to the directory which contains the - coco panoptic segmentation mask. It should be specified when - evaluate. Defaults to None. - classwise (bool): Whether to evaluate the metric class-wise. - Defaults to False. - outfile_prefix (str, optional): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". 
- If not specified, a temp file will be created. - It should be specified when format_only is True. Defaults to None. - format_only (bool): Format the output results without perform - evaluation. It is useful when you want to format the result - to a specific format and submit it to the test server. - Defaults to False. - nproc (int): Number of processes for panoptic quality computing. - Defaults to 32. When ``nproc`` exceeds the number of cpu cores, - the number of cpu cores is used. - file_client_args (dict, optional): Arguments to instantiate the - corresponding backend in mmdet <= 3.0.0rc6. Defaults to None. - backend_args (dict, optional): Arguments to instantiate the - corresponding backend. Defaults to None. - collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be 'cpu' or - 'gpu'. Defaults to 'cpu'. - prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. - If prefix is not provided in the argument, self.default_prefix - will be used instead. Defaults to None. - """ - default_prefix: Optional[str] = 'coco_panoptic' - - def __init__(self, - ann_file: Optional[str] = None, - seg_prefix: Optional[str] = None, - classwise: bool = False, - format_only: bool = False, - outfile_prefix: Optional[str] = None, - nproc: int = 32, - file_client_args: dict = None, - backend_args: dict = None, - collect_device: str = 'cpu', - prefix: Optional[str] = None) -> None: - if panopticapi is None: - raise RuntimeError( - 'panopticapi is not installed, please install it by: ' - 'pip install git+https://github.com/cocodataset/' - 'panopticapi.git.') - - super().__init__(collect_device=collect_device, prefix=prefix) - self.classwise = classwise - self.format_only = format_only - if self.format_only: - assert outfile_prefix is not None, 'outfile_prefix must be not' - 'None when format_only is True, otherwise the result files will' - 'be saved to a temp directory which will be cleaned up at the end.' - - self.tmp_dir = None - # outfile_prefix should be a prefix of a path which points to a shared - # storage when train or test with multi nodes. - self.outfile_prefix = outfile_prefix - if outfile_prefix is None: - self.tmp_dir = tempfile.TemporaryDirectory() - self.outfile_prefix = osp.join(self.tmp_dir.name, 'results') - # the directory to save predicted panoptic segmentation mask - self.seg_out_dir = f'{self.outfile_prefix}.panoptic' - self.nproc = nproc - self.seg_prefix = seg_prefix - - self.cat_ids = None - self.cat2label = None - - self.backend_args = backend_args - if file_client_args is not None: - raise RuntimeError( - 'The `file_client_args` is deprecated, ' - 'please use `backend_args` instead, please refer to' - 'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501 - ) - - if ann_file: - with get_local_path( - ann_file, backend_args=self.backend_args) as local_path: - self._coco_api = COCOPanoptic(local_path) - self.categories = self._coco_api.cats - else: - self._coco_api = None - self.categories = None - - def __del__(self) -> None: - """Clean up.""" - if self.tmp_dir is not None: - self.tmp_dir.cleanup() - - def gt_to_coco_json(self, gt_dicts: Sequence[dict], - outfile_prefix: str) -> Tuple[str, str]: - """Convert ground truth to coco panoptic segmentation format json file. - - Args: - gt_dicts (Sequence[dict]): Ground truth of the dataset. 
- outfile_prefix (str): The filename prefix of the json file. If the - prefix is "somepath/xxx", the json file will be named - "somepath/xxx.gt.json". - - Returns: - Tuple[str, str]: The filename of the json file and the name of the\ - directory which contains panoptic segmentation masks. - """ - assert len(gt_dicts) > 0, 'gt_dicts is empty.' - gt_folder = osp.dirname(gt_dicts[0]['seg_map_path']) - converted_json_path = f'{outfile_prefix}.gt.json' - - categories = [] - for id, name in enumerate(self.dataset_meta['classes']): - isthing = 1 if name in self.dataset_meta['thing_classes'] else 0 - categories.append({'id': id, 'name': name, 'isthing': isthing}) - - image_infos = [] - annotations = [] - for gt_dict in gt_dicts: - img_id = gt_dict['image_id'] - image_info = { - 'id': img_id, - 'width': gt_dict['width'], - 'height': gt_dict['height'], - 'file_name': osp.split(gt_dict['seg_map_path'])[-1] - } - image_infos.append(image_info) - - pan_png = mmcv.imread(gt_dict['seg_map_path']).squeeze() - pan_png = pan_png[:, :, ::-1] - pan_png = rgb2id(pan_png) - segments_info = [] - for segment_info in gt_dict['segments_info']: - id = segment_info['id'] - label = segment_info['category'] - mask = pan_png == id - isthing = categories[label]['isthing'] - if isthing: - iscrowd = 1 if not segment_info['is_thing'] else 0 - else: - iscrowd = 0 - - new_segment_info = { - 'id': id, - 'category_id': label, - 'isthing': isthing, - 'iscrowd': iscrowd, - 'area': mask.sum() - } - segments_info.append(new_segment_info) - - segm_file = image_info['file_name'].replace('jpg', 'png') - annotation = dict( - image_id=img_id, - segments_info=segments_info, - file_name=segm_file) - annotations.append(annotation) - pan_png = id2rgb(pan_png) - - info = dict( - date_created=str(datetime.datetime.now()), - description='Coco json file converted by mmdet CocoPanopticMetric.' - ) - coco_json = dict( - info=info, - images=image_infos, - categories=categories, - licenses=None, - ) - if len(annotations) > 0: - coco_json['annotations'] = annotations - dump(coco_json, converted_json_path) - return converted_json_path, gt_folder - - def result2json(self, results: Sequence[dict], - outfile_prefix: str) -> Tuple[str, str]: - """Dump the panoptic results to a COCO style json file and a directory. - - Args: - results (Sequence[dict]): Testing results of the dataset. - outfile_prefix (str): The filename prefix of the json files and the - directory. - - Returns: - Tuple[str, str]: The json file and the directory which contains \ - panoptic segmentation masks. The filename of the json is - "somepath/xxx.panoptic.json" and name of the directory is - "somepath/xxx.panoptic". - """ - label2cat = dict((v, k) for (k, v) in self.cat2label.items()) - pred_annotations = [] - for idx in range(len(results)): - result = results[idx] - for segment_info in result['segments_info']: - sem_label = segment_info['category_id'] - # convert sem_label to json label - cat_id = label2cat[sem_label] - segment_info['category_id'] = label2cat[sem_label] - is_thing = self.categories[cat_id]['isthing'] - segment_info['isthing'] = is_thing - pred_annotations.append(result) - pan_json_results = dict(annotations=pred_annotations) - json_filename = f'{outfile_prefix}.panoptic.json' - dump(pan_json_results, json_filename) - return json_filename, ( - self.seg_out_dir - if self.tmp_dir is None else tempfile.gettempdir()) - - def _parse_predictions(self, - pred: dict, - img_id: int, - segm_file: str, - label2cat=None) -> dict: - """Parse panoptic segmentation predictions. 
- - Args: - pred (dict): Panoptic segmentation predictions. - img_id (int): Image id. - segm_file (str): Segmentation file name. - label2cat (dict): Mapping from label to category id. - Defaults to None. - - Returns: - dict: Parsed predictions. - """ - result = dict() - result['img_id'] = img_id - # shape (1, H, W) -> (H, W) - pan = pred['pred_panoptic_seg']['sem_seg'].cpu().numpy()[0] - pan_labels = np.unique(pan) - segments_info = [] - for pan_label in pan_labels: - sem_label = pan_label % INSTANCE_OFFSET - # We reserve the length of dataset_meta['classes'] for VOID label - if sem_label == len(self.dataset_meta['classes']): - continue - mask = pan == pan_label - area = mask.sum() - segments_info.append({ - 'id': - int(pan_label), - # when ann_file provided, sem_label should be cat_id, otherwise - # sem_label should be a continuous id, not the cat_id - # defined in dataset - 'category_id': - label2cat[sem_label] if label2cat else sem_label, - 'area': - int(area) - }) - # evaluation script uses 0 for VOID label. - pan[pan % INSTANCE_OFFSET == len(self.dataset_meta['classes'])] = VOID - pan = id2rgb(pan).astype(np.uint8) - mmcv.imwrite(pan[:, :, ::-1], osp.join(self.seg_out_dir, segm_file)) - result = { - 'image_id': img_id, - 'segments_info': segments_info, - 'file_name': segm_file - } - - return result - - def _compute_batch_pq_stats(self, data_samples: Sequence[dict]): - """Process gts and predictions when ``outfile_prefix`` is not set, gts - are from dataset or a json file which is defined by ``ann_file``. - - Intermediate results, ``pq_stats``, are computed here and put into - ``self.results``. - """ - if self._coco_api is None: - categories = dict() - for id, name in enumerate(self.dataset_meta['classes']): - isthing = 1 if name in self.dataset_meta['thing_classes']\ - else 0 - categories[id] = {'id': id, 'name': name, 'isthing': isthing} - label2cat = None - else: - categories = self.categories - cat_ids = self._coco_api.get_cat_ids( - cat_names=self.dataset_meta['classes']) - label2cat = {i: cat_id for i, cat_id in enumerate(cat_ids)} - - for data_sample in data_samples: - # parse pred - img_id = data_sample['img_id'] - segm_file = osp.basename(data_sample['img_path']).replace( - 'jpg', 'png') - result = self._parse_predictions( - pred=data_sample, - img_id=img_id, - segm_file=segm_file, - label2cat=label2cat) - - # parse gt - gt = dict() - gt['image_id'] = img_id - gt['width'] = data_sample['ori_shape'][1] - gt['height'] = data_sample['ori_shape'][0] - gt['file_name'] = segm_file - - if self._coco_api is None: - # get segments_info from data_sample - seg_map_path = osp.join(self.seg_prefix, segm_file) - pan_png = mmcv.imread(seg_map_path).squeeze() - pan_png = pan_png[:, :, ::-1] - pan_png = rgb2id(pan_png) - segments_info = [] - - for segment_info in data_sample['segments_info']: - id = segment_info['id'] - label = segment_info['category'] - mask = pan_png == id - isthing = categories[label]['isthing'] - if isthing: - iscrowd = 1 if not segment_info['is_thing'] else 0 - else: - iscrowd = 0 - - new_segment_info = { - 'id': id, - 'category_id': label, - 'isthing': isthing, - 'iscrowd': iscrowd, - 'area': mask.sum() - } - segments_info.append(new_segment_info) - else: - # get segments_info from annotation file - segments_info = self._coco_api.imgToAnns[img_id] - - gt['segments_info'] = segments_info - - pq_stats = pq_compute_single_core( - proc_id=0, - annotation_set=[(gt, result)], - gt_folder=self.seg_prefix, - pred_folder=self.seg_out_dir, - categories=categories, - 
backend_args=self.backend_args) - - self.results.append(pq_stats) - - def _process_gt_and_predictions(self, data_samples: Sequence[dict]): - """Process gts and predictions when ``outfile_prefix`` is set. - - The predictions will be saved to directory specified by - ``outfile_predfix``. The matched pair (gt, result) will be put into - ``self.results``. - """ - for data_sample in data_samples: - # parse pred - img_id = data_sample['img_id'] - segm_file = osp.basename(data_sample['img_path']).replace( - 'jpg', 'png') - result = self._parse_predictions( - pred=data_sample, img_id=img_id, segm_file=segm_file) - - # parse gt - gt = dict() - gt['image_id'] = img_id - gt['width'] = data_sample['ori_shape'][1] - gt['height'] = data_sample['ori_shape'][0] - - if self._coco_api is None: - # get segments_info from dataset - gt['segments_info'] = data_sample['segments_info'] - gt['seg_map_path'] = data_sample['seg_map_path'] - - self.results.append((gt, result)) - - # TODO: data_batch is no longer needed, consider adjusting the - # parameter position - def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: - """Process one batch of data samples and predictions. The processed - results should be stored in ``self.results``, which will be used to - compute the metrics when all batches have been processed. - - Args: - data_batch (dict): A batch of data from the dataloader. - data_samples (Sequence[dict]): A batch of data samples that - contain annotations and predictions. - """ - # If ``self.tmp_dir`` is none, it will save gt and predictions to - # self.results, otherwise, it will compute pq_stats here. - if self.tmp_dir is None: - self._process_gt_and_predictions(data_samples) - else: - self._compute_batch_pq_stats(data_samples) - - def compute_metrics(self, results: list) -> Dict[str, float]: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. There - are two cases: - - - When ``outfile_prefix`` is not provided, the elements in - results are pq_stats which can be summed directly to get PQ. - - When ``outfile_prefix`` is provided, the elements in - results are tuples like (gt, pred). - - Returns: - Dict[str, float]: The computed metrics. The keys are the names of - the metrics, and the values are corresponding results. 
- """ - logger: MMLogger = MMLogger.get_current_instance() - - if self.tmp_dir is None: - # do evaluation after collect all the results - - # split gt and prediction list - gts, preds = zip(*results) - - if self._coco_api is None: - # use converted gt json file to initialize coco api - logger.info('Converting ground truth to coco format...') - coco_json_path, gt_folder = self.gt_to_coco_json( - gt_dicts=gts, outfile_prefix=self.outfile_prefix) - self._coco_api = COCOPanoptic(coco_json_path) - else: - gt_folder = self.seg_prefix - - self.cat_ids = self._coco_api.get_cat_ids( - cat_names=self.dataset_meta['classes']) - self.cat2label = { - cat_id: i - for i, cat_id in enumerate(self.cat_ids) - } - self.img_ids = self._coco_api.get_img_ids() - self.categories = self._coco_api.cats - - # convert predictions to coco format and dump to json file - json_filename, pred_folder = self.result2json( - results=preds, outfile_prefix=self.outfile_prefix) - - if self.format_only: - logger.info('results are saved in ' - f'{osp.dirname(self.outfile_prefix)}') - return dict() - - imgs = self._coco_api.imgs - gt_json = self._coco_api.img_ann_map - gt_json = [{ - 'image_id': k, - 'segments_info': v, - 'file_name': imgs[k]['segm_file'] - } for k, v in gt_json.items()] - pred_json = load(json_filename) - pred_json = dict( - (el['image_id'], el) for el in pred_json['annotations']) - - # match the gt_anns and pred_anns in the same image - matched_annotations_list = [] - for gt_ann in gt_json: - img_id = gt_ann['image_id'] - if img_id not in pred_json.keys(): - raise Exception('no prediction for the image' - ' with id: {}'.format(img_id)) - matched_annotations_list.append((gt_ann, pred_json[img_id])) - - pq_stat = pq_compute_multi_core( - matched_annotations_list, - gt_folder, - pred_folder, - self.categories, - backend_args=self.backend_args, - nproc=self.nproc) - - else: - # aggregate the results generated in process - if self._coco_api is None: - categories = dict() - for id, name in enumerate(self.dataset_meta['classes']): - isthing = 1 if name in self.dataset_meta[ - 'thing_classes'] else 0 - categories[id] = { - 'id': id, - 'name': name, - 'isthing': isthing - } - self.categories = categories - - pq_stat = PQStat() - for result in results: - pq_stat += result - - metrics = [('All', None), ('Things', True), ('Stuff', False)] - pq_results = {} - - for name, isthing in metrics: - pq_results[name], classwise_results = pq_stat.pq_average( - self.categories, isthing=isthing) - if name == 'All': - pq_results['classwise'] = classwise_results - - classwise_results = None - if self.classwise: - classwise_results = { - k: v - for k, v in zip(self.dataset_meta['classes'], - pq_results['classwise'].values()) - } - - print_panoptic_table(pq_results, classwise_results, logger=logger) - results = parse_pq_results(pq_results) - - return results - - -def parse_pq_results(pq_results: dict) -> dict: - """Parse the Panoptic Quality results. - - Args: - pq_results (dict): Panoptic Quality results. - - Returns: - dict: Panoptic Quality results parsed. 
- """ - result = dict() - result['PQ'] = 100 * pq_results['All']['pq'] - result['SQ'] = 100 * pq_results['All']['sq'] - result['RQ'] = 100 * pq_results['All']['rq'] - result['PQ_th'] = 100 * pq_results['Things']['pq'] - result['SQ_th'] = 100 * pq_results['Things']['sq'] - result['RQ_th'] = 100 * pq_results['Things']['rq'] - result['PQ_st'] = 100 * pq_results['Stuff']['pq'] - result['SQ_st'] = 100 * pq_results['Stuff']['sq'] - result['RQ_st'] = 100 * pq_results['Stuff']['rq'] - return result - - -def print_panoptic_table( - pq_results: dict, - classwise_results: Optional[dict] = None, - logger: Optional[Union['MMLogger', str]] = None) -> None: - """Print the panoptic evaluation results table. - - Args: - pq_results(dict): The Panoptic Quality results. - classwise_results(dict, optional): The classwise Panoptic Quality. - results. The keys are class names and the values are metrics. - Defaults to None. - logger (:obj:`MMLogger` | str, optional): Logger used for printing - related information during evaluation. Default: None. - """ - - headers = ['', 'PQ', 'SQ', 'RQ', 'categories'] - data = [headers] - for name in ['All', 'Things', 'Stuff']: - numbers = [ - f'{(pq_results[name][k] * 100):0.3f}' for k in ['pq', 'sq', 'rq'] - ] - row = [name] + numbers + [pq_results[name]['n']] - data.append(row) - table = AsciiTable(data) - print_log('Panoptic Evaluation Results:\n' + table.table, logger=logger) - - if classwise_results is not None: - class_metrics = [(name, ) + tuple(f'{(metrics[k] * 100):0.3f}' - for k in ['pq', 'sq', 'rq']) - for name, metrics in classwise_results.items()] - num_columns = min(8, len(class_metrics) * 4) - results_flatten = list(itertools.chain(*class_metrics)) - headers = ['category', 'PQ', 'SQ', 'RQ'] * (num_columns // 4) - results_2d = itertools.zip_longest( - *[results_flatten[i::num_columns] for i in range(num_columns)]) - data = [headers] - data += [result for result in results_2d] - table = AsciiTable(data) - print_log( - 'Classwise Panoptic Evaluation Results:\n' + table.table, - logger=logger) diff --git a/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/openimages_metric.py b/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/openimages_metric.py deleted file mode 100644 index d75c59e0e711c90bb1e5fbcc1529e95864e99e9a..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/openimages_metric.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -from collections import OrderedDict -from typing import List, Optional, Sequence, Union - -import numpy as np -from mmengine.evaluator import BaseMetric -from mmengine.logging import MMLogger, print_log - -from mmdet.registry import METRICS -from ..functional import eval_map - - -@METRICS.register_module() -class OpenImagesMetric(BaseMetric): - """OpenImages evaluation metric. - - Evaluate detection mAP for OpenImages. Please refer to - https://storage.googleapis.com/openimages/web/evaluation.html for more - details. - - Args: - iou_thrs (float or List[float]): IoU threshold. Defaults to 0.5. - ioa_thrs (float or List[float]): IoA threshold. Defaults to 0.5. - scale_ranges (List[tuple], optional): Scale ranges for evaluating - mAP. If not specified, all bounding boxes would be included in - evaluation. Defaults to None - use_group_of (bool): Whether consider group of groud truth bboxes - during evaluating. Defaults to True. - get_supercategory (bool): Whether to get parent class of the - current class. Default: True. 
- filter_labels (bool): Whether filter unannotated classes. - Default: True. - collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be 'cpu' or - 'gpu'. Defaults to 'cpu'. - prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. - If prefix is not provided in the argument, self.default_prefix - will be used instead. Defaults to None. - """ - default_prefix: Optional[str] = 'openimages' - - def __init__(self, - iou_thrs: Union[float, List[float]] = 0.5, - ioa_thrs: Union[float, List[float]] = 0.5, - scale_ranges: Optional[List[tuple]] = None, - use_group_of: bool = True, - get_supercategory: bool = True, - filter_labels: bool = True, - collect_device: str = 'cpu', - prefix: Optional[str] = None) -> None: - super().__init__(collect_device=collect_device, prefix=prefix) - self.iou_thrs = [iou_thrs] if isinstance(iou_thrs, float) else iou_thrs - self.ioa_thrs = [ioa_thrs] if (isinstance(ioa_thrs, float) - or ioa_thrs is None) else ioa_thrs - assert isinstance(self.iou_thrs, list) and isinstance( - self.ioa_thrs, list) - assert len(self.iou_thrs) == len(self.ioa_thrs) - - self.scale_ranges = scale_ranges - self.use_group_of = use_group_of - self.get_supercategory = get_supercategory - self.filter_labels = filter_labels - - def _get_supercategory_ann(self, instances: List[dict]) -> List[dict]: - """Get parent classes's annotation of the corresponding class. - - Args: - instances (List[dict]): A list of annotations of the instances. - - Returns: - List[dict]: Annotations extended with super-category. - """ - supercat_instances = [] - relation_matrix = self.dataset_meta['RELATION_MATRIX'] - for instance in instances: - labels = np.where(relation_matrix[instance['bbox_label']])[0] - for label in labels: - if label == instance['bbox_label']: - continue - new_instance = copy.deepcopy(instance) - new_instance['bbox_label'] = label - supercat_instances.append(new_instance) - return supercat_instances - - def _process_predictions(self, pred_bboxes: np.ndarray, - pred_scores: np.ndarray, pred_labels: np.ndarray, - gt_instances: list, - image_level_labels: np.ndarray) -> tuple: - """Process results of the corresponding class of the detection bboxes. - - Note: It will choose to do the following two processing according to - the parameters: - - 1. Whether to add parent classes of the corresponding class of the - detection bboxes. - - 2. Whether to ignore the classes that unannotated on that image. - - Args: - pred_bboxes (np.ndarray): bboxes predicted by the model - pred_scores (np.ndarray): scores predicted by the model - pred_labels (np.ndarray): labels predicted by the model - gt_instances (list): ground truth annotations - image_level_labels (np.ndarray): human-verified image level labels - - Returns: - tuple: Processed bboxes, scores, and labels. 
- """ - processed_bboxes = copy.deepcopy(pred_bboxes) - processed_scores = copy.deepcopy(pred_scores) - processed_labels = copy.deepcopy(pred_labels) - gt_labels = np.array([ins['bbox_label'] for ins in gt_instances], - dtype=np.int64) - if image_level_labels is not None: - allowed_classes = np.unique( - np.append(gt_labels, image_level_labels)) - else: - allowed_classes = np.unique(gt_labels) - relation_matrix = self.dataset_meta['RELATION_MATRIX'] - pred_classes = np.unique(pred_labels) - for pred_class in pred_classes: - classes = np.where(relation_matrix[pred_class])[0] - for cls in classes: - if (cls in allowed_classes and cls != pred_class - and self.get_supercategory): - # add super-supercategory preds - index = np.where(pred_labels == pred_class)[0] - processed_scores = np.concatenate( - [processed_scores, pred_scores[index]]) - processed_bboxes = np.concatenate( - [processed_bboxes, pred_bboxes[index]]) - extend_labels = np.full(index.shape, cls, dtype=np.int64) - processed_labels = np.concatenate( - [processed_labels, extend_labels]) - elif cls not in allowed_classes and self.filter_labels: - # remove unannotated preds - index = np.where(processed_labels != cls)[0] - processed_scores = processed_scores[index] - processed_bboxes = processed_bboxes[index] - processed_labels = processed_labels[index] - return processed_bboxes, processed_scores, processed_labels - - # TODO: data_batch is no longer needed, consider adjusting the - # parameter position - def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: - """Process one batch of data samples and predictions. The processed - results should be stored in ``self.results``, which will be used to - compute the metrics when all batches have been processed. - - Args: - data_batch (dict): A batch of data from the dataloader. - data_samples (Sequence[dict]): A batch of data samples that - contain annotations and predictions. - """ - for data_sample in data_samples: - gt = copy.deepcopy(data_sample) - # add super-category instances - # TODO: Need to refactor to support LoadAnnotations - instances = gt['instances'] - if self.get_supercategory: - supercat_instances = self._get_supercategory_ann(instances) - instances.extend(supercat_instances) - gt_labels = [] - gt_bboxes = [] - is_group_ofs = [] - for ins in instances: - gt_labels.append(ins['bbox_label']) - gt_bboxes.append(ins['bbox']) - is_group_ofs.append(ins['is_group_of']) - ann = dict( - labels=np.array(gt_labels, dtype=np.int64), - bboxes=np.array(gt_bboxes, dtype=np.float32).reshape((-1, 4)), - gt_is_group_ofs=np.array(is_group_ofs, dtype=bool)) - - image_level_labels = gt.get('image_level_labels', None) - pred = data_sample['pred_instances'] - pred_bboxes = pred['bboxes'].cpu().numpy() - pred_scores = pred['scores'].cpu().numpy() - pred_labels = pred['labels'].cpu().numpy() - - pred_bboxes, pred_scores, pred_labels = self._process_predictions( - pred_bboxes, pred_scores, pred_labels, instances, - image_level_labels) - - dets = [] - for label in range(len(self.dataset_meta['classes'])): - index = np.where(pred_labels == label)[0] - pred_bbox_scores = np.hstack( - [pred_bboxes[index], pred_scores[index].reshape((-1, 1))]) - dets.append(pred_bbox_scores) - self.results.append((ann, dets)) - - def compute_metrics(self, results: list) -> dict: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. - - Returns: - dict: The computed metrics. 
The keys are the names of the metrics, - and the values are corresponding results. - """ - logger = MMLogger.get_current_instance() - gts, preds = zip(*results) - eval_results = OrderedDict() - # get dataset type - dataset_type = self.dataset_meta.get('dataset_type') - if dataset_type not in ['oid_challenge', 'oid_v6']: - dataset_type = 'oid_v6' - print_log( - 'Cannot infer dataset type from the length of the' - ' classes. Set `oid_v6` as dataset type.', - logger='current') - mean_aps = [] - for i, (iou_thr, - ioa_thr) in enumerate(zip(self.iou_thrs, self.ioa_thrs)): - if self.use_group_of: - assert ioa_thr is not None, 'ioa_thr must have value when' \ - ' using group_of in evaluation.' - print_log(f'\n{"-" * 15}iou_thr, ioa_thr: {iou_thr}, {ioa_thr}' - f'{"-" * 15}') - mean_ap, _ = eval_map( - preds, - gts, - scale_ranges=self.scale_ranges, - iou_thr=iou_thr, - ioa_thr=ioa_thr, - dataset=dataset_type, - logger=logger, - use_group_of=self.use_group_of) - - mean_aps.append(mean_ap) - eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3) - eval_results['mAP'] = sum(mean_aps) / len(mean_aps) - return eval_results diff --git a/spaces/KyanChen/RSPrompter/mmpl/datasets/ssdd_ins_dataset.py b/spaces/KyanChen/RSPrompter/mmpl/datasets/ssdd_ins_dataset.py deleted file mode 100644 index 7bab673e796aca77b34600b730b5a1ca46eee011..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpl/datasets/ssdd_ins_dataset.py +++ /dev/null @@ -1,54 +0,0 @@ -from typing import List -from mmpl.registry import DATASETS -from mmdet.datasets.coco import CocoDataset - - -@DATASETS.register_module() -class SSDDInsSegDataset(CocoDataset): - """Dataset for Cityscapes.""" - - METAINFO = { - 'classes': ['ship'], - 'palette': [(0, 0, 255)] - } - - def filter_data(self) -> List[dict]: - """Filter annotations according to filter_cfg. - - Returns: - List[dict]: Filtered results. - """ - # if self.test_mode: - # return self.data_list - - if self.filter_cfg is None: - return self.data_list - - filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False) - min_size = self.filter_cfg.get('min_size', 0) - - # obtain images that contain annotation - ids_with_ann = set(data_info['img_id'] for data_info in self.data_list) - # obtain images that contain annotations of the required categories - ids_in_cat = set() - for i, class_id in enumerate(self.cat_ids): - ids_in_cat |= set(self.cat_img_map[class_id]) - # merge the image id sets of the two conditions and use the merged set - # to filter out images if self.filter_empty_gt=True - ids_in_cat &= ids_with_ann - - valid_data_infos = [] - for i, data_info in enumerate(self.data_list): - img_id = data_info['img_id'] - width = data_info['width'] - height = data_info['height'] - all_is_crowd = all([ - instance['ignore_flag'] == 1 - for instance in data_info['instances'] - ]) - if filter_empty_gt and (img_id not in ids_in_cat or all_is_crowd): - continue - if min(width, height) >= min_size: - valid_data_infos.append(data_info) - - return valid_data_infos diff --git a/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.h b/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.h deleted file mode 100644 index 51bb27e9ee828f967e8aa854c2d55574040c6d7e..0000000000000000000000000000000000000000 --- a/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.h +++ /dev/null @@ -1,38 +0,0 @@ -/*! 
-************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. -* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -/*! -* Copyright (c) Facebook, Inc. and its affiliates. -* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR -*/ - -#pragma once -#include - -at::Tensor -ms_deform_attn_cpu_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step); - -std::vector -ms_deform_attn_cpu_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step); - - diff --git a/spaces/LanguageBind/LanguageBind/open_clip/version.py b/spaces/LanguageBind/LanguageBind/open_clip/version.py deleted file mode 100644 index a910817da22d06aa0244c6d488b40d30da2bfb7e..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/open_clip/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '2.20.0' diff --git a/spaces/LaynzKunz/Advanced-RVC-Inference/lib/infer_pack/transforms.py b/spaces/LaynzKunz/Advanced-RVC-Inference/lib/infer_pack/transforms.py deleted file mode 100644 index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Advanced-RVC-Inference/lib/infer_pack/transforms.py +++ /dev/null @@ -1,209 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = {"tails": tails, "tail_bound": tail_bound} - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 - - -def unconstrained_rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails="linear", - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - 
outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == "linear": - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError("{} tails are not implemented.".format(tails)) - - ( - outputs[inside_interval_mask], - logabsdet[inside_interval_mask], - ) = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, - right=tail_bound, - bottom=-tail_bound, - top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - ) - - return outputs, logabsdet - - -def rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0.0, - right=1.0, - bottom=0.0, - top=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError("Input to a transform is not within its domain") - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError("Minimal bin width too large for the number of bins") - if min_bin_height * num_bins > 1.0: - raise ValueError("Minimal bin height too large for the number of bins") - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) + input_heights * (input_delta - input_derivatives) - b = input_heights * input_derivatives - (inputs - 
input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) - c = -input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * ( - input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta - ) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/tools/infer/infer-pm-index256.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/tools/infer/infer-pm-index256.py deleted file mode 100644 index 1883634052acb7909b1bd31a858b4373bc7ce3de..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/tools/infer/infer-pm-index256.py +++ /dev/null @@ -1,202 +0,0 @@ -""" - -对源特征进行检索 -""" -import os -import logging - -logger = logging.getLogger(__name__) - -import parselmouth -import torch - -os.environ["CUDA_VISIBLE_DEVICES"] = "0" -# import torchcrepe -from time import time as ttime - -# import pyworld -import librosa -import numpy as np -import soundfile as sf -import torch.nn.functional as F -from fairseq import checkpoint_utils - -# from models import SynthesizerTrn256#hifigan_nonsf -# from lib.infer.infer_pack.models import SynthesizerTrn256NSF as SynthesizerTrn256#hifigan_nsf -from lib.infer.infer_libs.infer_pack.models import ( - SynthesizerTrnMs256NSFsid as SynthesizerTrn256, -) # hifigan_nsf -from scipy.io import wavfile - -# from lib.infer.infer_pack.models import SynthesizerTrnMs256NSFsid_sim as SynthesizerTrn256#hifigan_nsf -# from models import SynthesizerTrn256NSFsim as SynthesizerTrn256#hifigan_nsf -# from models import SynthesizerTrn256NSFsimFlow as SynthesizerTrn256#hifigan_nsf - - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -model_path = r"E:\codes\py39\vits_vc_gpu_train\assets\hubert\hubert_base.pt" # -logger.info("Load model(s) from {}".format(model_path)) -models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [model_path], - suffix="", -) -model = models[0] -model = model.to(device) -model = model.half() -model.eval() - -# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],183,256,is_half=True)#hifigan#512#256 -# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], 
[1,3,5]],[10,10,2,2],512,[16,16,4,4],109,256,is_half=True)#hifigan#512#256 -net_g = SynthesizerTrn256( - 1025, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [10, 10, 2, 2], - 512, - [16, 16, 4, 4], - 183, - 256, - is_half=True, -) # hifigan#512#256#no_dropout -# net_g = SynthesizerTrn256(1025,32,192,192,768,2,3,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],0)#ts3 -# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2],512,[16,16,4],0)#hifigan-ps-sr -# -# net_g = SynthesizerTrn(1025, 32, 192, 192, 768, 2, 6, 3, 0.1, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [5,5], 512, [15,15], 0)#ms -# net_g = SynthesizerTrn(1025, 32, 192, 192, 768, 2, 6, 3, 0.1, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,10], 512, [16,16], 0)#idwt2 - -# weights=torch.load("infer/ft-mi_1k-noD.pt") -# weights=torch.load("infer/ft-mi-freeze-vocoder-flow-enc_q_1k.pt") -# weights=torch.load("infer/ft-mi-freeze-vocoder_true_1k.pt") -# weights=torch.load("infer/ft-mi-sim1k.pt") -weights = torch.load("infer/ft-mi-no_opt-no_dropout.pt") -logger.debug(net_g.load_state_dict(weights, strict=True)) - -net_g.eval().to(device) -net_g.half() - - -def get_f0(x, p_len, f0_up_key=0): - time_step = 160 / 16000 * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - f0 = ( - parselmouth.Sound(x, 16000) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0 *= pow(2, f0_up_key / 12) - f0bak = f0.copy() - - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - # f0_mel[f0_mel > 188] = 188 - f0_coarse = np.rint(f0_mel).astype(np.int32) - return f0_coarse, f0bak - - -import faiss - -index = faiss.read_index("infer/added_IVF512_Flat_mi_baseline_src_feat.index") -big_npy = np.load("infer/big_src_feature_mi.npy") -ta0 = ta1 = ta2 = 0 -for idx, name in enumerate( - [ - "冬之花clip1.wav", - ] -): ## - wav_path = "todo-songs/%s" % name # - f0_up_key = -2 # - audio, sampling_rate = sf.read(wav_path) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - - feats = torch.from_numpy(audio).float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - inputs = { - "source": feats.half().to(device), - "padding_mask": padding_mask.to(device), - "output_layer": 9, # layer 9 - } - if torch.cuda.is_available(): - torch.cuda.synchronize() - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) - - ####索引优化 - npy = feats[0].cpu().numpy().astype("float32") - D, I = index.search(npy, 1) - feats = ( - torch.from_numpy(big_npy[I.squeeze()].astype("float16")).unsqueeze(0).to(device) - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if 
torch.cuda.is_available(): - torch.cuda.synchronize() - t1 = ttime() - # p_len = min(feats.shape[1],10000,pitch.shape[0])#太大了爆显存 - p_len = min(feats.shape[1], 10000) # - pitch, pitchf = get_f0(audio, p_len, f0_up_key) - p_len = min(feats.shape[1], 10000, pitch.shape[0]) # 太大了爆显存 - if torch.cuda.is_available(): - torch.cuda.synchronize() - t2 = ttime() - feats = feats[:, :p_len, :] - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - p_len = torch.LongTensor([p_len]).to(device) - pitch = torch.LongTensor(pitch).unsqueeze(0).to(device) - sid = torch.LongTensor([0]).to(device) - pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device) - with torch.no_grad(): - audio = ( - net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] - .data.cpu() - .float() - .numpy() - ) # nsf - if torch.cuda.is_available(): - torch.cuda.synchronize() - t3 = ttime() - ta0 += t1 - t0 - ta1 += t2 - t1 - ta2 += t3 - t2 - # wavfile.write("ft-mi_1k-index256-noD-%s.wav"%name, 40000, audio)## - # wavfile.write("ft-mi-freeze-vocoder-flow-enc_q_1k-%s.wav"%name, 40000, audio)## - # wavfile.write("ft-mi-sim1k-%s.wav"%name, 40000, audio)## - wavfile.write("ft-mi-no_opt-no_dropout-%s.wav" % name, 40000, audio) ## - - -logger.debug("%.2fs %.2fs %.2fs", ta0, ta1, ta2) # diff --git a/spaces/Lbin123/Lbingo/src/app/layout.tsx b/spaces/Lbin123/Lbingo/src/app/layout.tsx deleted file mode 100644 index 8b5122759987177b8dc4e4356d1d06cea25c15ea..0000000000000000000000000000000000000000 --- a/spaces/Lbin123/Lbingo/src/app/layout.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import { Metadata } from 'next' -import { Toaster } from 'react-hot-toast' -import { TailwindIndicator } from '@/components/tailwind-indicator' -import { Providers } from '@/components/providers' -import { Header } from '@/components/header' - -import '@/app/globals.scss' - - -export const metadata: Metadata = { - title: { - default: 'Bing AI Chatbot', - template: `%s - Bing AI Chatbot` - }, - description: 'Bing AI Chatbot Web App.', - themeColor: [ - { media: '(prefers-color-scheme: light)', color: 'white' }, - { media: '(prefers-color-scheme: dark)', color: 'dark' } - ], - icons: { - icon: '/favicon.ico', - shortcut: '../assets/images/logo.svg', - apple: '../assets/images/logo.svg' - } -} - -interface RootLayoutProps { - children: React.ReactNode -} - -export default function RootLayout({ children }: RootLayoutProps) { - return ( - - - - -
        - {/* @ts-ignore */} -
        -
        {children}
        -
        - -
        - - - ) -} diff --git a/spaces/Lianjd/stock_dashboard/backtrader/flt.py b/spaces/Lianjd/stock_dashboard/backtrader/flt.py deleted file mode 100644 index 43e7f78fe58b7d65ef7493dabd6b79b704765653..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/flt.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - - -from .metabase import MetaParams -from .utils.py3 import with_metaclass - - -__all__ = ['Filter'] - - -class MetaFilter(MetaParams): - pass - - -class Filter(with_metaclass(MetaParams, object)): - - _firsttime = True - - def __init__(self, data): - pass - - def __call__(self, data): - if self._firsttime: - self.nextstart(data) - self._firsttime = False - - self.next(data) - - def nextstart(self, data): - pass - - def next(self, data): - pass diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/robust_scanner/README.md b/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/robust_scanner/README.md deleted file mode 100644 index 165ef248c56640f55772ac5c1d2aae29e69d42e8..0000000000000000000000000000000000000000 --- a/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/robust_scanner/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# RobustScanner - -> [RobustScanner: Dynamically Enhancing Positional Clues for Robust Text Recognition](https://arxiv.org/abs/2007.07542) - - - -## Abstract - -The attention-based encoder-decoder framework has recently achieved impressive results for scene text recognition, and many variants have emerged with improvements in recognition quality. However, it performs poorly on contextless texts (e.g., random character sequences) which is unacceptable in most of real application scenarios. In this paper, we first deeply investigate the decoding process of the decoder. We empirically find that a representative character-level sequence decoder utilizes not only context information but also positional information. Contextual information, which the existing approaches heavily rely on, causes the problem of attention drift. To suppress such side-effect, we propose a novel position enhancement branch, and dynamically fuse its outputs with those of the decoder attention module for scene text recognition. Specifically, it contains a position aware module to enable the encoder to output feature vectors encoding their own spatial positions, and an attention module to estimate glimpses using the positional clue (i.e., the current decoding time step) only. The dynamic fusion is conducted for more robust feature via an element-wise gate mechanism. 
Theoretically, our proposed method, dubbed *RobustScanner*, decodes individual characters with a dynamic ratio between contextual and positional clues, relying more on the positional ones when decoding sequences with scarce context, and is therefore robust and practical. Empirically, it achieves new state-of-the-art results on popular regular and irregular text recognition benchmarks without much performance drop on contextless benchmarks, validating its robustness in both contextual and contextless application scenarios.
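The dynamic fusion described above can be pictured as an element-wise gate that mixes the decoder's contextual glimpse with the glimpse from the position enhancement branch. The sketch below only illustrates that idea and is not the MMOCR implementation; the `GatedFusion` module name, the single linear gate, and the 512-dimensional glimpses are assumptions.

```python
import torch
import torch.nn as nn


class GatedFusion(nn.Module):
    """Element-wise gated fusion of a context glimpse and a position glimpse (illustrative sketch)."""

    def __init__(self, dim: int):
        super().__init__()
        # The gate looks at both glimpses and predicts one mixing weight per channel.
        self.gate = nn.Linear(2 * dim, dim)

    def forward(self, context_glimpse: torch.Tensor, position_glimpse: torch.Tensor) -> torch.Tensor:
        # context_glimpse, position_glimpse: (batch, dim)
        g = torch.sigmoid(self.gate(torch.cat([context_glimpse, position_glimpse], dim=-1)))
        # g -> 1 leans on contextual clues, g -> 0 leans on positional clues.
        return g * context_glimpse + (1.0 - g) * position_glimpse


if __name__ == "__main__":
    fusion = GatedFusion(dim=512)
    ctx = torch.randn(4, 512)  # glimpse from the (contextual) decoder attention
    pos = torch.randn(4, 512)  # glimpse from the position-aware attention branch
    print(fusion(ctx, pos).shape)  # torch.Size([4, 512])
```

With a convex combination like this, a contextless input (e.g. a random character string) can still be decoded by letting the gate fall back to the positional glimpse.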
        - -## Dataset - -### Train Dataset - -| trainset | instance_num | repeat_num | source | -| :--------: | :----------: | :--------: | :------------------------: | -| icdar_2011 | 3567 | 20 | real | -| icdar_2013 | 848 | 20 | real | -| icdar2015 | 4468 | 20 | real | -| coco_text | 42142 | 20 | real | -| IIIT5K | 2000 | 20 | real | -| SynthText | 2400000 | 1 | synth | -| SynthAdd | 1216889 | 1 | synth, 1.6m in [\[1\]](#1) | -| Syn90k | 2400000 | 1 | synth | - -### Test Dataset - -| testset | instance_num | type | -| :-----: | :----------: | :---------------------------: | -| IIIT5K | 3000 | regular | -| SVT | 647 | regular | -| IC13 | 1015 | regular | -| IC15 | 2077 | irregular | -| SVTP | 645 | irregular, 639 in [\[1\]](#1) | -| CT80 | 288 | irregular | - -## Results and Models - -| Methods | GPUs | | Regular Text | | | | Irregular Text | | download | -| :------------------------------------------------------------------------: | :--: | :----: | :----------: | :--: | :-: | :--: | :------------: | :--: | :-------------------------------------------------------------------------: | -| | | IIIT5K | SVT | IC13 | | IC15 | SVTP | CT80 | | -| [RobustScanner](configs/textrecog/robust_scanner/robustscanner_r31_academic.py) | 16 | 95.1 | 89.2 | 93.1 | | 77.8 | 80.3 | 90.3 | [model](https://download.openmmlab.com/mmocr/textrecog/robustscanner/robustscanner_r31_academic-5f05874f.pth) \| [log](https://download.openmmlab.com/mmocr/textrecog/robustscanner/20210401_170932.log.json) | - -## References - -\[1\] Li, Hui and Wang, Peng and Shen, Chunhua and Zhang, Guyu. Show, attend and read: A simple and strong baseline for irregular text recognition. In AAAI 2019. - -## Citation - -```bibtex -@inproceedings{yue2020robustscanner, - title={RobustScanner: Dynamically Enhancing Positional Clues for Robust Text Recognition}, - author={Yue, Xiaoyu and Kuang, Zhanghui and Lin, Chenhao and Sun, Hongbin and Zhang, Wayne}, - booktitle={European Conference on Computer Vision}, - year={2020} -} -``` diff --git a/spaces/LuxOAI/zenFace-Recognition-SDK/README.md b/spaces/LuxOAI/zenFace-Recognition-SDK/README.md deleted file mode 100644 index 6e64d1f4654da459e7669c3c01114209995c076f..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/zenFace-Recognition-SDK/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Face Recognition SDK -emoji: ⚡ -colorFrom: yellow -colorTo: pink -sdk: docker -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/MCkernick/Image_Restoration_Colorization/README.md b/spaces/MCkernick/Image_Restoration_Colorization/README.md deleted file mode 100644 index e464c759c7b0f5be457d654514b4a880ebdb0927..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/README.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Image Restoration and Colorization -emoji: 🌟 -colorFrom: blue -colorTo: green -sdk: gradio -app_file: app.py -pinned: false -duplicated_from: manhkhanhUIT/Image_Restoration_Colorization ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for 
`streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. \ No newline at end of file diff --git a/spaces/MFawad/Emergency_vehicle_classifier/app.py b/spaces/MFawad/Emergency_vehicle_classifier/app.py deleted file mode 100644 index 586e61ed49833887d5f4ab85ee39a471a7a615f8..0000000000000000000000000000000000000000 --- a/spaces/MFawad/Emergency_vehicle_classifier/app.py +++ /dev/null @@ -1,12 +0,0 @@ -import gradio as gr -from fastai.vision.all import * -learn=load_learner('export.pkl') - -labels = learn.dls.vocab -def predict(img): - img = PILImage.create(img) - pred,pred_idx,probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} -title='Emergency Vehicle Classifier' -interpretation='default' -gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(512, 512)), outputs=gr.outputs.Label(num_top_classes=3), title=title, interpretation=interpretation).launch(share=True) \ No newline at end of file diff --git a/spaces/ML701G7/taim-gan/src/data/datasets.py b/spaces/ML701G7/taim-gan/src/data/datasets.py deleted file mode 100644 index e3d0879cae8681beb289c3a6912fb9101b949002..0000000000000000000000000000000000000000 --- a/spaces/ML701G7/taim-gan/src/data/datasets.py +++ /dev/null @@ -1,387 +0,0 @@ -"""Pytorch Dataset classes for the datasets used in the project.""" - -import os -import pickle -from collections import defaultdict -from typing import Any - -import nltk -import numpy as np -import pandas as pd -import torch -import torchvision.transforms.functional as F -from nltk.tokenize import RegexpTokenizer -from PIL import Image -from torch.utils.data import Dataset -from torchvision import transforms - - -class TextImageDataset(Dataset): # type: ignore - """Custom PyTorch Dataset class to load Image and Text data.""" - - # pylint: disable=too-many-instance-attributes - # pylint: disable=too-many-locals - # pylint: disable=too-many-function-args - - def __init__( - self, data_path: str, split: str, num_captions: int, transform: Any = None - ): - """ - :param data_path: Path to the data directory. [i.e. can be './birds/', or './coco/] - :param split: 'train' or 'test' split - :param num_captions: number of captions present per image. - [For birds, this is 10, for coco, this is 5] - :param transform: PyTorch transform to apply to the images. - """ - self.transform = transform - self.bound_box_map = None - self.file_names = self.load_filenames(data_path, split) - self.data_path = data_path - self.num_captions_per_image = num_captions - ( - self.captions, - self.ix_to_word, - self.word_to_ix, - self.vocab_len, - ) = self.get_capt_and_vocab(data_path, split) - self.normalize = transforms.Compose( - [ - transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), - ] - ) - self.class_ids = self.get_class_id(data_path, split, len(self.file_names)) - if self.data_path.endswith("birds/"): - self.bound_box_map = self.get_bound_box(data_path) - - elif self.data_path.endswith("coco/"): - pass - - else: - raise ValueError( - "Invalid data path. Please ensure the data [CUB/COCO] is stored in correct folders." 
- ) - - def __len__(self) -> int: - """Return the length of the dataset.""" - return len(self.file_names) - - def __getitem__(self, idx: int) -> Any: - """ - Return the item at index idx. - :param idx: index of the item to return - :return img_tensor: image tensor - :return correct_caption: correct caption for the image [list of word indices] - :return curr_class_id: class id of the image - :return word_labels: POS_tagged word labels [1 for noun and adjective, 0 else] - - """ - file_name = self.file_names[idx] - curr_class_id = self.class_ids[idx] - - if self.bound_box_map is not None: - bbox = self.bound_box_map[file_name] - images_dir = os.path.join(self.data_path, "CUB_200_2011/images") - else: - bbox = None - images_dir = os.path.join(self.data_path, "images") - - img_path = os.path.join(images_dir, file_name + ".jpg") - img_tensor = self.get_image(img_path, bbox, self.transform) - - rand_sent_idx = np.random.randint(0, self.num_captions_per_image) - rand_sent_idx = idx * self.num_captions_per_image + rand_sent_idx - - correct_caption = torch.tensor(self.captions[rand_sent_idx], dtype=torch.int64) - num_words = len(correct_caption) - - capt_token_list = [] - for i in range(num_words): - capt_token_list.append(self.ix_to_word[correct_caption[i].item()]) - - pos_tag_list = nltk.tag.pos_tag(capt_token_list) - word_labels = [] - - for pos_tag in pos_tag_list: - if ( - "NN" in pos_tag[1] or "JJ" in pos_tag[1] - ): # check for Nouns and Adjective only - word_labels.append(1) - else: - word_labels.append(0) - - word_labels = torch.tensor(word_labels).float() # type: ignore - - curr_class_id = torch.tensor(curr_class_id, dtype=torch.int64).unsqueeze(0) - - return ( - img_tensor, - correct_caption, - curr_class_id, - word_labels, - ) - - def get_capt_and_vocab(self, data_dir: str, split: str) -> Any: - """ - Helper function to get the captions, vocab dict for each image. - :param data_dir: path to the data directory [i.e. 
'./birds/' or './coco/'] - :param split: 'train' or 'test' split - :return captions: list of all captions for each image - :return ix_to_word: dictionary mapping index to word - :return word_to_ix: dictionary mapping word to index - :return num_words: number of unique words in the vocabulary - """ - captions_ckpt_path = os.path.join(data_dir, "stubs/captions.pickle") - if os.path.exists( - captions_ckpt_path - ): # check if previously processed captions exist - with open(captions_ckpt_path, "rb") as ckpt_file: - captions = pickle.load(ckpt_file) - train_captions, test_captions = captions[0], captions[1] - ix_to_word, word_to_ix = captions[2], captions[3] - num_words = len(ix_to_word) - del captions - if split == "train": - return train_captions, ix_to_word, word_to_ix, num_words - return test_captions, ix_to_word, word_to_ix, num_words - - else: # if not, process the captions and save them - train_files = self.load_filenames(data_dir, "train") - test_files = self.load_filenames(data_dir, "test") - - train_captions_tokenized = self.get_tokenized_captions( - data_dir, train_files - ) - test_captions_tokenized = self.get_tokenized_captions( - data_dir, test_files - ) # we need both train and test captions to build the vocab - - ( - train_captions, - test_captions, - ix_to_word, - word_to_ix, - num_words, - ) = self.build_vocab( # type: ignore - train_captions_tokenized, test_captions_tokenized, split - ) - vocab_list = [train_captions, test_captions, ix_to_word, word_to_ix] - with open(captions_ckpt_path, "wb") as ckpt_file: - pickle.dump(vocab_list, ckpt_file) - - if split == "train": - return train_captions, ix_to_word, word_to_ix, num_words - if split == "test": - return test_captions, ix_to_word, word_to_ix, num_words - raise ValueError("Invalid split. Please use 'train' or 'test'") - - def build_vocab( - self, tokenized_captions_train: list, tokenized_captions_test: list # type: ignore - ) -> Any: - """ - Helper function which builds the vocab dicts. - :param tokenized_captions_train: list containing all the - train tokenized captions in the dataset. This is list of lists. - :param tokenized_captions_test: list containing all the - test tokenized captions in the dataset. This is list of lists. - :return train_captions_int: list of all captions in training, - where each word is replaced by its index in the vocab - :return test_captions_int: list of all captions in test, - where each word is replaced by its index in the vocab - :return ix_to_word: dictionary mapping index to word - :return word_to_ix: dictionary mapping word to index - :return num_words: number of unique words in the vocabulary - """ - vocab = defaultdict(int) # type: ignore - total_captions = tokenized_captions_train + tokenized_captions_test - for caption in total_captions: - for word in caption: - vocab[word] += 1 - - # sort vocab dict by frequency in descending order - vocab = sorted(vocab.items(), key=lambda x: x[1], reverse=True) # type: ignore - - ix_to_word = {} - word_to_ix = {} - ix_to_word[0] = "" - word_to_ix[""] = 0 - - word_idx = 1 - for word, _ in vocab: - word_to_ix[word] = word_idx - ix_to_word[word_idx] = word - word_idx += 1 - - train_captions_int = [] # we want to convert words to indices in vocab. 
- for caption in tokenized_captions_train: - curr_caption_int = [] - for word in caption: - curr_caption_int.append(word_to_ix[word]) - - train_captions_int.append(curr_caption_int) - - test_captions_int = [] - for caption in tokenized_captions_test: - curr_caption_int = [] - for word in caption: - curr_caption_int.append(word_to_ix[word]) - - test_captions_int.append(curr_caption_int) - - return ( - train_captions_int, - test_captions_int, - ix_to_word, - word_to_ix, - len(ix_to_word), - ) - - def get_tokenized_captions(self, data_dir: str, filenames: list) -> Any: # type: ignore - """ - Helper function to tokenize and return captions for each image in filenames. - :param data_dir: path to the data directory [i.e. './birds/' or './coco/'] - :param filenames: list of all filenames corresponding to the split - :return tokenized_captions: list of all tokenized captions for all files in filenames. - [this returns a list, where each element is again a list of tokens/words] - """ - - all_captions = [] - for filename in filenames: - caption_path = os.path.join(data_dir, "text", filename + ".txt") - with open(caption_path, "r", encoding="utf8") as txt_file: - captions = txt_file.readlines() - count = 0 - for caption in captions: - if len(caption) == 0: - continue - - caption = caption.replace("\ufffd\ufffd", " ") - tokenizer = RegexpTokenizer(r"\w+") - tokens = tokenizer.tokenize( - caption.lower() - ) # splits current caption/line to list of words/tokens - if len(tokens) == 0: - continue - - tokens = [ - t.encode("ascii", "ignore").decode("ascii") for t in tokens - ] - tokens = [t for t in tokens if len(t) > 0] - - all_captions.append(tokens) - count += 1 - if count == self.num_captions_per_image: - break - if count < self.num_captions_per_image: - raise ValueError( - f"Number of captions for {filename} is only {count},\ - which is less than {self.num_captions_per_image}." - ) - - return all_captions - - def get_image(self, img_path: str, bbox: list, transform: Any) -> Any: # type: ignore - """ - Helper function to load and transform an image. - :param img_path: path to the image - :param bbox: bounding box coordinates [x, y, width, height] - :param transform: PyTorch transform to apply to the image - :return img_tensor: transformed image tensor - """ - img = Image.open(img_path).convert("RGB") - width, height = img.size - - if bbox is not None: - r_val = int(np.maximum(bbox[2], bbox[3]) * 0.75) - - center_x = int((2 * bbox[0] + bbox[2]) / 2) - center_y = int((2 * bbox[1] + bbox[3]) / 2) - y1_coord = np.maximum(0, center_y - r_val) - y2_coord = np.minimum(height, center_y + r_val) - x1_coord = np.maximum(0, center_x - r_val) - x2_coord = np.minimum(width, center_x + r_val) - - img = img.crop( - [x1_coord, y1_coord, x2_coord, y2_coord] - ) # This preprocessing steps seems to follow from - # Stackgan: Text to photo-realistic image synthesis - - if transform is not None: - img_tensor = transform(img) # this scales to 304x304, i.e. 256 x (76/64). - x_val = np.random.randint(0, 48) # 304 - 256 = 48 - y_val = np.random.randint(0, 48) - flip = np.random.rand() > 0.5 - - # crop - img_tensor = img_tensor.crop( - [x_val, y_val, x_val + 256, y_val + 256] - ) # this crops to 256x256 - if flip: - img_tensor = F.hflip(img_tensor) - - img_tensor = self.normalize(img_tensor) - - return img_tensor - - def load_filenames(self, data_dir: str, split: str) -> Any: - """ - Helper function to get list of all image filenames. - :param data_dir: path to the data directory [i.e. 
'./birds/' or './coco/'] - :param split: 'train' or 'test' split - :return filenames: list of all image filenames - """ - filepath = f"{data_dir}{split}/filenames.pickle" - if os.path.isfile(filepath): - with open(filepath, "rb") as pick_file: - filenames = pickle.load(pick_file) - else: - raise ValueError( - "Invalid split. Please use 'train' or 'test',\ - or make sure the filenames.pickle file exists." - ) - return filenames - - def get_class_id(self, data_dir: str, split: str, total_elems: int) -> Any: - """ - Helper function to get list of all image class ids. - :param data_dir: path to the data directory [i.e. './birds/' or './coco/'] - :param split: 'train' or 'test' split - :param total_elems: total number of elements in the dataset - :return class_ids: list of all image class ids - """ - filepath = f"{data_dir}{split}/class_info.pickle" - if os.path.isfile(filepath): - with open(filepath, "rb") as class_file: - class_ids = pickle.load(class_file, encoding="latin1") - else: - class_ids = np.arange(total_elems) - return class_ids - - def get_bound_box(self, data_path: str) -> Any: - """ - Helper function to get the bounding box for birds dataset. - :param data_path: path to birds data directory [i.e. './data/birds/'] - :return imageToBox: dictionary mapping image name to bounding box coordinates - """ - bbox_path = os.path.join(data_path, "CUB_200_2011/bounding_boxes.txt") - df_bounding_boxes = pd.read_csv( - bbox_path, delim_whitespace=True, header=None - ).astype(int) - - filepath = os.path.join(data_path, "CUB_200_2011/images.txt") - df_filenames = pd.read_csv(filepath, delim_whitespace=True, header=None) - filenames = df_filenames[ - 1 - ].tolist() # df_filenames[0] just contains the index or ID. - - img_to_box = { # type: ignore - img_file[:-4]: [] for img_file in filenames - } # remove the .jpg extension from the names - num_imgs = len(filenames) - - for i in range(0, num_imgs): - bbox = df_bounding_boxes.iloc[i][1:].tolist() - key = filenames[i][:-4] - img_to_box[key] = bbox - - return img_to_box diff --git a/spaces/Mahiruoshi/vits-chatbot/text/symbols.py b/spaces/Mahiruoshi/vits-chatbot/text/symbols.py deleted file mode 100644 index 3705de1c96d52d5643eab9bc80671fe9cb7e4363..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/vits-chatbot/text/symbols.py +++ /dev/null @@ -1,67 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. -''' -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' -''' -# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - - -'''# sanskrit_cleaners -_pad = '_' -_punctuation = '।' -_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ ' -''' - -'''# cjks_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ ' -''' - -'''# thai_cleaners -_pad = '_' -_punctuation = '.!? 
' -_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์' -''' - -'''# cjke_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ ' -''' - -'''# shanghainese_cleaners -_pad = '_' -_punctuation = ',.!?…' -_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 ' -''' - -'''# chinese_dialect_cleaners -_pad = '_' -_punctuation = ',.!?~…─' -_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚αᴀᴇ↑↓∅ⱼ ' -''' - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") diff --git a/spaces/Marshalls/testmtd/models/flowplusplus/inv_conv.py b/spaces/Marshalls/testmtd/models/flowplusplus/inv_conv.py deleted file mode 100644 index ae77515dbcbb27ecd8a6d619dacf67c620a28ba0..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/models/flowplusplus/inv_conv.py +++ /dev/null @@ -1,133 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -import torch -import numpy as np -import scipy.linalg - -class InvertibleConv1x1(nn.Module): - def __init__(self, num_channels, LU_decomposed=True): - super().__init__() - w_shape = [num_channels, num_channels] - w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(np.float32) - if not LU_decomposed: - # Sample a random orthogonal matrix: - self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init))) - else: - # import pdb;pdb.set_trace() - np_p, np_l, np_u = scipy.linalg.lu(w_init) - np_s = np.diag(np_u) - np_sign_s = np.sign(np_s) - np_log_s = np.log(np.abs(np_s)) - np_u = np.triu(np_u, k=1) - l_mask = np.tril(np.ones(w_shape, dtype=np.float32), -1) - eye = np.eye(*w_shape, dtype=np.float32) - self.register_buffer('p', torch.Tensor(np_p.astype(np.float32))) - self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(np.float32))) - self.l = nn.Parameter(torch.Tensor(np_l.astype(np.float32))) - self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(np.float32))) - self.u = nn.Parameter(torch.Tensor(np_u.astype(np.float32))) - self.l_mask = torch.Tensor(l_mask) - self.eye = torch.Tensor(eye) - self.w_shape = w_shape - self.LU = LU_decomposed - self.first_pass = True - self.saved_weight = None - self.saved_dsldj = None - - def get_weight(self, input, reverse): - w_shape = self.w_shape - if not self.LU: - dlogdet = torch.slogdet(self.weight)[1] * input.size(2) * input.size(3) - if not reverse: - weight = self.weight.view(w_shape[0], w_shape[1], 1, 1) - else: - weight = torch.inverse(self.weight.double()).float()\ - .view(w_shape[0], w_shape[1], 1, 1) - return weight, dlogdet - else: - self.p = self.p.to(input.device) - self.sign_s = self.sign_s.to(input.device) - self.l_mask = self.l_mask.to(input.device) - self.eye = self.eye.to(input.device) - l = self.l * self.l_mask + self.eye - u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s)) - dlogdet = self.log_s.sum() * input.size(2) * input.size(3) - if not reverse: - w = torch.matmul(self.p, torch.matmul(l, u)) - else: - l = torch.inverse(l.double()).float() - u = torch.inverse(u.double()).float() - w = torch.matmul(u, torch.matmul(l, self.p.inverse())) - return w.view(w_shape[0], w_shape[1], 1, 1), dlogdet - - def forward(self, x, cond, sldj=None, reverse=False): - """ - log-det = log|abs(|W|)| * pixels - """ - x = torch.cat(x, dim=1) - if not reverse: - weight, dsldj = self.get_weight(x, reverse) - else: - if self.first_pass: 
- weight, dsldj = self.get_weight(x, reverse) - self.saved_weight = weight - if sldj is not None: - self.saved_dsldj = dsldj - self.first_pass = False - else: - weight = self.saved_weight - if sldj is not None: - dsldj = self.saved_dsldj - - if not reverse: - x = F.conv2d(x, weight) - if sldj is not None: - sldj = sldj + dsldj - else: - x = F.conv2d(x, weight) - if sldj is not None: - sldj = sldj - dsldj - x = x.chunk(2, dim=1) - return x, sldj - - -class InvConv(nn.Module): - """Invertible 1x1 Convolution for 2D inputs. Originally described in Glow - (https://arxiv.org/abs/1807.03039). Does not support LU-decomposed version. - - Args: - num_channels (int): Number of channels in the input and output. - random_init (bool): Initialize with a random orthogonal matrix. - Otherwise initialize with noisy identity. - """ - def __init__(self, num_channels, random_init=False): - super(InvConv, self).__init__() - self.num_channels = num_channels - - if random_init: - # Initialize with a random orthogonal matrix - w_init = np.random.randn(self.num_channels, self.num_channels) - w_init = np.linalg.qr(w_init)[0] - else: - # Initialize as identity permutation with some noise - w_init = np.eye(self.num_channels, self.num_channels) \ - + 1e-3 * np.random.randn(self.num_channels, self.num_channels) - self.weight = nn.Parameter(torch.from_numpy(w_init.astype(np.float32))) - - def forward(self, x, cond, sldj, reverse=False): - x = torch.cat(x, dim=1) - - ldj = torch.slogdet(self.weight)[1] * x.size(2) * x.size(3) - - if reverse: - weight = torch.inverse(self.weight.double()).float() - sldj = sldj - ldj - else: - weight = self.weight - sldj = sldj + ldj - - weight = weight.view(self.num_channels, self.num_channels, 1, 1) - x = F.conv2d(x, weight) - x = x.chunk(2, dim=1) - - return x, sldj diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/ldm/modules/image_degradation/utils_image.py b/spaces/Mellow-ai/PhotoAI_Mellow/ldm/modules/image_degradation/utils_image.py deleted file mode 100644 index 0175f155ad900ae33c3c46ed87f49b352e3faf98..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/ldm/modules/image_degradation/utils_image.py +++ /dev/null @@ -1,916 +0,0 @@ -import os -import math -import random -import numpy as np -import torch -import cv2 -from torchvision.utils import make_grid -from datetime import datetime -#import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py - - -os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" - - -''' -# -------------------------------------------- -# Kai Zhang (github: https://github.com/cszn) -# 03/Mar/2019 -# -------------------------------------------- -# https://github.com/twhui/SRGAN-pyTorch -# https://github.com/xinntao/BasicSR -# -------------------------------------------- -''' - - -IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif'] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - - -def get_timestamp(): - return datetime.now().strftime('%y%m%d-%H%M%S') - - -def imshow(x, title=None, cbar=False, figsize=None): - plt.figure(figsize=figsize) - plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray') - if title: - plt.title(title) - if cbar: - plt.colorbar() - plt.show() - - -def surf(Z, cmap='rainbow', figsize=None): - plt.figure(figsize=figsize) - ax3 = plt.axes(projection='3d') - - w, h = Z.shape[:2] - xx = np.arange(0,w,1) - yy = np.arange(0,h,1) - X, Y = np.meshgrid(xx, yy) - 
ax3.plot_surface(X,Y,Z,cmap=cmap) - #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap) - plt.show() - - -''' -# -------------------------------------------- -# get image pathes -# -------------------------------------------- -''' - - -def get_image_paths(dataroot): - paths = None # return None if dataroot is None - if dataroot is not None: - paths = sorted(_get_paths_from_images(dataroot)) - return paths - - -def _get_paths_from_images(path): - assert os.path.isdir(path), '{:s} is not a valid directory'.format(path) - images = [] - for dirpath, _, fnames in sorted(os.walk(path)): - for fname in sorted(fnames): - if is_image_file(fname): - img_path = os.path.join(dirpath, fname) - images.append(img_path) - assert images, '{:s} has no valid image file'.format(path) - return images - - -''' -# -------------------------------------------- -# split large images into small images -# -------------------------------------------- -''' - - -def patches_from_image(img, p_size=512, p_overlap=64, p_max=800): - w, h = img.shape[:2] - patches = [] - if w > p_max and h > p_max: - w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=np.int)) - h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=np.int)) - w1.append(w-p_size) - h1.append(h-p_size) -# print(w1) -# print(h1) - for i in w1: - for j in h1: - patches.append(img[i:i+p_size, j:j+p_size,:]) - else: - patches.append(img) - - return patches - - -def imssave(imgs, img_path): - """ - imgs: list, N images of size WxHxC - """ - img_name, ext = os.path.splitext(os.path.basename(img_path)) - - for i, img in enumerate(imgs): - if img.ndim == 3: - img = img[:, :, [2, 1, 0]] - new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png') - cv2.imwrite(new_path, img) - - -def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000): - """ - split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size), - and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max) - will be splitted. - Args: - original_dataroot: - taget_dataroot: - p_size: size of small images - p_overlap: patch size in training is a good choice - p_max: images with smaller size than (p_max)x(p_max) keep unchanged. - """ - paths = get_image_paths(original_dataroot) - for img_path in paths: - # img_name, ext = os.path.splitext(os.path.basename(img_path)) - img = imread_uint(img_path, n_channels=n_channels) - patches = patches_from_image(img, p_size, p_overlap, p_max) - imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path))) - #if original_dataroot == taget_dataroot: - #del img_path - -''' -# -------------------------------------------- -# makedir -# -------------------------------------------- -''' - - -def mkdir(path): - if not os.path.exists(path): - os.makedirs(path) - - -def mkdirs(paths): - if isinstance(paths, str): - mkdir(paths) - else: - for path in paths: - mkdir(path) - - -def mkdir_and_rename(path): - if os.path.exists(path): - new_name = path + '_archived_' + get_timestamp() - print('Path already exists. 
Rename it to [{:s}]'.format(new_name)) - os.rename(path, new_name) - os.makedirs(path) - - -''' -# -------------------------------------------- -# read image from path -# opencv is fast, but read BGR numpy image -# -------------------------------------------- -''' - - -# -------------------------------------------- -# get uint8 image of size HxWxn_channles (RGB) -# -------------------------------------------- -def imread_uint(path, n_channels=3): - # input: path - # output: HxWx3(RGB or GGG), or HxWx1 (G) - if n_channels == 1: - img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE - img = np.expand_dims(img, axis=2) # HxWx1 - elif n_channels == 3: - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G - if img.ndim == 2: - img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG - else: - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB - return img - - -# -------------------------------------------- -# matlab's imwrite -# -------------------------------------------- -def imsave(img, img_path): - img = np.squeeze(img) - if img.ndim == 3: - img = img[:, :, [2, 1, 0]] - cv2.imwrite(img_path, img) - -def imwrite(img, img_path): - img = np.squeeze(img) - if img.ndim == 3: - img = img[:, :, [2, 1, 0]] - cv2.imwrite(img_path, img) - - - -# -------------------------------------------- -# get single image of size HxWxn_channles (BGR) -# -------------------------------------------- -def read_img(path): - # read image by cv2 - # return: Numpy float32, HWC, BGR, [0,1] - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE - img = img.astype(np.float32) / 255. - if img.ndim == 2: - img = np.expand_dims(img, axis=2) - # some images have 4 channels - if img.shape[2] > 3: - img = img[:, :, :3] - return img - - -''' -# -------------------------------------------- -# image format conversion -# -------------------------------------------- -# numpy(single) <---> numpy(unit) -# numpy(single) <---> tensor -# numpy(unit) <---> tensor -# -------------------------------------------- -''' - - -# -------------------------------------------- -# numpy(single) [0, 1] <---> numpy(unit) -# -------------------------------------------- - - -def uint2single(img): - - return np.float32(img/255.) - - -def single2uint(img): - - return np.uint8((img.clip(0, 1)*255.).round()) - - -def uint162single(img): - - return np.float32(img/65535.) - - -def single2uint16(img): - - return np.uint16((img.clip(0, 1)*65535.).round()) - - -# -------------------------------------------- -# numpy(unit) (HxWxC or HxW) <---> tensor -# -------------------------------------------- - - -# convert uint to 4-dimensional torch tensor -def uint2tensor4(img): - if img.ndim == 2: - img = np.expand_dims(img, axis=2) - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0) - - -# convert uint to 3-dimensional torch tensor -def uint2tensor3(img): - if img.ndim == 2: - img = np.expand_dims(img, axis=2) - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.) 
- - -# convert 2/3/4-dimensional torch tensor to uint -def tensor2uint(img): - img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy() - if img.ndim == 3: - img = np.transpose(img, (1, 2, 0)) - return np.uint8((img*255.0).round()) - - -# -------------------------------------------- -# numpy(single) (HxWxC) <---> tensor -# -------------------------------------------- - - -# convert single (HxWxC) to 3-dimensional torch tensor -def single2tensor3(img): - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float() - - -# convert single (HxWxC) to 4-dimensional torch tensor -def single2tensor4(img): - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0) - - -# convert torch tensor to single -def tensor2single(img): - img = img.data.squeeze().float().cpu().numpy() - if img.ndim == 3: - img = np.transpose(img, (1, 2, 0)) - - return img - -# convert torch tensor to single -def tensor2single3(img): - img = img.data.squeeze().float().cpu().numpy() - if img.ndim == 3: - img = np.transpose(img, (1, 2, 0)) - elif img.ndim == 2: - img = np.expand_dims(img, axis=2) - return img - - -def single2tensor5(img): - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0) - - -def single32tensor5(img): - return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0) - - -def single42tensor4(img): - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float() - - -# from skimage.io import imread, imsave -def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): - ''' - Converts a torch Tensor into an image Numpy array of BGR channel order - Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order - Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) - ''' - tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp - tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1] - n_dim = tensor.dim() - if n_dim == 4: - n_img = len(tensor) - img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy() - img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR - elif n_dim == 3: - img_np = tensor.numpy() - img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR - elif n_dim == 2: - img_np = tensor.numpy() - else: - raise TypeError( - 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim)) - if out_type == np.uint8: - img_np = (img_np * 255.0).round() - # Important. Unlike matlab, numpy.unit8() WILL NOT round by default. - return img_np.astype(out_type) - - -''' -# -------------------------------------------- -# Augmentation, flipe and/or rotate -# -------------------------------------------- -# The following two are enough. 
-# (1) augmet_img: numpy image of WxHxC or WxH -# (2) augment_img_tensor4: tensor image 1xCxWxH -# -------------------------------------------- -''' - - -def augment_img(img, mode=0): - '''Kai Zhang (github: https://github.com/cszn) - ''' - if mode == 0: - return img - elif mode == 1: - return np.flipud(np.rot90(img)) - elif mode == 2: - return np.flipud(img) - elif mode == 3: - return np.rot90(img, k=3) - elif mode == 4: - return np.flipud(np.rot90(img, k=2)) - elif mode == 5: - return np.rot90(img) - elif mode == 6: - return np.rot90(img, k=2) - elif mode == 7: - return np.flipud(np.rot90(img, k=3)) - - -def augment_img_tensor4(img, mode=0): - '''Kai Zhang (github: https://github.com/cszn) - ''' - if mode == 0: - return img - elif mode == 1: - return img.rot90(1, [2, 3]).flip([2]) - elif mode == 2: - return img.flip([2]) - elif mode == 3: - return img.rot90(3, [2, 3]) - elif mode == 4: - return img.rot90(2, [2, 3]).flip([2]) - elif mode == 5: - return img.rot90(1, [2, 3]) - elif mode == 6: - return img.rot90(2, [2, 3]) - elif mode == 7: - return img.rot90(3, [2, 3]).flip([2]) - - -def augment_img_tensor(img, mode=0): - '''Kai Zhang (github: https://github.com/cszn) - ''' - img_size = img.size() - img_np = img.data.cpu().numpy() - if len(img_size) == 3: - img_np = np.transpose(img_np, (1, 2, 0)) - elif len(img_size) == 4: - img_np = np.transpose(img_np, (2, 3, 1, 0)) - img_np = augment_img(img_np, mode=mode) - img_tensor = torch.from_numpy(np.ascontiguousarray(img_np)) - if len(img_size) == 3: - img_tensor = img_tensor.permute(2, 0, 1) - elif len(img_size) == 4: - img_tensor = img_tensor.permute(3, 2, 0, 1) - - return img_tensor.type_as(img) - - -def augment_img_np3(img, mode=0): - if mode == 0: - return img - elif mode == 1: - return img.transpose(1, 0, 2) - elif mode == 2: - return img[::-1, :, :] - elif mode == 3: - img = img[::-1, :, :] - img = img.transpose(1, 0, 2) - return img - elif mode == 4: - return img[:, ::-1, :] - elif mode == 5: - img = img[:, ::-1, :] - img = img.transpose(1, 0, 2) - return img - elif mode == 6: - img = img[:, ::-1, :] - img = img[::-1, :, :] - return img - elif mode == 7: - img = img[:, ::-1, :] - img = img[::-1, :, :] - img = img.transpose(1, 0, 2) - return img - - -def augment_imgs(img_list, hflip=True, rot=True): - # horizontal flip OR rotate - hflip = hflip and random.random() < 0.5 - vflip = rot and random.random() < 0.5 - rot90 = rot and random.random() < 0.5 - - def _augment(img): - if hflip: - img = img[:, ::-1, :] - if vflip: - img = img[::-1, :, :] - if rot90: - img = img.transpose(1, 0, 2) - return img - - return [_augment(img) for img in img_list] - - -''' -# -------------------------------------------- -# modcrop and shave -# -------------------------------------------- -''' - - -def modcrop(img_in, scale): - # img_in: Numpy, HWC or HW - img = np.copy(img_in) - if img.ndim == 2: - H, W = img.shape - H_r, W_r = H % scale, W % scale - img = img[:H - H_r, :W - W_r] - elif img.ndim == 3: - H, W, C = img.shape - H_r, W_r = H % scale, W % scale - img = img[:H - H_r, :W - W_r, :] - else: - raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim)) - return img - - -def shave(img_in, border=0): - # img_in: Numpy, HWC or HW - img = np.copy(img_in) - h, w = img.shape[:2] - img = img[border:h-border, border:w-border] - return img - - -''' -# -------------------------------------------- -# image processing process on numpy image -# channel_convert(in_c, tar_type, img_list): -# rgb2ycbcr(img, only_y=True): -# bgr2ycbcr(img, only_y=True): -# 
ycbcr2rgb(img): -# -------------------------------------------- -''' - - -def rgb2ycbcr(img, only_y=True): - '''same as matlab rgb2ycbcr - only_y: only return Y channel - Input: - uint8, [0, 255] - float, [0, 1] - ''' - in_img_type = img.dtype - img.astype(np.float32) - if in_img_type != np.uint8: - img *= 255. - # convert - if only_y: - rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0 - else: - rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], - [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128] - if in_img_type == np.uint8: - rlt = rlt.round() - else: - rlt /= 255. - return rlt.astype(in_img_type) - - -def ycbcr2rgb(img): - '''same as matlab ycbcr2rgb - Input: - uint8, [0, 255] - float, [0, 1] - ''' - in_img_type = img.dtype - img.astype(np.float32) - if in_img_type != np.uint8: - img *= 255. - # convert - rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071], - [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836] - if in_img_type == np.uint8: - rlt = rlt.round() - else: - rlt /= 255. - return rlt.astype(in_img_type) - - -def bgr2ycbcr(img, only_y=True): - '''bgr version of rgb2ycbcr - only_y: only return Y channel - Input: - uint8, [0, 255] - float, [0, 1] - ''' - in_img_type = img.dtype - img.astype(np.float32) - if in_img_type != np.uint8: - img *= 255. - # convert - if only_y: - rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0 - else: - rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], - [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128] - if in_img_type == np.uint8: - rlt = rlt.round() - else: - rlt /= 255. - return rlt.astype(in_img_type) - - -def channel_convert(in_c, tar_type, img_list): - # conversion among BGR, gray and y - if in_c == 3 and tar_type == 'gray': # BGR to gray - gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list] - return [np.expand_dims(img, axis=2) for img in gray_list] - elif in_c == 3 and tar_type == 'y': # BGR to y - y_list = [bgr2ycbcr(img, only_y=True) for img in img_list] - return [np.expand_dims(img, axis=2) for img in y_list] - elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR - return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list] - else: - return img_list - - -''' -# -------------------------------------------- -# metric, PSNR and SSIM -# -------------------------------------------- -''' - - -# -------------------------------------------- -# PSNR -# -------------------------------------------- -def calculate_psnr(img1, img2, border=0): - # img1 and img2 have range [0, 255] - #img1 = img1.squeeze() - #img2 = img2.squeeze() - if not img1.shape == img2.shape: - raise ValueError('Input images must have the same dimensions.') - h, w = img1.shape[:2] - img1 = img1[border:h-border, border:w-border] - img2 = img2[border:h-border, border:w-border] - - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - mse = np.mean((img1 - img2)**2) - if mse == 0: - return float('inf') - return 20 * math.log10(255.0 / math.sqrt(mse)) - - -# -------------------------------------------- -# SSIM -# -------------------------------------------- -def calculate_ssim(img1, img2, border=0): - '''calculate SSIM - the same outputs as MATLAB's - img1, img2: [0, 255] - ''' - #img1 = img1.squeeze() - #img2 = img2.squeeze() - if not img1.shape == img2.shape: - raise ValueError('Input images must have the same dimensions.') - h, w = img1.shape[:2] - img1 = img1[border:h-border, border:w-border] - img2 = 
img2[border:h-border, border:w-border] - - if img1.ndim == 2: - return ssim(img1, img2) - elif img1.ndim == 3: - if img1.shape[2] == 3: - ssims = [] - for i in range(3): - ssims.append(ssim(img1[:,:,i], img2[:,:,i])) - return np.array(ssims).mean() - elif img1.shape[2] == 1: - return ssim(np.squeeze(img1), np.squeeze(img2)) - else: - raise ValueError('Wrong input image dimensions.') - - -def ssim(img1, img2): - C1 = (0.01 * 255)**2 - C2 = (0.03 * 255)**2 - - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - kernel = cv2.getGaussianKernel(11, 1.5) - window = np.outer(kernel, kernel.transpose()) - - mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid - mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] - mu1_sq = mu1**2 - mu2_sq = mu2**2 - mu1_mu2 = mu1 * mu2 - sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq - sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq - sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * - (sigma1_sq + sigma2_sq + C2)) - return ssim_map.mean() - - -''' -# -------------------------------------------- -# matlab's bicubic imresize (numpy and torch) [0, 1] -# -------------------------------------------- -''' - - -# matlab 'imresize' function, now only support 'bicubic' -def cubic(x): - absx = torch.abs(x) - absx2 = absx**2 - absx3 = absx**3 - return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \ - (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx)) - - -def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing): - if (scale < 1) and (antialiasing): - # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width - kernel_width = kernel_width / scale - - # Output-space coordinates - x = torch.linspace(1, out_length, out_length) - - # Input-space coordinates. Calculate the inverse mapping such that 0.5 - # in output space maps to 0.5 in input space, and 0.5+scale in output - # space maps to 1.5 in input space. - u = x / scale + 0.5 * (1 - 1 / scale) - - # What is the left-most pixel that can be involved in the computation? - left = torch.floor(u - kernel_width / 2) - - # What is the maximum number of pixels that can be involved in the - # computation? Note: it's OK to use an extra pixel here; if the - # corresponding weights are all zero, it will be eliminated at the end - # of this function. - P = math.ceil(kernel_width) + 2 - - # The indices of the input pixels involved in computing the k-th output - # pixel are in row k of the indices matrix. - indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view( - 1, P).expand(out_length, P) - - # The weights used to compute the k-th output pixel are in row k of the - # weights matrix. - distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices - # apply cubic kernel - if (scale < 1) and (antialiasing): - weights = scale * cubic(distance_to_center * scale) - else: - weights = cubic(distance_to_center) - # Normalize the weights matrix so that each row sums to 1. - weights_sum = torch.sum(weights, 1).view(out_length, 1) - weights = weights / weights_sum.expand(out_length, P) - - # If a column in weights is all zero, get rid of it. only consider the first and last column. 
- weights_zero_tmp = torch.sum((weights == 0), 0) - if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6): - indices = indices.narrow(1, 1, P - 2) - weights = weights.narrow(1, 1, P - 2) - if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6): - indices = indices.narrow(1, 0, P - 2) - weights = weights.narrow(1, 0, P - 2) - weights = weights.contiguous() - indices = indices.contiguous() - sym_len_s = -indices.min() + 1 - sym_len_e = indices.max() - in_length - indices = indices + sym_len_s - 1 - return weights, indices, int(sym_len_s), int(sym_len_e) - - -# -------------------------------------------- -# imresize for tensor image [0, 1] -# -------------------------------------------- -def imresize(img, scale, antialiasing=True): - # Now the scale should be the same for H and W - # input: img: pytorch tensor, CHW or HW [0,1] - # output: CHW or HW [0,1] w/o round - need_squeeze = True if img.dim() == 2 else False - if need_squeeze: - img.unsqueeze_(0) - in_C, in_H, in_W = img.size() - out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) - kernel_width = 4 - kernel = 'cubic' - - # Return the desired dimension order for performing the resize. The - # strategy is to perform the resize first along the dimension with the - # smallest scale factor. - # Now we do not support this. - - # get weights and indices - weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( - in_H, out_H, scale, kernel, kernel_width, antialiasing) - weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( - in_W, out_W, scale, kernel, kernel_width, antialiasing) - # process H dimension - # symmetric copying - img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W) - img_aug.narrow(1, sym_len_Hs, in_H).copy_(img) - - sym_patch = img[:, :sym_len_Hs, :] - inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(1, inv_idx) - img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv) - - sym_patch = img[:, -sym_len_He:, :] - inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(1, inv_idx) - img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) - - out_1 = torch.FloatTensor(in_C, out_H, in_W) - kernel_width = weights_H.size(1) - for i in range(out_H): - idx = int(indices_H[i][0]) - for j in range(out_C): - out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i]) - - # process W dimension - # symmetric copying - out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We) - out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1) - - sym_patch = out_1[:, :, :sym_len_Ws] - inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(2, inv_idx) - out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv) - - sym_patch = out_1[:, :, -sym_len_We:] - inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(2, inv_idx) - out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) - - out_2 = torch.FloatTensor(in_C, out_H, out_W) - kernel_width = weights_W.size(1) - for i in range(out_W): - idx = int(indices_W[i][0]) - for j in range(out_C): - out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i]) - if need_squeeze: - out_2.squeeze_() - return out_2 - - -# -------------------------------------------- -# imresize for numpy image [0, 1] -# -------------------------------------------- -def 
imresize_np(img, scale, antialiasing=True): - # Now the scale should be the same for H and W - # input: img: Numpy, HWC or HW [0,1] - # output: HWC or HW [0,1] w/o round - img = torch.from_numpy(img) - need_squeeze = True if img.dim() == 2 else False - if need_squeeze: - img.unsqueeze_(2) - - in_H, in_W, in_C = img.size() - out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) - kernel_width = 4 - kernel = 'cubic' - - # Return the desired dimension order for performing the resize. The - # strategy is to perform the resize first along the dimension with the - # smallest scale factor. - # Now we do not support this. - - # get weights and indices - weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( - in_H, out_H, scale, kernel, kernel_width, antialiasing) - weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( - in_W, out_W, scale, kernel, kernel_width, antialiasing) - # process H dimension - # symmetric copying - img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C) - img_aug.narrow(0, sym_len_Hs, in_H).copy_(img) - - sym_patch = img[:sym_len_Hs, :, :] - inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(0, inv_idx) - img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv) - - sym_patch = img[-sym_len_He:, :, :] - inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(0, inv_idx) - img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) - - out_1 = torch.FloatTensor(out_H, in_W, in_C) - kernel_width = weights_H.size(1) - for i in range(out_H): - idx = int(indices_H[i][0]) - for j in range(out_C): - out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i]) - - # process W dimension - # symmetric copying - out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C) - out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1) - - sym_patch = out_1[:, :sym_len_Ws, :] - inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(1, inv_idx) - out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv) - - sym_patch = out_1[:, -sym_len_We:, :] - inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(1, inv_idx) - out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) - - out_2 = torch.FloatTensor(out_H, out_W, in_C) - kernel_width = weights_W.size(1) - for i in range(out_W): - idx = int(indices_W[i][0]) - for j in range(out_C): - out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i]) - if need_squeeze: - out_2.squeeze_() - - return out_2.numpy() - - -if __name__ == '__main__': - print('---') -# img = imread_uint('test.bmp', 3) -# img = uint2single(img) -# img_bicubic = imresize_np(img, 1/4) \ No newline at end of file diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/rembg/sessions/u2net_cloth_seg.py b/spaces/Mellow-ai/PhotoAI_Mellow/rembg/sessions/u2net_cloth_seg.py deleted file mode 100644 index abd9aee4608c0d5c1e58df2926a3a84593dfe1d0..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/rembg/sessions/u2net_cloth_seg.py +++ /dev/null @@ -1,110 +0,0 @@ -import os -from typing import List - -import numpy as np -import pooch -from PIL import Image -from PIL.Image import Image as PILImage -from scipy.special import log_softmax - -from .base import BaseSession - -pallete1 = [ - 0, - 0, - 0, - 255, - 255, - 255, - 0, - 0, - 0, - 0, - 
0, - 0, -] - -pallete2 = [ - 0, - 0, - 0, - 0, - 0, - 0, - 255, - 255, - 255, - 0, - 0, - 0, -] - -pallete3 = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 255, - 255, - 255, -] - - -class Unet2ClothSession(BaseSession): - def predict(self, img: PILImage, *args, **kwargs) -> List[PILImage]: - ort_outs = self.inner_session.run( - None, - self.normalize( - img, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), (768, 768) - ), - ) - - pred = ort_outs - pred = log_softmax(pred[0], 1) - pred = np.argmax(pred, axis=1, keepdims=True) - pred = np.squeeze(pred, 0) - pred = np.squeeze(pred, 0) - - mask = Image.fromarray(pred.astype("uint8"), mode="L") - mask = mask.resize(img.size, Image.LANCZOS) - - masks = [] - - mask1 = mask.copy() - mask1.putpalette(pallete1) - mask1 = mask1.convert("RGB").convert("L") - masks.append(mask1) - - mask2 = mask.copy() - mask2.putpalette(pallete2) - mask2 = mask2.convert("RGB").convert("L") - masks.append(mask2) - - mask3 = mask.copy() - mask3.putpalette(pallete3) - mask3 = mask3.convert("RGB").convert("L") - masks.append(mask3) - - return masks - - @classmethod - def download_models(cls, *args, **kwargs): - fname = f"{cls.name()}.onnx" - pooch.retrieve( - "https://github.com/danielgatis/rembg/releases/download/v0.0.0/u2net_cloth_seg.onnx", - "md5:2434d1f3cb744e0e49386c906e5a08bb", - fname=fname, - path=cls.u2net_home(), - progressbar=True, - ) - - return os.path.join(cls.u2net_home(), fname) - - @classmethod - def name(cls, *args, **kwargs): - return "u2net_cloth_seg" diff --git a/spaces/MoonQiu/LongerCrafter/lvdm/basics.py b/spaces/MoonQiu/LongerCrafter/lvdm/basics.py deleted file mode 100644 index 65c771d13a7f4a932ac370f08797a8b6ba9e85ff..0000000000000000000000000000000000000000 --- a/spaces/MoonQiu/LongerCrafter/lvdm/basics.py +++ /dev/null @@ -1,100 +0,0 @@ -# adopted from -# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py -# and -# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -# and -# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py -# -# thanks! - -import torch.nn as nn -from utils.utils import instantiate_from_config - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - -def scale_module(module, scale): - """ - Scale the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().mul_(scale) - return module - - -def conv_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D convolution module. - """ - if dims == 1: - return nn.Conv1d(*args, **kwargs) - elif dims == 2: - return nn.Conv2d(*args, **kwargs) - elif dims == 3: - return nn.Conv3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def linear(*args, **kwargs): - """ - Create a linear module. - """ - return nn.Linear(*args, **kwargs) - - -def avg_pool_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D average pooling module. 
- """ - if dims == 1: - return nn.AvgPool1d(*args, **kwargs) - elif dims == 2: - return nn.AvgPool2d(*args, **kwargs) - elif dims == 3: - return nn.AvgPool3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def nonlinearity(type='silu'): - if type == 'silu': - return nn.SiLU() - elif type == 'leaky_relu': - return nn.LeakyReLU() - - -class GroupNormSpecific(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - - -def normalization(channels, num_groups=32): - """ - Make a standard normalization layer. - :param channels: number of input channels. - :return: an nn.Module for normalization. - """ - return GroupNormSpecific(num_groups, channels) - - -class HybridConditioner(nn.Module): - - def __init__(self, c_concat_config, c_crossattn_config): - super().__init__() - self.concat_conditioner = instantiate_from_config(c_concat_config) - self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) - - def forward(self, c_concat, c_crossattn): - c_concat = self.concat_conditioner(c_concat) - c_crossattn = self.crossattn_conditioner(c_crossattn) - return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} \ No newline at end of file diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/layers/dot_product_attention_layer.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/layers/dot_product_attention_layer.py deleted file mode 100644 index 6d9cdb6528d90d9ec6e0bf0ac2a2343bd7227cc2..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/layers/dot_product_attention_layer.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class DotProductAttentionLayer(nn.Module): - - def __init__(self, dim_model=None): - super().__init__() - - self.scale = dim_model**-0.5 if dim_model is not None else 1. - - def forward(self, query, key, value, mask=None): - n, seq_len = mask.size() - logits = torch.matmul(query.permute(0, 2, 1), key) * self.scale - - if mask is not None: - mask = mask.view(n, 1, seq_len) - logits = logits.masked_fill(mask, float('-inf')) - - weights = F.softmax(logits, dim=2) - - glimpse = torch.matmul(weights, value.transpose(1, 2)) - - glimpse = glimpse.permute(0, 2, 1).contiguous() - - return glimpse diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/masked_softmax.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/masked_softmax.py deleted file mode 100644 index 42a9e97a329e6c2892bb584f38375888a7fbdd2f..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/masked_softmax.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Keras-based softmax layer with optional masking.""" -# pylint: disable=g-classes-have-attributes -from __future__ import absolute_import -from __future__ import division -# from __future__ import google_type_annotations -from __future__ import print_function - -import tensorflow as tf - - -@tf.keras.utils.register_keras_serializable(package='Text') -class MaskedSoftmax(tf.keras.layers.Layer): - """Performs a softmax with optional masking on a tensor. - - Arguments: - mask_expansion_axes: Any axes that should be padded on the mask tensor. - normalization_axes: On which axes the softmax should perform. - """ - - def __init__(self, - mask_expansion_axes=None, - normalization_axes=None, - **kwargs): - self._mask_expansion_axes = mask_expansion_axes - if normalization_axes is None: - self._normalization_axes = (-1,) - else: - self._normalization_axes = normalization_axes - super(MaskedSoftmax, self).__init__(**kwargs) - - def call(self, scores, mask=None): - - if mask is not None: - for _ in range(len(scores.shape) - len(mask.shape)): - mask = tf.expand_dims(mask, axis=self._mask_expansion_axes) - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. - adder = (1.0 - tf.cast(mask, scores.dtype)) * -10000.0 - - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. - scores += adder - - if len(self._normalization_axes) == 1: - return tf.nn.softmax(scores, axis=self._normalization_axes[0]) - else: - return tf.math.exp(scores - tf.math.reduce_logsumexp( - scores, axis=self._normalization_axes, keepdims=True)) - - def get_config(self): - config = { - 'mask_expansion_axes': self._mask_expansion_axes, - 'normalization_axes': self._normalization_axes - } - base_config = super(MaskedSoftmax, self).get_config() - return dict(list(base_config.items()) + list(config.items())) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/hubert/simple_kmeans/learn_kmeans.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/hubert/simple_kmeans/learn_kmeans.py deleted file mode 100644 index 113ac655b8c0a585fe43797e99674e445098edd0..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/hubert/simple_kmeans/learn_kmeans.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging -import os -import sys - -import numpy as np -from sklearn.cluster import MiniBatchKMeans - -import joblib - -logging.basicConfig( - format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=os.environ.get("LOGLEVEL", "INFO").upper(), - stream=sys.stdout, -) -logger = logging.getLogger("learn_kmeans") - - -def get_km_model( - n_clusters, - init, - max_iter, - batch_size, - tol, - max_no_improvement, - n_init, - reassignment_ratio, -): - return MiniBatchKMeans( - n_clusters=n_clusters, - init=init, - max_iter=max_iter, - batch_size=batch_size, - verbose=1, - compute_labels=False, - tol=tol, - max_no_improvement=max_no_improvement, - init_size=None, - n_init=n_init, - reassignment_ratio=reassignment_ratio, - ) - - -def load_feature_shard(feat_dir, split, nshard, rank, percent): - feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy" - leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len" - with open(leng_path, "r") as f: - lengs = [int(line.rstrip()) for line in f] - offsets = [0] + np.cumsum(lengs[:-1]).tolist() - - if percent < 0: - return np.load(feat_path, mmap_mode="r") - else: - nsample = int(np.ceil(len(lengs) * percent)) - indices = np.random.choice(len(lengs), nsample, replace=False) - feat = np.load(feat_path, mmap_mode="r") - sampled_feat = np.concatenate( - [feat[offsets[i]: offsets[i] + lengs[i]] for i in indices], axis=0 - ) - logger.info( - ( - f"sampled {nsample} utterances, {len(sampled_feat)} frames " - f"from shard {rank}/{nshard}" - ) - ) - return sampled_feat - - -def load_feature(feat_dir, split, nshard, seed, percent): - assert percent <= 1.0 - feat = np.concatenate( - [ - load_feature_shard(feat_dir, split, nshard, r, percent) - for r in range(nshard) - ], - axis=0, - ) - logging.info(f"loaded feature with dimension {feat.shape}") - return feat - - -def learn_kmeans( - feat_dir, - split, - nshard, - km_path, - n_clusters, - seed, - percent, - init, - max_iter, - batch_size, - tol, - n_init, - reassignment_ratio, - max_no_improvement, -): - np.random.seed(seed) - feat = load_feature(feat_dir, split, nshard, seed, percent) - km_model = get_km_model( - n_clusters, - init, - max_iter, - batch_size, - tol, - max_no_improvement, - n_init, - reassignment_ratio, - ) - km_model.fit(feat) - joblib.dump(km_model, km_path) - - inertia = -km_model.score(feat) / len(feat) - logger.info("total intertia: %.5f", inertia) - logger.info("finished successfully") - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument("feat_dir", type=str) - parser.add_argument("split", type=str) - parser.add_argument("nshard", type=int) - parser.add_argument("km_path", type=str) - parser.add_argument("n_clusters", type=int) - parser.add_argument("--seed", default=0, type=int) - parser.add_argument( - "--percent", default=-1, type=float, help="sample a subset; -1 for all" - ) - parser.add_argument("--init", default="k-means++") - parser.add_argument("--max_iter", default=100, type=int) - parser.add_argument("--batch_size", default=10000, type=int) - parser.add_argument("--tol", default=0.0, type=float) - parser.add_argument("--max_no_improvement", default=100, type=int) - parser.add_argument("--n_init", default=20, type=int) - parser.add_argument("--reassignment_ratio", default=0.0, type=float) - args = parser.parse_args() - logging.info(str(args)) - - learn_kmeans(**vars(args)) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/tacotron2_loss.py 
b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/tacotron2_loss.py deleted file mode 100644 index 8c7b655c8c52f8fa478b4568850ec8f741dab78e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/tacotron2_loss.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright (c) 2017-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory. - -import logging -from typing import Any, Dict, List -from functools import lru_cache -from dataclasses import dataclass, field - -import torch -from omegaconf import II - -from fairseq import metrics, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass -from fairseq.data.data_utils import lengths_to_mask -import torch.nn.functional as F - - -logger = logging.getLogger(__name__) - - -@dataclass -class Tacotron2CriterionConfig(FairseqDataclass): - bce_pos_weight: float = field( - default=1.0, - metadata={"help": "weight of positive examples for BCE loss"}, - ) - n_frames_per_step: int = field( - default=0, - metadata={"help": "Number of frames per decoding step"}, - ) - use_guided_attention_loss: bool = field( - default=False, - metadata={"help": "use guided attention loss"}, - ) - guided_attention_loss_sigma: float = field( - default=0.4, - metadata={"help": "weight of positive examples for BCE loss"}, - ) - ctc_weight: float = field( - default=0.0, metadata={"help": "weight for CTC loss"} - ) - sentence_avg: bool = II("optimization.sentence_avg") - - -class GuidedAttentionLoss(torch.nn.Module): - """ - Efficiently Trainable Text-to-Speech System Based on Deep Convolutional - Networks with Guided Attention (https://arxiv.org/abs/1710.08969) - """ - - def __init__(self, sigma): - super().__init__() - self.sigma = sigma - - @staticmethod - @lru_cache(maxsize=8) - def _get_weight(s_len, t_len, sigma): - grid_x, grid_y = torch.meshgrid(torch.arange(t_len), torch.arange(s_len)) - grid_x = grid_x.to(s_len.device) - grid_y = grid_y.to(s_len.device) - w = (grid_y.float() / s_len - grid_x.float() / t_len) ** 2 - return 1.0 - torch.exp(-w / (2 * (sigma ** 2))) - - def _get_weights(self, src_lens, tgt_lens): - bsz, max_s_len, max_t_len = len(src_lens), max(src_lens), max(tgt_lens) - weights = torch.zeros((bsz, max_t_len, max_s_len)) - for i, (s_len, t_len) in enumerate(zip(src_lens, tgt_lens)): - weights[i, :t_len, :s_len] = self._get_weight(s_len, t_len, - self.sigma) - return weights - - @staticmethod - def _get_masks(src_lens, tgt_lens): - in_masks = lengths_to_mask(src_lens) - out_masks = lengths_to_mask(tgt_lens) - return out_masks.unsqueeze(2) & in_masks.unsqueeze(1) - - def forward(self, attn, src_lens, tgt_lens, reduction="mean"): - weights = self._get_weights(src_lens, tgt_lens).to(attn.device) - masks = self._get_masks(src_lens, tgt_lens).to(attn.device) - loss = (weights * attn.transpose(1, 2)).masked_select(masks) - loss = torch.sum(loss) if reduction == "sum" else torch.mean(loss) - return loss - - -@register_criterion("tacotron2", dataclass=Tacotron2CriterionConfig) -class Tacotron2Criterion(FairseqCriterion): - def __init__(self, task, sentence_avg, n_frames_per_step, - use_guided_attention_loss, guided_attention_loss_sigma, - bce_pos_weight, ctc_weight): - super().__init__(task) - self.sentence_avg = sentence_avg - 
self.n_frames_per_step = n_frames_per_step - self.bce_pos_weight = bce_pos_weight - - self.guided_attn = None - if use_guided_attention_loss: - self.guided_attn = GuidedAttentionLoss(guided_attention_loss_sigma) - self.ctc_weight = ctc_weight - - def forward(self, model, sample, reduction="mean"): - bsz, max_len, _ = sample["target"].size() - feat_tgt = sample["target"] - feat_len = sample["target_lengths"].view(bsz, 1).expand(-1, max_len) - eos_tgt = torch.arange(max_len).to(sample["target"].device) - eos_tgt = eos_tgt.view(1, max_len).expand(bsz, -1) - eos_tgt = (eos_tgt == (feat_len - 1)).float() - src_tokens = sample["net_input"]["src_tokens"] - src_lens = sample["net_input"]["src_lengths"] - tgt_lens = sample["target_lengths"] - - feat_out, eos_out, extra = model( - src_tokens=src_tokens, - src_lengths=src_lens, - prev_output_tokens=sample["net_input"]["prev_output_tokens"], - incremental_state=None, - target_lengths=tgt_lens, - speaker=sample["speaker"] - ) - - l1_loss, mse_loss, eos_loss = self.compute_loss( - extra["feature_out"], feat_out, eos_out, feat_tgt, eos_tgt, - tgt_lens, reduction, - ) - attn_loss = torch.tensor(0.).type_as(l1_loss) - if self.guided_attn is not None: - attn_loss = self.guided_attn(extra['attn'], src_lens, tgt_lens, reduction) - ctc_loss = torch.tensor(0.).type_as(l1_loss) - if self.ctc_weight > 0.: - net_output = (feat_out, eos_out, extra) - lprobs = model.get_normalized_probs(net_output, log_probs=True) - lprobs = lprobs.transpose(0, 1) # T x B x C - src_mask = lengths_to_mask(src_lens) - src_tokens_flat = src_tokens.masked_select(src_mask) - ctc_loss = F.ctc_loss( - lprobs, src_tokens_flat, tgt_lens, src_lens, - reduction=reduction, zero_infinity=True - ) * self.ctc_weight - loss = l1_loss + mse_loss + eos_loss + attn_loss + ctc_loss - - sample_size = sample["nsentences"] if self.sentence_avg \ - else sample["ntokens"] - logging_output = { - "loss": utils.item(loss.data), - "ntokens": sample["ntokens"], - "nsentences": sample["nsentences"], - "sample_size": sample_size, - "l1_loss": utils.item(l1_loss.data), - "mse_loss": utils.item(mse_loss.data), - "eos_loss": utils.item(eos_loss.data), - "attn_loss": utils.item(attn_loss.data), - "ctc_loss": utils.item(ctc_loss.data), - } - return loss, sample_size, logging_output - - def compute_loss(self, feat_out, feat_out_post, eos_out, feat_tgt, - eos_tgt, tgt_lens, reduction="mean"): - mask = lengths_to_mask(tgt_lens) - _eos_out = eos_out[mask].squeeze() - _eos_tgt = eos_tgt[mask] - _feat_tgt = feat_tgt[mask] - _feat_out = feat_out[mask] - _feat_out_post = feat_out_post[mask] - - l1_loss = ( - F.l1_loss(_feat_out, _feat_tgt, reduction=reduction) + - F.l1_loss(_feat_out_post, _feat_tgt, reduction=reduction) - ) - mse_loss = ( - F.mse_loss(_feat_out, _feat_tgt, reduction=reduction) + - F.mse_loss(_feat_out_post, _feat_tgt, reduction=reduction) - ) - eos_loss = F.binary_cross_entropy_with_logits( - _eos_out, _eos_tgt, pos_weight=torch.tensor(self.bce_pos_weight), - reduction=reduction - ) - return l1_loss, mse_loss, eos_loss - - @classmethod - def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None: - ns = [log.get("sample_size", 0) for log in logging_outputs] - ntot = sum(ns) - ws = [n / (ntot + 1e-8) for n in ns] - for key in ["loss", "l1_loss", "mse_loss", "eos_loss", "attn_loss", "ctc_loss"]: - vals = [log.get(key, 0) for log in logging_outputs] - val = sum(val * w for val, w in zip(vals, ws)) - metrics.log_scalar(key, val, ntot, round=3) - metrics.log_scalar("sample_size", ntot, 
len(logging_outputs)) - - # inference metrics - if "targ_frames" not in logging_outputs[0]: - return - n = sum(log.get("targ_frames", 0) for log in logging_outputs) - for key, new_key in [ - ("mcd_loss", "mcd_loss"), - ("pred_frames", "pred_ratio"), - ("nins", "ins_rate"), - ("ndel", "del_rate"), - ]: - val = sum(log.get(key, 0) for log in logging_outputs) - metrics.log_scalar(new_key, val / n, n, round=3) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - return False diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/audio/frm_text_to_speech_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/audio/frm_text_to_speech_dataset.py deleted file mode 100644 index 125b1fc0c0a67190e6d9ba4866664cbc9006a142..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/audio/frm_text_to_speech_dataset.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright (c) 2017-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory.abs - -import csv -import logging -import os.path as op -from typing import List, Optional - -import numpy as np -import torch -from fairseq.data import Dictionary -from fairseq.data.audio.speech_to_text_dataset import ( - S2TDataConfig -) -from fairseq.data.audio.text_to_speech_dataset import ( - TextToSpeechDataset, TextToSpeechDatasetCreator -) - -logger = logging.getLogger(__name__) - - -class FrmTextToSpeechDataset(TextToSpeechDataset): - def __init__( - self, - split: str, - is_train_split: bool, - data_cfg: S2TDataConfig, - audio_paths: List[str], - n_frames: List[int], - src_texts: Optional[List[str]] = None, - tgt_texts: Optional[List[str]] = None, - speakers: Optional[List[str]] = None, - src_langs: Optional[List[str]] = None, - tgt_langs: Optional[List[str]] = None, - ids: Optional[List[str]] = None, - tgt_dict: Optional[Dictionary] = None, - pre_tokenizer=None, - bpe_tokenizer=None, - n_frames_per_step=1, - speaker_to_id=None, - do_chunk=False, - chunk_bound=-1, - chunk_init=50, - chunk_incr=5, - add_eos=True, - dedup=True, - ref_fpu=-1 - ): - # It assumes texts are encoded at a fixed frame-rate - super().__init__( - split=split, - is_train_split=is_train_split, - data_cfg=data_cfg, - audio_paths=audio_paths, - n_frames=n_frames, - src_texts=src_texts, - tgt_texts=tgt_texts, - speakers=speakers, - src_langs=src_langs, - tgt_langs=tgt_langs, - ids=ids, - tgt_dict=tgt_dict, - pre_tokenizer=pre_tokenizer, - bpe_tokenizer=bpe_tokenizer, - n_frames_per_step=n_frames_per_step, - speaker_to_id=speaker_to_id - ) - - self.do_chunk = do_chunk - self.chunk_bound = chunk_bound - self.chunk_init = chunk_init - self.chunk_incr = chunk_incr - self.add_eos = add_eos - self.dedup = dedup - self.ref_fpu = ref_fpu - - self.chunk_size = -1 - - if do_chunk: - assert self.chunk_incr >= 0 - assert self.pre_tokenizer is None - - def __getitem__(self, index): - index, source, target, speaker_id, _, _, _ = super().__getitem__(index) - if target[-1].item() == self.tgt_dict.eos_index: - target = target[:-1] - - fpu = source.size(0) / target.size(0) # frame-per-unit - fps = self.n_frames_per_step - assert ( - self.ref_fpu == -1 or - abs((fpu * fps - self.ref_fpu) / self.ref_fpu) < 0.1 - ), f"{fpu*fps} != {self.ref_fpu}" - - # only chunk training split - if self.is_train_split and self.do_chunk and 
self.chunk_size > 0: - lang = target[:int(self.data_cfg.prepend_tgt_lang_tag)] - text = target[int(self.data_cfg.prepend_tgt_lang_tag):] - size = len(text) - chunk_size = min(self.chunk_size, size) - chunk_start = np.random.randint(size - chunk_size + 1) - text = text[chunk_start:chunk_start+chunk_size] - target = torch.cat((lang, text), 0) - - f_size = int(np.floor(chunk_size * fpu)) - f_start = int(np.floor(chunk_start * fpu)) - assert(f_size > 0) - source = source[f_start:f_start+f_size, :] - - if self.dedup: - target = torch.unique_consecutive(target) - - if self.add_eos: - eos_idx = self.tgt_dict.eos_index - target = torch.cat((target, torch.LongTensor([eos_idx])), 0) - - return index, source, target, speaker_id - - def set_epoch(self, epoch): - if self.is_train_split and self.do_chunk: - old = self.chunk_size - self.chunk_size = self.chunk_init + epoch * self.chunk_incr - if self.chunk_bound > 0: - self.chunk_size = min(self.chunk_size, self.chunk_bound) - logger.info(( - f"{self.split}: setting chunk size " - f"from {old} to {self.chunk_size}" - )) - - -class FrmTextToSpeechDatasetCreator(TextToSpeechDatasetCreator): - # inherit for key names - @classmethod - def from_tsv( - cls, - root: str, - data_cfg: S2TDataConfig, - split: str, - tgt_dict, - pre_tokenizer, - bpe_tokenizer, - is_train_split: bool, - n_frames_per_step: int, - speaker_to_id, - do_chunk: bool = False, - chunk_bound: int = -1, - chunk_init: int = 50, - chunk_incr: int = 5, - add_eos: bool = True, - dedup: bool = True, - ref_fpu: float = -1 - ) -> FrmTextToSpeechDataset: - tsv_path = op.join(root, f"{split}.tsv") - if not op.isfile(tsv_path): - raise FileNotFoundError(f"Dataset not found: {tsv_path}") - with open(tsv_path) as f: - reader = csv.DictReader( - f, - delimiter="\t", - quotechar=None, - doublequote=False, - lineterminator="\n", - quoting=csv.QUOTE_NONE, - ) - s = [dict(e) for e in reader] - assert len(s) > 0 - - ids = [ss[cls.KEY_ID] for ss in s] - audio_paths = [ - op.join(data_cfg.audio_root, ss[cls.KEY_AUDIO]) for ss in s - ] - n_frames = [int(ss[cls.KEY_N_FRAMES]) for ss in s] - tgt_texts = [ss[cls.KEY_TGT_TEXT] for ss in s] - src_texts = [ss.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for ss in s] - speakers = [ss.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for ss in s] - src_langs = [ss.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for ss in s] - tgt_langs = [ss.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for ss in s] - - return FrmTextToSpeechDataset( - split=split, - is_train_split=is_train_split, - data_cfg=data_cfg, - audio_paths=audio_paths, - n_frames=n_frames, - src_texts=src_texts, - tgt_texts=tgt_texts, - speakers=speakers, - src_langs=src_langs, - tgt_langs=tgt_langs, - ids=ids, - tgt_dict=tgt_dict, - pre_tokenizer=pre_tokenizer, - bpe_tokenizer=bpe_tokenizer, - n_frames_per_step=n_frames_per_step, - speaker_to_id=speaker_to_id, - do_chunk=do_chunk, - chunk_bound=chunk_bound, - chunk_init=chunk_init, - chunk_incr=chunk_incr, - add_eos=add_eos, - dedup=dedup, - ref_fpu=ref_fpu - ) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/transform_eos_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/transform_eos_dataset.py deleted file mode 100644 index fb14ff018edf13b20f5d0e486692dfb0a37ec6d1..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/transform_eos_dataset.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from . import FairseqDataset - - -class TransformEosDataset(FairseqDataset): - """A :class:`~fairseq.data.FairseqDataset` wrapper that appends/prepends/strips EOS. - - Note that the transformation is applied in :func:`collater`. - - Args: - dataset (~fairseq.data.FairseqDataset): dataset to wrap - eos (int): index of the end-of-sentence symbol - append_eos_to_src (bool, optional): append EOS to the end of src - remove_eos_from_src (bool, optional): remove EOS from the end of src - append_eos_to_tgt (bool, optional): append EOS to the end of tgt - remove_eos_from_tgt (bool, optional): remove EOS from the end of tgt - """ - - def __init__( - self, - dataset, - eos, - append_eos_to_src=False, - remove_eos_from_src=False, - append_eos_to_tgt=False, - remove_eos_from_tgt=False, - has_target=True, - ): - if not isinstance(dataset, FairseqDataset): - raise ValueError("dataset must be an instance of FairseqDataset") - if append_eos_to_src and remove_eos_from_src: - raise ValueError("cannot combine append_eos_to_src and remove_eos_from_src") - if append_eos_to_tgt and remove_eos_from_tgt: - raise ValueError("cannot combine append_eos_to_tgt and remove_eos_from_tgt") - - self.dataset = dataset - self.eos = torch.LongTensor([eos]) - self.append_eos_to_src = append_eos_to_src - self.remove_eos_from_src = remove_eos_from_src - self.append_eos_to_tgt = append_eos_to_tgt - self.remove_eos_from_tgt = remove_eos_from_tgt - self.has_target = has_target - - # precompute how we should adjust the reported sizes - self._src_delta = 0 - self._src_delta += 1 if append_eos_to_src else 0 - self._src_delta -= 1 if remove_eos_from_src else 0 - self._tgt_delta = 0 - self._tgt_delta += 1 if append_eos_to_tgt else 0 - self._tgt_delta -= 1 if remove_eos_from_tgt else 0 - - self._checked_src = False - self._checked_tgt = False - - def _check_src(self, src, expect_eos): - if not self._checked_src: - assert (src[-1] == self.eos[0]) == expect_eos - self._checked_src = True - - def _check_tgt(self, tgt, expect_eos): - if self.has_target and not self._checked_tgt: - assert (tgt[-1] == self.eos[0]) == expect_eos - self._checked_tgt = True - - def __getitem__(self, index): - return self.dataset[index] - - def __len__(self): - return len(self.dataset) - - def collater(self, samples): - def transform(item): - if self.append_eos_to_src: - self.eos = self.eos.to(device=item["source"].device) - self._check_src(item["source"], expect_eos=False) - item["source"] = torch.cat([item["source"], self.eos]) - if self.remove_eos_from_src: - self.eos = self.eos.to(device=item["source"].device) - self._check_src(item["source"], expect_eos=True) - item["source"] = item["source"][:-1] - if self.append_eos_to_tgt: - self.eos = self.eos.to(device=item["target"].device) - self._check_tgt(item["target"], expect_eos=False) - item["target"] = torch.cat([item["target"], self.eos]) - if self.remove_eos_from_tgt: - self.eos = self.eos.to(device=item["target"].device) - self._check_tgt(item["target"], expect_eos=True) - item["target"] = item["target"][:-1] - return item - - samples = list(map(transform, samples)) - return self.dataset.collater(samples) - - def num_tokens(self, index): - return self.dataset.num_tokens(index) - - def size(self, index): - if self.has_target: - src_len, tgt_len = self.dataset.size(index) - return (src_len + self._src_delta, tgt_len + self._tgt_delta) - else: - return 
self.dataset.size(index) - - def ordered_indices(self): - # NOTE: we assume that the ordering does not change based on the - # addition or removal of eos - return self.dataset.ordered_indices() - - @property - def supports_prefetch(self): - return getattr(self.dataset, "supports_prefetch", False) - - def prefetch(self, indices): - return self.dataset.prefetch(indices) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/lru_cache_dataset.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/lru_cache_dataset.py deleted file mode 100644 index a7854ac1701392754ce5795cafe9c634671aebdf..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/lru_cache_dataset.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from functools import lru_cache - -from . import BaseWrapperDataset - - -class LRUCacheDataset(BaseWrapperDataset): - def __init__(self, dataset, token=None): - super().__init__(dataset) - - @lru_cache(maxsize=8) - def __getitem__(self, index): - return self.dataset[index] - - @lru_cache(maxsize=8) - def collater(self, samples): - return self.dataset.collater(samples) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/symbols.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/symbols.py deleted file mode 100644 index 5f0d70fdad92ba4f554d971710b60f2f9e8d9298..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/symbols.py +++ /dev/null @@ -1,18 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Defines the set of symbols used in text input to the model. - -The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. ''' -from . import cmudict - -_pad = '_' -_punctuation = '!\'(),.:;? ' -_special = '-' -_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' - -# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters): -_arpabet = ['@' + s for s in cmudict.valid_symbols] - -# Export all symbols: -symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/iterative_refinement_generator.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/iterative_refinement_generator.py deleted file mode 100644 index 4fb0946f499329ceb130761b59675d761df1c158..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/iterative_refinement_generator.py +++ /dev/null @@ -1,359 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from collections import namedtuple - -import numpy as np -import torch -from fairseq import utils - - -DecoderOut = namedtuple( - "IterativeRefinementDecoderOut", - ["output_tokens", "output_scores", "attn", "step", "max_step", "history"], -) - - -class IterativeRefinementGenerator(object): - def __init__( - self, - tgt_dict, - models=None, - eos_penalty=0.0, - max_iter=10, - max_ratio=2, - beam_size=1, - decoding_format=None, - retain_dropout=False, - adaptive=True, - retain_history=False, - reranking=False, - ): - """ - Generates translations based on iterative refinement. - - Args: - tgt_dict: target dictionary - eos_penalty: if > 0.0, it penalized early-stopping in decoding - max_iter: maximum number of refinement iterations - max_ratio: generate sequences of maximum length ax, where x is the source length - decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'} - retain_dropout: retaining dropout in the inference - adaptive: decoding with early stop - """ - self.bos = tgt_dict.bos() - self.pad = tgt_dict.pad() - self.unk = tgt_dict.unk() - self.eos = tgt_dict.eos() - self.vocab_size = len(tgt_dict) - self.eos_penalty = eos_penalty - self.max_iter = max_iter - self.max_ratio = max_ratio - self.beam_size = beam_size - self.reranking = reranking - self.decoding_format = decoding_format - self.retain_dropout = retain_dropout - self.retain_history = retain_history - self.adaptive = adaptive - self.models = models - - def generate_batched_itr( - self, - data_itr, - maxlen_a=None, - maxlen_b=None, - cuda=False, - timer=None, - prefix_size=0, - ): - """Iterate over a batched dataset and yield individual translations. - - Args: - maxlen_a/b: generate sequences of maximum length ax + b, - where x is the source sentence length. - cuda: use GPU for generation - timer: StopwatchMeter for timing generations. - """ - - for sample in data_itr: - if "net_input" not in sample: - continue - if timer is not None: - timer.start() - with torch.no_grad(): - hypos = self.generate( - self.models, - sample, - prefix_tokens=sample["target"][:, :prefix_size] - if prefix_size > 0 - else None, - ) - if timer is not None: - timer.stop(sample["ntokens"]) - for i, id in enumerate(sample["id"]): - # remove padding - src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad) - ref = utils.strip_pad(sample["target"][i, :], self.pad) - yield id, src, ref, hypos[i] - - @torch.no_grad() - def generate(self, models, sample, prefix_tokens=None, constraints=None): - if constraints is not None: - raise NotImplementedError( - "Constrained decoding with the IterativeRefinementGenerator is not supported" - ) - - # TODO: iterative refinement generator does not support ensemble for now. - if not self.retain_dropout: - for model in models: - model.eval() - - model, reranker = models[0], None - if self.reranking: - assert len(models) > 1, "Assuming the last checkpoint is the reranker" - assert ( - self.beam_size > 1 - ), "Reranking requires multiple translation for each example" - - reranker = models[-1] - models = models[:-1] - - if len(models) > 1 and hasattr(model, "enable_ensemble"): - assert model.allow_ensemble, "{} does not support ensembling".format( - model.__class__.__name__ - ) - model.enable_ensemble(models) - - # TODO: better encoder inputs? 
- src_tokens = sample["net_input"]["src_tokens"] - src_lengths = sample["net_input"]["src_lengths"] - bsz, src_len = src_tokens.size() - - # initialize - encoder_out = model.forward_encoder([src_tokens, src_lengths]) - prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens) - - if self.beam_size > 1: - assert ( - model.allow_length_beam - ), "{} does not support decoding with length beam.".format( - model.__class__.__name__ - ) - - # regenerate data based on length-beam - length_beam_order = ( - utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1) - ) - encoder_out = model.encoder.reorder_encoder_out( - encoder_out, length_beam_order - ) - prev_decoder_out = model.regenerate_length_beam( - prev_decoder_out, self.beam_size - ) - bsz = bsz * self.beam_size - - sent_idxs = torch.arange(bsz) - prev_output_tokens = prev_decoder_out.output_tokens.clone() - - if self.retain_history: - prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens]) - - finalized = [[] for _ in range(bsz)] - - def is_a_loop(x, y, s, a): - b, l_x, l_y = x.size(0), x.size(1), y.size(1) - if l_x > l_y: - y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1) - s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1) - if a is not None: - a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1) - elif l_x < l_y: - x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1) - return (x == y).all(1), y, s, a - - def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn): - cutoff = prev_out_token.ne(self.pad) - tokens = prev_out_token[cutoff] - if prev_out_score is None: - scores, score = None, None - else: - scores = prev_out_score[cutoff] - score = scores.mean() - - if prev_out_attn is None: - hypo_attn, alignment = None, None - else: - hypo_attn = prev_out_attn[cutoff] - alignment = hypo_attn.max(dim=1)[1] - return { - "steps": step, - "tokens": tokens, - "positional_scores": scores, - "score": score, - "hypo_attn": hypo_attn, - "alignment": alignment, - } - - for step in range(self.max_iter + 1): - - decoder_options = { - "eos_penalty": self.eos_penalty, - "max_ratio": self.max_ratio, - "decoding_format": self.decoding_format, - } - prev_decoder_out = prev_decoder_out._replace( - step=step, - max_step=self.max_iter + 1, - ) - - decoder_out = model.forward_decoder( - prev_decoder_out, encoder_out, **decoder_options - ) - - if self.adaptive: - # terminate if there is a loop - terminated, out_tokens, out_scores, out_attn = is_a_loop( - prev_output_tokens, - decoder_out.output_tokens, - decoder_out.output_scores, - decoder_out.attn, - ) - decoder_out = decoder_out._replace( - output_tokens=out_tokens, - output_scores=out_scores, - attn=out_attn, - ) - - else: - terminated = decoder_out.output_tokens.new_zeros( - decoder_out.output_tokens.size(0) - ).bool() - - if step == self.max_iter: # reach last iteration, terminate - terminated.fill_(1) - - # collect finalized sentences - finalized_idxs = sent_idxs[terminated] - finalized_tokens = decoder_out.output_tokens[terminated] - finalized_scores = decoder_out.output_scores[terminated] - finalized_attn = ( - None - if (decoder_out.attn is None or decoder_out.attn.size(0) == 0) - else decoder_out.attn[terminated] - ) - - if self.retain_history: - finalized_history_tokens = [h[terminated] for h in decoder_out.history] - - for i in range(finalized_idxs.size(0)): - finalized[finalized_idxs[i]] = [ - finalized_hypos( - step, - finalized_tokens[i], - finalized_scores[i], - None if finalized_attn is None else 
finalized_attn[i], - ) - ] - - if self.retain_history: - finalized[finalized_idxs[i]][0]["history"] = [] - for j in range(len(finalized_history_tokens)): - finalized[finalized_idxs[i]][0]["history"].append( - finalized_hypos( - step, finalized_history_tokens[j][i], None, None - ) - ) - - # check if all terminated - if terminated.sum() == terminated.size(0): - break - - # for next step - not_terminated = ~terminated - prev_decoder_out = decoder_out._replace( - output_tokens=decoder_out.output_tokens[not_terminated], - output_scores=decoder_out.output_scores[not_terminated], - attn=decoder_out.attn[not_terminated] - if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0) - else None, - history=[h[not_terminated] for h in decoder_out.history] - if decoder_out.history is not None - else None, - ) - encoder_out = model.encoder.reorder_encoder_out( - encoder_out, not_terminated.nonzero(as_tuple=False).squeeze() - ) - sent_idxs = sent_idxs[not_terminated] - prev_output_tokens = prev_decoder_out.output_tokens.clone() - - if self.beam_size > 1: - if reranker is not None: - finalized = self.rerank( - reranker, finalized, [src_tokens, src_lengths], self.beam_size - ) - - # aggregate information from length beam - finalized = [ - finalized[ - np.argmax( - [ - finalized[self.beam_size * i + j][0]["score"] - for j in range(self.beam_size) - ] - ) - + self.beam_size * i - ] - for i in range(len(finalized) // self.beam_size) - ] - - return finalized - - def rerank(self, reranker, finalized, encoder_input, beam_size): - def rebuild_batch(finalized): - finalized_tokens = [f[0]["tokens"] for f in finalized] - finalized_maxlen = max(f.size(0) for f in finalized_tokens) - final_output_tokens = ( - finalized_tokens[0] - .new_zeros(len(finalized_tokens), finalized_maxlen) - .fill_(self.pad) - ) - for i, f in enumerate(finalized_tokens): - final_output_tokens[i, : f.size(0)] = f - return final_output_tokens - - final_output_tokens = rebuild_batch(finalized) - final_output_tokens[ - :, 0 - ] = self.eos # autoregressive model assumes starting with EOS - - reranker_encoder_out = reranker.encoder(*encoder_input) - length_beam_order = ( - utils.new_arange( - final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1) - ) - .t() - .reshape(-1) - ) - reranker_encoder_out = reranker.encoder.reorder_encoder_out( - reranker_encoder_out, length_beam_order - ) - reranking_scores = reranker.get_normalized_probs( - reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out), - True, - None, - ) - reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None]) - reranking_masks = final_output_tokens[:, 1:].ne(self.pad) - reranking_scores = ( - reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1) - ) - reranking_scores = reranking_scores / reranking_masks.sum(1).type_as( - reranking_scores - ) - - for i in range(len(finalized)): - finalized[i][0]["score"] = reranking_scores[i] - - return finalized diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/hubert/hubert.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/hubert/hubert.py deleted file mode 100644 index 232a5e402a146023e5c93f3c2574ecec98faf9d5..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/hubert/hubert.py +++ /dev/null @@ -1,563 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging -from typing import Dict, List, Optional, Tuple - -import numpy as np - -import torch -import torch.nn as nn -from dataclasses import dataclass, field -from fairseq import utils -from fairseq.data.data_utils import compute_mask_indices -from fairseq.data.dictionary import Dictionary -from fairseq.dataclass import ChoiceEnum, FairseqDataclass -from fairseq.models import BaseFairseqModel, register_model -from fairseq.models.wav2vec.wav2vec2 import ( - ConvFeatureExtractionModel, - TransformerEncoder, -) -from fairseq.modules import GradMultiply, LayerNorm -from fairseq.tasks.hubert_pretraining import ( - HubertPretrainingConfig, - HubertPretrainingTask, -) -from omegaconf import II - -logger = logging.getLogger(__name__) - -EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"]) -MASKING_DISTRIBUTION_CHOICES = ChoiceEnum( - ["static", "uniform", "normal", "poisson"] -) - - -@dataclass -class HubertConfig(FairseqDataclass): - label_rate: int = II("task.label_rate") - - extractor_mode: EXTRACTOR_MODE_CHOICES = field( - default="default", - metadata={ - "help": "mode for feature extractor. default has a single group " - "norm with d groups in the first conv block, whereas layer_norm " - "has layer norms in every block (meant to use with normalize=True)" - }, - ) - encoder_layers: int = field( - default=12, metadata={"help": "num encoder layers in the transformer"} - ) - encoder_embed_dim: int = field( - default=768, metadata={"help": "encoder embedding dimension"} - ) - encoder_ffn_embed_dim: int = field( - default=3072, metadata={"help": "encoder embedding dimension for FFN"} - ) - encoder_attention_heads: int = field( - default=12, metadata={"help": "num encoder attention heads"} - ) - activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( - default="gelu", metadata={"help": "activation function to use"} - ) - - # dropouts - dropout: float = field( - default=0.1, - metadata={"help": "dropout probability for the transformer"}, - ) - attention_dropout: float = field( - default=0.1, - metadata={"help": "dropout probability for attention weights"}, - ) - activation_dropout: float = field( - default=0.0, - metadata={"help": "dropout probability after activation in FFN"}, - ) - encoder_layerdrop: float = field( - default=0.0, - metadata={"help": "probability of dropping a tarnsformer layer"}, - ) - dropout_input: float = field( - default=0.0, - metadata={"help": "dropout to apply to the input (after feat extr)"}, - ) - dropout_features: float = field( - default=0.0, - metadata={ - "help": "dropout to apply to the features (after feat extr)" - }, - ) - - final_dim: int = field( - default=0, - metadata={ - "help": "project final representations and targets to this many " - "dimensions. 
set to encoder_embed_dim is <= 0" - }, - ) - untie_final_proj: bool = field( - default=False, - metadata={"help": "use separate projection for each target"}, - ) - layer_norm_first: bool = field( - default=False, - metadata={"help": "apply layernorm first in the transformer"}, - ) - conv_feature_layers: str = field( - default="[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2", - metadata={ - "help": "string describing convolutional feature extraction " - "layers in form of a python list that contains " - "[(dim, kernel_size, stride), ...]" - }, - ) - conv_bias: bool = field( - default=False, metadata={"help": "include bias in conv encoder"} - ) - logit_temp: float = field( - default=0.1, metadata={"help": "temperature to divide logits by"} - ) - target_glu: bool = field( - default=False, metadata={"help": "adds projection + glu to targets"} - ) - feature_grad_mult: float = field( - default=1.0, - metadata={"help": "multiply feature extractor var grads by this"}, - ) - - # masking - mask_length: int = field(default=10, metadata={"help": "mask length"}) - mask_prob: float = field( - default=0.65, - metadata={"help": "probability of replacing a token with mask"}, - ) - mask_selection: MASKING_DISTRIBUTION_CHOICES = field( - default="static", metadata={"help": "how to choose mask length"} - ) - mask_other: float = field( - default=0, - metadata={ - "help": "secondary mask argument " - "(used for more complex distributions), " - "see help in compute_mask_indicesh" - }, - ) - no_mask_overlap: bool = field( - default=False, metadata={"help": "whether to allow masks to overlap"} - ) - mask_min_space: int = field( - default=1, - metadata={ - "help": "min space between spans (if no overlap is enabled)" - }, - ) - - # channel masking - mask_channel_length: int = field( - default=10, - metadata={"help": "length of the mask for features (channels)"}, - ) - mask_channel_prob: float = field( - default=0.0, - metadata={"help": "probability of replacing a feature with 0"}, - ) - mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( - default="static", - metadata={"help": "how to choose mask length for channel masking"}, - ) - mask_channel_other: float = field( - default=0, - metadata={ - "help": "secondary mask argument " - "(used for more complex distributions), " - "see help in compute_mask_indicesh" - }, - ) - no_mask_channel_overlap: bool = field( - default=False, - metadata={"help": "whether to allow channel masks to overlap"}, - ) - mask_channel_min_space: int = field( - default=1, - metadata={ - "help": "min space between spans (if no overlap is enabled)" - }, - ) - - # positional embeddings - conv_pos: int = field( - default=128, - metadata={ - "help": "number of filters for convolutional positional embeddings" - }, - ) - conv_pos_groups: int = field( - default=16, - metadata={ - "help": "number of groups for convolutional positional embedding" - }, - ) - - latent_temp: Tuple[float, float, float] = field( - default=(2, 0.5, 0.999995), - metadata={"help": "legacy (to be removed)"}, - ) - - # loss computation - skip_masked: bool = field( - default=False, - metadata={"help": "skip computing losses over masked frames"}, - ) - skip_nomask: bool = field( - default=False, - metadata={"help": "skip computing losses over unmasked frames"}, - ) - - -@register_model("hubert", dataclass=HubertConfig) -class HubertModel(BaseFairseqModel): - def __init__( - self, - cfg: HubertConfig, - task_cfg: HubertPretrainingConfig, - dictionaries: List[Dictionary], - ) -> None: - super().__init__() - 
logger.info(f"HubertModel Config: {cfg}") - - feature_enc_layers = eval(cfg.conv_feature_layers) # noqa - self.embed = feature_enc_layers[-1][0] - - self.feature_extractor = ConvFeatureExtractionModel( - conv_layers=feature_enc_layers, - dropout=0.0, - mode=cfg.extractor_mode, - conv_bias=cfg.conv_bias, - ) - feature_ds_rate = np.prod([s for _, _, s in feature_enc_layers]) - self.feat2tar_ratio = ( - cfg.label_rate * feature_ds_rate / task_cfg.sample_rate - ) - - self.post_extract_proj = ( - nn.Linear(self.embed, cfg.encoder_embed_dim) - if self.embed != cfg.encoder_embed_dim - else None - ) - - self.mask_prob = cfg.mask_prob - self.mask_selection = cfg.mask_selection - self.mask_other = cfg.mask_other - self.mask_length = cfg.mask_length - self.no_mask_overlap = cfg.no_mask_overlap - self.mask_min_space = cfg.mask_min_space - - self.mask_channel_prob = cfg.mask_channel_prob - self.mask_channel_selection = cfg.mask_channel_selection - self.mask_channel_other = cfg.mask_channel_other - self.mask_channel_length = cfg.mask_channel_length - self.no_mask_channel_overlap = cfg.no_mask_channel_overlap - self.mask_channel_min_space = cfg.mask_channel_min_space - - self.dropout_input = nn.Dropout(cfg.dropout_input) - self.dropout_features = nn.Dropout(cfg.dropout_features) - - self.feature_grad_mult = cfg.feature_grad_mult - self.logit_temp = cfg.logit_temp - self.skip_masked = cfg.skip_masked - self.skip_nomask = cfg.skip_nomask - - final_dim = ( - cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim - ) - - self.mask_emb = nn.Parameter( - torch.FloatTensor(cfg.encoder_embed_dim).uniform_() - ) - - self.encoder = TransformerEncoder(cfg) - self.layer_norm = LayerNorm(self.embed) - - self.target_glu = None - if cfg.target_glu: - self.target_glu = nn.Sequential( - nn.Linear(final_dim, final_dim * 2), nn.GLU() - ) - - self.untie_final_proj = cfg.untie_final_proj - if self.untie_final_proj: - self.final_proj = nn.Linear( - cfg.encoder_embed_dim, final_dim * len(dictionaries) - ) - else: - self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim) - - # modules below are not needed during fine-tuning - if any([d is None for d in dictionaries]): - logger.info( - "cannot find dictionary. 
assume will be used for fine-tuning" - ) - else: - self.num_classes = [len(d) for d in dictionaries] - self.label_embs_concat = nn.Parameter( - torch.FloatTensor(sum(self.num_classes), final_dim) - ) - nn.init.uniform_(self.label_embs_concat) - - def upgrade_state_dict_named(self, state_dict, name): - """Upgrade a (possibly old) state dict for new versions of fairseq.""" - - super().upgrade_state_dict_named(state_dict, name) - return state_dict - - @classmethod - def build_model(cls, cfg: HubertConfig, task: HubertPretrainingTask): - """Build a new model instance.""" - - model = HubertModel(cfg, task.cfg, task.dictionaries) - return model - - def apply_mask(self, x, padding_mask, target_list): - B, T, C = x.shape - if self.mask_prob > 0: - mask_indices = compute_mask_indices( - (B, T), - padding_mask, - self.mask_prob, - self.mask_length, - self.mask_selection, - self.mask_other, - min_masks=2, - no_overlap=self.no_mask_overlap, - min_space=self.mask_min_space, - ) - mask_indices = torch.from_numpy(mask_indices).to(x.device) - x[mask_indices] = self.mask_emb - else: - mask_indices = None - - if self.mask_channel_prob > 0: - mask_channel_indices = compute_mask_indices( - (B, C), - None, - self.mask_channel_prob, - self.mask_channel_length, - self.mask_channel_selection, - self.mask_channel_other, - no_overlap=self.no_mask_channel_overlap, - min_space=self.mask_channel_min_space, - ) - mask_channel_indices = ( - torch.from_numpy(mask_channel_indices) - .to(x.device) - .unsqueeze(1) - .expand(-1, T, -1) - ) - x[mask_channel_indices] = 0 - - return x, mask_indices - - def compute_nce(self, x, pos, negs): - neg_is_pos = (pos == negs).all(-1) - pos = pos.unsqueeze(0) - targets = torch.cat([pos, negs], dim=0) - - logits = torch.cosine_similarity( - x.float(), targets.float(), dim=-1 - ).type_as(x) - logits /= self.logit_temp - if neg_is_pos.any(): - logits[1:][neg_is_pos] = float("-inf") - logits = logits.transpose(0, 1) # (num_x, num_cls+1) - return logits - - def forward_features(self, source: torch.Tensor) -> torch.Tensor: - if self.feature_grad_mult > 0: - features = self.feature_extractor(source) - if self.feature_grad_mult != 1.0: - features = GradMultiply.apply(features, self.feature_grad_mult) - else: - with torch.no_grad(): - features = self.feature_extractor(source) - return features - - def forward_targets( - self, features: torch.Tensor, target_list: List[torch.Tensor], - ) -> Tuple[torch.Tensor, torch.Tensor]: - # Trim features to ensure labels exist and then get aligned labels - feat_tsz = features.size(2) - targ_tsz = min([t.size(1) for t in target_list]) - if self.feat2tar_ratio * feat_tsz > targ_tsz: - feat_tsz = int(targ_tsz / self.feat2tar_ratio) - features = features[..., :feat_tsz] - target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio - target_list = [t[:, target_inds.long()] for t in target_list] - return features, target_list - - def forward_padding_mask( - self, features: torch.Tensor, padding_mask: torch.Tensor, - ) -> torch.Tensor: - extra = padding_mask.size(1) % features.size(1) - if extra > 0: - padding_mask = padding_mask[:, :-extra] - padding_mask = padding_mask.view( - padding_mask.size(0), features.size(1), -1 - ) - padding_mask = padding_mask.all(-1) - return padding_mask - - def forward( - self, - source: torch.Tensor, - target_list: Optional[List[torch.Tensor]] = None, - padding_mask: Optional[torch.Tensor] = None, - mask: bool = True, - features_only: bool = False, - output_layer: Optional[int] = None, - ) -> Dict[str, torch.Tensor]: - """output 
layer is 1-based""" - features = self.forward_features(source) - if target_list is not None: - features, target_list = self.forward_targets(features, target_list) - - features_pen = features.float().pow(2).mean() - - features = features.transpose(1, 2) - features = self.layer_norm(features) - unmasked_features = features.clone() - - if padding_mask is not None: - padding_mask = self.forward_padding_mask(features, padding_mask) - - if self.post_extract_proj is not None: - features = self.post_extract_proj(features) - - features = self.dropout_input(features) - unmasked_features = self.dropout_features(unmasked_features) - - if mask: - x, mask_indices = self.apply_mask( - features, padding_mask, target_list - ) - else: - x = features - mask_indices = None - - # feature: (B, T, D), float - # target: (B, T), long - # x: (B, T, D), float - # padding_mask: (B, T), bool - # mask_indices: (B, T), bool - x, _ = self.encoder( - x, - padding_mask=padding_mask, - layer=None if output_layer is None else output_layer - 1 - ) - - if features_only: - return {"x": x, "padding_mask": padding_mask, "features": features} - - def compute_pred(proj_x, target, label_embs): - # compute logits for the i-th label set - y = torch.index_select(label_embs, 0, target.long()) - negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1) - if self.target_glu: - y = self.target_glu(y) - negs = self.target_glu(negs) - # proj_x: (S, D) - # y: (S, D) - # negs: (Neg, S, D) - return self.compute_nce(proj_x, y, negs) - - label_embs_list = self.label_embs_concat.split(self.num_classes, 0) - - if not self.skip_masked: - masked_indices = torch.logical_and(~padding_mask, mask_indices) - proj_x_m = self.final_proj(x[masked_indices]) - if self.untie_final_proj: - proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1) - else: - proj_x_m_list = [proj_x_m for _ in range(len(target_list))] - logit_m_list = [ - compute_pred(proj_x_m, t[masked_indices], label_embs_list[i]) - for i, (proj_x_m, t) in enumerate( - zip(proj_x_m_list, target_list) - ) - ] - else: - logit_m_list = [None for _ in target_list] - - if not self.skip_nomask: - nomask_indices = torch.logical_and(~padding_mask, ~mask_indices) - proj_x_u = self.final_proj(x[nomask_indices]) - if self.untie_final_proj: - proj_x_u_list = proj_x_u.chunk(len(target_list), dim=-1) - else: - proj_x_u_list = [proj_x_u for _ in range(len(target_list))] - - logit_u_list = [ - compute_pred(proj_x_u, t[nomask_indices], label_embs_list[i]) - for i, (proj_x_u, t) in enumerate( - zip(proj_x_u_list, target_list) - ) - ] - else: - logit_u_list = [None for _ in target_list] - - result = { - "logit_m_list": logit_m_list, - "logit_u_list": logit_u_list, - "padding_mask": padding_mask, - "features_pen": features_pen, - } - return result - - def extract_features( - self, - source: torch.Tensor, - padding_mask: Optional[torch.Tensor] = None, - mask: bool = False, - ret_conv: bool = False, - output_layer: Optional[int] = None, - ) -> Tuple[torch.Tensor, torch.Tensor]: - res = self.forward( - source, - padding_mask=padding_mask, - mask=mask, - features_only=True, - output_layer=output_layer, - ) - feature = res["features"] if ret_conv else res["x"] - return feature, res["padding_mask"] - - def get_logits(self, net_output, is_masked=True): - if is_masked: - logits_list = net_output["logit_m_list"] - else: - logits_list = net_output["logit_u_list"] - logits_list = [x.float() for x in logits_list if x is not None] - return logits_list - - def get_targets(self, net_output, is_masked=True): - logits_list = 
self.get_logits(net_output, is_masked) - targets_list = [ - x.new_zeros(x.size(0), dtype=torch.long) for x in logits_list - ] - return targets_list - - def get_extra_losses(self, net_output): - extra_losses = [] - names = [] - - if "features_pen" in net_output: - extra_losses.append(net_output["features_pen"]) - names.append("features_pen") - - return extra_losses, names - - def remove_pretraining_modules(self): - self.target_glu = None - self.final_proj = None diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer_lm.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer_lm.py deleted file mode 100644 index eedd5151ba5b1a7050b37639023cf8a158fae8d4..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer_lm.py +++ /dev/null @@ -1,545 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -from dataclasses import dataclass, field -from typing import Optional - -from fairseq import options, utils -from fairseq.dataclass import ChoiceEnum, FairseqDataclass -from fairseq.models import ( - FairseqLanguageModel, - register_model, - register_model_architecture, -) -from fairseq.models.transformer import ( - DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder -) -from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder -from fairseq.utils import safe_getattr, safe_hasattr -from omegaconf import II - - -DEFAULT_MAX_TARGET_POSITIONS = 1024 - - -@dataclass -class TransformerLanguageModelConfig(FairseqDataclass): - activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( - default="relu", metadata={"help": "activation function to use"} - ) - dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) - attention_dropout: float = field( - default=0.0, metadata={"help": "dropout probability for attention weights"} - ) - activation_dropout: float = field( - default=0.0, metadata={"help": "dropout probability after activation in FFN."} - ) - relu_dropout: float = field( - default=0.0, metadata={"help": "dropout probability after activation in FFN."} - ) - decoder_embed_dim: int = field( - default=512, metadata={"help": "decoder embedding dimension"} - ) - decoder_output_dim: int = field( - default=512, metadata={"help": "decoder output dimension"} - ) - decoder_input_dim: int = field( - default=512, metadata={"help": "decoder input dimension"} - ) - decoder_ffn_embed_dim: int = field( - default=2048, metadata={"help": "decoder embedding dimension for FFN"} - ) - decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"}) - decoder_attention_heads: int = field( - default=8, metadata={"help": "num decoder attention heads"} - ) - decoder_normalize_before: bool = field( - default=False, metadata={"help": "apply layernorm before each decoder block"} - ) - no_decoder_final_norm: bool = field( - default=False, - metadata={"help": "don't add an extra layernorm after the last decoder block"}, - ) - adaptive_softmax_cutoff: Optional[str] = field( - default=None, - metadata={ - "help": "comma separated list of adaptive softmax cutoff points. 
" - "Must be used with adaptive_loss criterion" - }, - ) - adaptive_softmax_dropout: float = field( - default=0, - metadata={"help": "sets adaptive softmax dropout for the tail projections"}, - ) - adaptive_softmax_factor: float = field( - default=4, metadata={"help": "adaptive input factor"} - ) - no_token_positional_embeddings: bool = field( - default=False, - metadata={ - "help": "if set, disables positional embeddings (outside self attention)" - }, - ) - share_decoder_input_output_embed: bool = field( - default=False, metadata={"help": "share decoder input and output embeddings"} - ) - character_embeddings: bool = field( - default=False, - metadata={ - "help": "if set, uses character embedding convolutions to produce token embeddings" - }, - ) - character_filters: str = field( - default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", - metadata={"help": "size of character embeddings"}, - ) - character_embedding_dim: int = field( - default=4, metadata={"help": "size of character embeddings"} - ) - char_embedder_highway_layers: int = field( - default=2, - metadata={"help": "number of highway layers for character token embeddder"}, - ) - adaptive_input: bool = field( - default=False, metadata={"help": "if set, uses adaptive input"} - ) - adaptive_input_factor: float = field( - default=4, metadata={"help": "adaptive input factor"} - ) - adaptive_input_cutoff: Optional[str] = field( - default=None, - metadata={"help": "comma separated list of adaptive input cutoff points."}, - ) - tie_adaptive_weights: bool = field( - default=False, - metadata={ - "help": "if set, ties the weights of adaptive softmax and adaptive input" - }, - ) - tie_adaptive_proj: bool = field( - default=False, - metadata={ - "help": "if set, ties the projection weights of adaptive softmax and adaptive input" - }, - ) - decoder_learned_pos: bool = field( - default=False, - metadata={"help": "use learned positional embeddings in the decoder"}, - ) - layernorm_embedding: bool = field( - default=False, metadata={"help": "add layernorm to embedding"} - ) - no_scale_embedding: bool = field( - default=False, metadata={"help": "if True, dont scale embeddings"} - ) - checkpoint_activations: bool = field( - default=False, metadata={"help": "checkpoint activations at each layer"} - ) - offload_activations: bool = field( - default=False, - metadata={"help": "move checkpointed activations to CPU after they are used."}, - ) - # config for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) - decoder_layerdrop: float = field( - default=0.0, metadata={"help": "LayerDrop probability for decoder"} - ) - decoder_layers_to_keep: Optional[str] = field( - default=None, - metadata={ - "help": "which layers to *keep* when pruning as a comma-separated list" - }, - ) - # config for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) - quant_noise_pq: float = field( - default=0.0, - metadata={"help": "iterative PQ quantization noise at training time"}, - ) - quant_noise_pq_block_size: int = field( - default=8, - metadata={"help": "block size of quantization noise at training time"}, - ) - quant_noise_scalar: float = field( - default=0.0, - metadata={ - "help": "scalar quantization noise and scalar quantization at training time" - }, - ) - # config for Fully Sharded Data Parallel (FSDP) training - min_params_to_wrap: int = field( - default=DEFAULT_MIN_PARAMS_TO_WRAP, - metadata={ - "help": ( - "minimum number of params for a layer to be wrapped with 
FSDP() when " - "training with --ddp-backend=fully_sharded. Smaller values will " - "improve memory efficiency, but may make torch.distributed " - "communication less efficient due to smaller input sizes. This option " - "is set to 0 (i.e., always wrap) when --checkpoint-activations or " - "--offload-activations are passed." - ) - } - ) - # config for "BASE Layers: Simplifying Training of Large, Sparse Models" - base_layers: Optional[int] = field( - default=0, metadata={"help": "number of BASE layers in total"} - ) - base_sublayers: Optional[int] = field( - default=1, metadata={"help": "number of sublayers in each BASE layer"} - ) - base_shuffle: Optional[int] = field( - default=1, metadata={"help": "shuffle tokens between workers before computing assignment"} - ) - # options from other parts of the config - add_bos_token: bool = II("task.add_bos_token") - tokens_per_sample: int = II("task.tokens_per_sample") - max_target_positions: Optional[int] = II("task.max_target_positions") - tpu: bool = II("common.tpu") - - -@register_model("transformer_lm", dataclass=TransformerLanguageModelConfig) -class TransformerLanguageModel(FairseqLanguageModel): - @classmethod - def hub_models(cls): - def moses_fastbpe(path): - return {"path": path, "tokenizer": "moses", "bpe": "fastbpe"} - - def spm(path): - return {"path": path, "tokenizer": "space", "bpe": "sentencepiece"} - - return { - "transformer_lm.gbw.adaptive_huge": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2", - "transformer_lm.wiki103.adaptive": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.v2.tar.bz2", - "transformer_lm.wmt19.en": moses_fastbpe( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.bz2" - ), - "transformer_lm.wmt19.de": moses_fastbpe( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.bz2" - ), - "transformer_lm.wmt19.ru": moses_fastbpe( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.bz2" - ), - "transformer_lm.wmt20.en": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.en.tar.gz" - ), - "transformer_lm.wmt20.ta": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.ta.tar.gz" - ), - "transformer_lm.wmt20.iu.news": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.news.tar.gz" - ), - "transformer_lm.wmt20.iu.nh": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.nh.tar.gz" - ), - } - - def __init__(self, decoder): - super().__init__(decoder) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - - if args.decoder_layers_to_keep: - args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) - - if safe_getattr(args, "max_target_positions", None) is None: - args.max_target_positions = safe_getattr( - args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS - ) - - if args.character_embeddings: - embed_tokens = CharacterTokenEmbedder( - task.source_dictionary, - eval(args.character_filters), - args.character_embedding_dim, - args.decoder_embed_dim, - args.char_embedder_highway_layers, - ) - elif args.adaptive_input: - embed_tokens = AdaptiveInput( - len(task.source_dictionary), - task.source_dictionary.pad(), - args.decoder_input_dim, - args.adaptive_input_factor, - args.decoder_embed_dim, - options.eval_str_list(args.adaptive_input_cutoff, type=int), - args.quant_noise_pq, - args.quant_noise_pq_block_size, - ) - else: - embed_tokens = cls.build_embedding( - args, task.source_dictionary, args.decoder_input_dim - ) - - if 
args.tie_adaptive_weights: - assert args.adaptive_input - assert args.adaptive_input_factor == args.adaptive_softmax_factor - assert ( - args.adaptive_softmax_cutoff == args.adaptive_input_cutoff - ), "{} != {}".format( - args.adaptive_softmax_cutoff, args.adaptive_input_cutoff - ) - assert args.decoder_input_dim == args.decoder_output_dim - - decoder = TransformerDecoder( - args, task.target_dictionary, embed_tokens, no_encoder_attn=True - ) - return cls(decoder) - - @classmethod - def build_embedding(cls, args, dictionary, embed_dim, path=None): - embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad()) - return embed_tokens - - -def base_lm_architecture(args): - # backward compatibility for older model checkpoints - if safe_hasattr(args, "no_tie_adaptive_proj"): - # previous models defined --no-tie-adaptive-proj, so use the existence of - # that option to determine if this is an "old" model checkpoint - args.no_decoder_final_norm = True # old models always set this to True - if args.no_tie_adaptive_proj is False: - args.tie_adaptive_proj = True - if safe_hasattr(args, "decoder_final_norm"): - args.no_decoder_final_norm = not args.decoder_final_norm - - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) - - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) - args.decoder_layers = safe_getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) - args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) - args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) - args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) - args.activation_fn = safe_getattr(args, "activation_fn", "relu") - - args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) - args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) - args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) - args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) - args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) - - args.base_layers = safe_getattr(args, "base_layers", 0) - args.base_sublayers = safe_getattr(args, "base_sublayers", 1) - args.base_shuffle = safe_getattr(args, "base_shuffle", False) - - args.add_bos_token = safe_getattr(args, "add_bos_token", False) - args.no_token_positional_embeddings = safe_getattr( - args, "no_token_positional_embeddings", False - ) - args.share_decoder_input_output_embed = safe_getattr( - args, "share_decoder_input_output_embed", False - ) - args.character_embeddings = safe_getattr(args, "character_embeddings", False) - - args.decoder_output_dim = safe_getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = safe_getattr(args, "decoder_input_dim", args.decoder_embed_dim) - - # Model training is not stable without this - args.decoder_normalize_before = True - args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) - - args.adaptive_input = safe_getattr(args, "adaptive_input", False) - args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) - args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) - - 
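# Tiny illustration (hypothetical helper, not fairseq's actual safe_getattr) of the
# default-filling pattern used throughout these architecture functions: a field is only
# filled in when the incoming args namespace does not already carry a value.
from argparse import Namespace

def fill_default(args, name, default):
    if getattr(args, name, None) is None:
        setattr(args, name, default)
    return getattr(args, name)

args = Namespace(decoder_layers=16)                    # user override survives
print(fill_default(args, "decoder_layers", 6))         # 16
print(fill_default(args, "decoder_embed_dim", 512))    # 512 (filled in)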
args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) - args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) - - args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) - args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False) - args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) - args.offload_activations = safe_getattr(args, "offload_activations", False) - if args.offload_activations: - args.checkpoint_activations = True - - -@register_model_architecture("transformer_lm", "transformer_lm_big") -def transformer_lm_big(args): - args.decoder_layers = safe_getattr(args, "decoder_layers", 12) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_wiki103") -@register_model_architecture("transformer_lm", "transformer_lm_baevski_wiki103") -def transformer_lm_baevski_wiki103(args): - args.decoder_layers = safe_getattr(args, "decoder_layers", 16) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) - args.dropout = safe_getattr(args, "dropout", 0.3) - args.adaptive_input = safe_getattr(args, "adaptive_input", True) - args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", True) - args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", "20000,60000") - args.adaptive_softmax_cutoff = safe_getattr( - args, "adaptive_softmax_cutoff", "20000,60000" - ) - args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0.2) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_dropout = safe_getattr(args, "activation_dropout", 0.1) - args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) - args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", True) - transformer_lm_big(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gbw") -@register_model_architecture("transformer_lm", "transformer_lm_baevski_gbw") -def transformer_lm_baevski_gbw(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) - transformer_lm_big(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt") -def transformer_lm_gpt(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 3072) - args.decoder_layers = safe_getattr(args, "decoder_layers", 12) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_small") -def transformer_lm_gpt2_small(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) - args.decoder_layers = 
safe_getattr(args, "decoder_layers", 24) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_tiny") -def transformer_lm_gpt2_tiny(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 64) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 64) - args.decoder_layers = safe_getattr(args, "decoder_layers", 2) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 1) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_medium") -def transformer_lm_gpt2_medium(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1280) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 5120) - args.decoder_layers = safe_getattr(args, "decoder_layers", 36) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 20) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_big") -def transformer_lm_gpt2_big(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1600) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 6400) - args.decoder_layers = safe_getattr(args, "decoder_layers", 48) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 25) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -def base_gpt3_architecture(args): - args.decoder_input_dim = args.decoder_embed_dim - args.decoder_output_dim = args.decoder_embed_dim - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) - # GPT-3 used learned positional embeddings, rather than sinusoidal - args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) - args.dropout = safe_getattr(args, "dropout", 0.0) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - args.share_decoder_input_output_embed = True - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_small") -def transformer_lm_gpt3_small(args): - # 125M params - args.decoder_layers = safe_getattr(args, "decoder_layers", 12) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_medium") -def transformer_lm_gpt3_medium(args): - # 350M params - args.decoder_layers = safe_getattr(args, "decoder_layers", 24) - args.decoder_embed_dim = safe_getattr(args, 
"decoder_embed_dim", 1024) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_large") -def transformer_lm_gpt3_large(args): - # 760M params - args.decoder_layers = safe_getattr(args, "decoder_layers", 24) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1536) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_xl") -def transformer_lm_gpt3_xl(args): - # 1.3B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 24) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2048) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_2_7") -def transformer_lm_gpt3_2_7(args): - # 2.7B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 32) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2560) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_6_7") -def transformer_lm_gpt3_6_7(args): - # 6.7B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 32) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 4096) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_13") -def transformer_lm_gpt3_13(args): - # 13B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 40) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 5120) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 40) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_175") -def transformer_lm_gpt3_175(args): - # 175B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 96) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 12288) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 96) - base_gpt3_architecture(args) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/adagrad.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/adagrad.py deleted file mode 100644 index 4f539541c1c91d8c822f7ce624fa6eabf744f60e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/adagrad.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch.optim - -from . 
import LegacyFairseqOptimizer, register_optimizer - - -@register_optimizer("adagrad") -class Adagrad(LegacyFairseqOptimizer): - def __init__(self, args, params): - super().__init__(args) - self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) - - @staticmethod - def add_args(parser): - """Add optimizer-specific arguments to the parser.""" - # fmt: off - parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', - help='weight decay') - # fmt: on - - @property - def optimizer_config(self): - """ - Return a kwarg dictionary that will be used to override optimizer - args stored in checkpoints. This allows us to load a checkpoint and - resume training using a different set of optimizer args, e.g., with a - different learning rate. - """ - return { - "lr": self.args.lr[0], - "weight_decay": self.args.weight_decay, - } - - @property - def supports_flat_params(self): - return False diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/fp16_optimizer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/fp16_optimizer.py deleted file mode 100644 index c59b21cf6b36650a4dd899e62b83a01715d2e2a1..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/fp16_optimizer.py +++ /dev/null @@ -1,548 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from collections import defaultdict -from itertools import chain - -import torch -from fairseq import optim -from omegaconf import DictConfig - -from .dynamic_loss_scaler import DynamicLossScaler - - -class _FP16OptimizerMixin(object): - def __init__(self, *args, **kwargs): - # forward __init__ call to the next class in mro(method resolution order) - super().__init__(*args, **kwargs) - self._multiply_factor = 1.0 - - @property - def has_flat_params(self): - return torch.is_tensor(self.fp32_params) or ( - isinstance(self.fp32_params, dict) - and all(torch.is_tensor(t) for t in self.fp32_params.values()) - ) - - @classmethod - def build_fp32_params(cls, args, params, flatten=True): - # create FP32 copy of parameters and grads - if flatten: - is_pipeline_parallel = getattr( - args, "pipeline_model_parallel", False - ) and getattr(args, "distributed_no_spawn", False) - total_param_size = sum(p.data.numel() for p in params) - devices = [torch.cuda.current_device()] - if is_pipeline_parallel: - devices = list(set(args.pipeline_devices)) - fp32_params = {} - for device in devices: - if is_pipeline_parallel: - device_param_size = sum( - p.data.numel() for p in params if p.device.index == device - ) - device_params = [p for p in params if p.device.index == device] - else: - device_param_size = total_param_size - device_params = params - fp32_params[device] = ( - device_params[0].new(0).float().new(device_param_size) - ) - offset = 0 - for p in device_params: - numel = p.data.numel() - fp32_params[device][offset : offset + numel].copy_(p.data.view(-1)) - offset += numel - fp32_params[device] = torch.nn.Parameter(fp32_params[device]) - fp32_params[device].grad = fp32_params[device].data.new( - device_param_size - ) - return fp32_params - else: - fp32_params = [] - for p in params: - p32 = torch.nn.Parameter(p.data.float()) - if hasattr(p, 'expert'): - p32.expert = True - elif hasattr(p, 'base_expert'): - p32.base_expert = True - p32.grad = torch.zeros_like(p32.data) - if hasattr(p, "param_group"): - p32.param_group = p.param_group - fp32_params.append(p32) - 
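# A simplified sketch (single device, no pipeline parallelism, not the fairseq code itself)
# of what the flattening branch of build_fp32_params above does: copy every FP16 parameter
# into one contiguous FP32 buffer so the optimizer and gradient reductions see a single
# flat tensor.
import torch
import torch.nn as nn

def flatten_to_fp32(params):
    params = list(params)
    total = sum(p.data.numel() for p in params)
    flat = params[0].data.new_zeros(total).float()
    offset = 0
    for p in params:
        n = p.data.numel()
        flat[offset:offset + n].copy_(p.data.view(-1))
        offset += n
    flat = nn.Parameter(flat)
    flat.grad = flat.data.new_zeros(total)
    return flat

model = nn.Linear(4, 3).half()
print(flatten_to_fp32(model.parameters()).shape)   # torch.Size([15])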
return fp32_params - - def state_dict(self): - """Return the optimizer's state dict.""" - state_dict = self.fp32_optimizer.state_dict() - if self.scaler is not None: - state_dict["loss_scale"] = self.scaler.loss_scale - return state_dict - - def load_state_dict(self, state_dict, optimizer_overrides=None): - """Load an optimizer state dict. - - In general we should prefer the configuration of the existing optimizer - instance (e.g., learning rate) over that found in the state_dict. This - allows us to resume training from a checkpoint using a new set of - optimizer args. - """ - if "loss_scale" in state_dict and self.scaler is not None: - self.scaler.loss_scale = state_dict["loss_scale"] - self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides) - - def backward(self, loss): - """Computes the sum of gradients of the given tensor w.r.t. graph leaves. - - Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this - function additionally dynamically scales the loss to avoid gradient - underflow. - """ - if self.scaler is not None: - loss = self.scaler.scale(loss) - loss.backward() - self._needs_sync = True - - def _sync_fp16_grads_to_fp32(self): - if self._needs_sync: - # copy FP16 grads to FP32 - if self.has_flat_params: - devices = list(self.fp32_params.keys()) - device_params_dict = defaultdict(list) - for p in self.fp16_params: - if p.requires_grad: - device_params_dict[p.device.index].append(p) - for device in devices: - device_params = device_params_dict[device] - offset = 0 - for p in device_params: - grad_data = ( - p.grad.data - if p.grad is not None - else p.data.new_zeros(p.data.shape) - ) - numel = grad_data.numel() - self.fp32_params[device].grad.data[ - offset : offset + numel - ].copy_(grad_data.view(-1)) - offset += numel - else: - for p, p32 in zip(self.fp16_params, self.fp32_params): - if not p.requires_grad: - continue - if p.grad is not None: - if p32.grad is None: - p32.grad = p.grad.data.float() - else: - p32.grad.data.copy_(p.grad.data) - else: - p32.grad = torch.zeros_like(p.data, dtype=torch.float) - - self._needs_sync = False - - def _sync_fp32_params_to_fp16(self): - # copy FP32 params back into FP16 model - if self.has_flat_params: - devices = list(self.fp32_params.keys()) - device_params_dict = defaultdict(list) - for p in self.fp16_params: - device_params_dict[p.device.index].append(p) - for device in devices: - device_params = device_params_dict[device] - offset = 0 - for p in device_params: - numel = p.data.numel() - p.data.copy_( - self.fp32_params[device] - .data[offset : offset + numel] - .view_as(p.data) - ) - offset += numel - else: - for p, p32 in zip(self.fp16_params, self.fp32_params): - if not p.requires_grad: - continue - p.data.copy_(p32.data) - - def _unscale_grads(self): - self._sync_fp16_grads_to_fp32() - if ( - # Skip the multiplication if it's a no-op (i.e., if _multiply_factor - # is 1.0). At the same time, we want to avoid the device-to-host - # transfer by comparing it to 1.0. Since _multiply_factor starts as - # a Python float, we roughly assume that if it's a tensor then it's - # probably not =1.0 anymore and we do the multiplication. Otherwise - # we can safely check the value without a D2H transfer. 
- torch.is_tensor(self._multiply_factor) - or self._multiply_factor != 1.0 - ): - self.fp32_optimizer.multiply_grads(self._multiply_factor) - self._multiply_factor = 1.0 - - def multiply_grads(self, c): - """Multiplies grads by a constant ``c``.""" - self._multiply_factor *= c - - def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): - """Clips gradient norm and updates dynamic loss scaler.""" - self._sync_fp16_grads_to_fp32() - - grad_norm = self._multiply_factor * self.fp32_optimizer.clip_grad_norm( - 0, aggregate_norm_fn - ) - - if self.scaler is not None: - if grad_norm > max_norm > 0.0: - self._multiply_factor *= max_norm / grad_norm - - self.scaler.check_overflow(grad_norm) - elif max_norm > 0.0: - clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1) - self._multiply_factor *= clip_coef - - return grad_norm - - def step(self, closure=None, groups=None): - """Performs a single optimization step.""" - self._sync_fp16_grads_to_fp32() - - if getattr(self, "supports_step_with_scale", False): - self.fp32_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups) - else: - self._unscale_grads() - self.fp32_optimizer.step(closure, groups=groups) - - if self.scaler is not None: - self.scaler.update() - - self._sync_fp32_params_to_fp16() - - def zero_grad(self): - """Clears the gradients of all optimized parameters.""" - for p in self.fp16_params: - p.grad = None - if self.has_flat_params: - if torch.is_tensor(self.fp32_params): - self.fp32_params.grad.zero_() - elif isinstance(self.fp32_params, dict): - for fp32_params in self.fp32_params.values(): - fp32_params.grad.zero_() - else: - raise RuntimeError("self.fp32_params must be a tensor or dict") - else: - for p32 in self.fp32_params: - if p32.grad is not None: - p32.grad.zero_() - self._needs_sync = False - - if self.scaler is not None: - self._multiply_factor = 1.0 / float(self.scaler.loss_scale) - - -class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer): - """ - Wrap an *optimizer* to support FP16 (mixed precision) training. 
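# A condensed, standalone sketch of the policy that the DynamicLossScaler used above
# implements; the exact thresholds and bookkeeping are the library's, this only shows the
# general idea: halve the scale and skip the step on overflow, double it again after
# scale_window consecutive clean updates.
import math

class ToyLossScaler:
    def __init__(self, init_scale=2.0 ** 7, scale_window=2000, scale_factor=2.0):
        self.loss_scale = init_scale
        self.scale_window = scale_window
        self.scale_factor = scale_factor
        self._good_steps = 0

    def update(self, grad_norm):
        if math.isinf(grad_norm) or math.isnan(grad_norm):
            self.loss_scale = max(self.loss_scale / self.scale_factor, 1.0)   # overflow: back off
            self._good_steps = 0
            return False                                                      # caller skips this step
        self._good_steps += 1
        if self._good_steps >= self.scale_window:
            self.loss_scale *= self.scale_factor                              # stable: grow again
            self._good_steps = 0
        return True

scaler = ToyLossScaler()
print(scaler.update(float("inf")), scaler.loss_scale)   # False 64.0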
- """ - - def __init__(self, cfg: DictConfig, params, fp32_optimizer, fp32_params, **kwargs): - super().__init__(cfg.optimizer) - self.fp16_params = params - self.fp32_optimizer = fp32_optimizer - self.fp32_params = fp32_params - - if getattr(cfg.common, "fp16_scale_window", None) is None: - if len(cfg.optimization.update_freq) > 1: - raise ValueError( - "--fp16-scale-window must be given explicitly when using a " - "custom --update-freq schedule" - ) - data_parallel_size = int( - cfg.distributed_training.distributed_world_size - / cfg.common.model_parallel_size - ) - scale_window = int( - 2 ** 14 / data_parallel_size / cfg.optimization.update_freq[0] - ) - else: - scale_window = cfg.common.fp16_scale_window - - if not getattr(cfg.common, "bf16", False): - self.scaler = DynamicLossScaler( - init_scale=cfg.common.fp16_init_scale, - scale_window=scale_window, - tolerance=cfg.common.fp16_scale_tolerance, - threshold=cfg.common.threshold_loss_scale, - min_loss_scale=cfg.common.min_loss_scale, - ) - else: - # disable loss scaling for bfloat16 - self.scaler = None - - @classmethod - def build_optimizer(cls, cfg: DictConfig, params, **kwargs): - """ - Args: - cfg (omegaconf.DictConfig): fairseq args - params (iterable): iterable of parameters to optimize - """ - flatten = not getattr(cfg.common, "fp16_no_flatten_grads", False) - if getattr(cfg.common, "bf16", False): - flatten = False # mixed precision is faster on TPUs without flat grads - fp32_params = cls.build_fp32_params(cfg.optimizer, params, flatten=flatten) - if flatten: - fp32_optimizer = optim.build_optimizer(cfg.optimizer, [fp32_params]) - else: - fp32_optimizer = optim.build_optimizer(cfg.optimizer, fp32_params) - if flatten and not fp32_optimizer.supports_flat_params: - raise RuntimeError( - f"chosen optimizer {fp32_optimizer.__class__.__name__} does not support flat params, please set --fp16-no-flatten-grads" - ) - return cls(cfg, params, fp32_optimizer, fp32_params, **kwargs) - - @property - def optimizer(self): - return self.fp32_optimizer.optimizer - - @optimizer.setter - def optimizer(self, optimizer): - self.fp32_optimizer.optimizer = optimizer - - @property - def lr_scheduler(self): - return getattr(self.fp32_optimizer, "lr_scheduler", None) - - @property - def optimizer_config(self): - return self.fp32_optimizer.optimizer_config - - def get_lr(self): - return self.fp32_optimizer.get_lr() - - def set_lr(self, lr): - self.fp32_optimizer.set_lr(lr) - - def all_reduce_grads(self, module): - self.fp32_optimizer.all_reduce_grads(module) - - @property - def supports_flat_params(self): - return self.fp32_optimizer.supports_flat_params - - -class _MemoryEfficientFP16OptimizerMixin(object): - def __init__(self, *args, **kwargs): - # forward __init__ call to the next class in MRO (method resolution order) - super().__init__(*args, **kwargs) - self._multiply_factor = 1.0 - - @property - def has_flat_params(self): - return False - - def state_dict(self): - """Return the optimizer's state dict.""" - state_dict = self.wrapped_optimizer.state_dict() - if self.scaler is not None: - state_dict["loss_scale"] = self.scaler.loss_scale - return state_dict - - def load_state_dict(self, state_dict, optimizer_overrides=None): - """Load an optimizer state dict. - - In general we should prefer the configuration of the existing optimizer - instance (e.g., learning rate) over that found in the state_dict. This - allows us to resume training from a checkpoint using a new set of - optimizer args. 
- """ - if "loss_scale" in state_dict and self.scaler is not None: - self.scaler.loss_scale = state_dict["loss_scale"] - - self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides) - - # Hack: PyTorch automatically casts the optimizer state to match the - # type of the current parameters. But with --memory-efficient-fp16 the - # params are FP16 while the optimizer state is FP32 and we don't want - # to cast. A workaround is to manually copy back the original state - # after the optimizer has been loaded. - if not getattr(self.optimizer, "disable_mem_eff_fp16_loading_hack", False): - groups = self.optimizer.param_groups - saved_groups = state_dict["param_groups"] - id_map = { - old_id: p - for old_id, p in zip( - chain(*(g["params"] for g in saved_groups)), - chain(*(g["params"] for g in groups)), - ) - } - for k, v in state_dict["state"].items(): - if k in id_map: - param = id_map[k] - self.optimizer.state[param] = v - - def backward(self, loss): - """Computes the sum of gradients of the given tensor w.r.t. graph leaves. - - Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this - function additionally dynamically scales the loss to avoid gradient - underflow. - """ - if self.scaler is not None: - loss = self.scaler.scale(loss) - loss.backward() - - def _unscale_grads(self): - if ( - # Skip the multiplication if it's a no-op (i.e., if _multiply_factor - # is 1.0). At the same time, we want to avoid the device-to-host - # transfer by comparing it to 1.0. Since _multiply_factor starts as - # a Python float, we roughly assume that if it's a tensor then it's - # probably not =1.0 anymore and we do the multiplication. Otherwise - # we can safely check the value without a D2H transfer. - torch.is_tensor(self._multiply_factor) - or self._multiply_factor != 1.0 - ): - self.wrapped_optimizer.multiply_grads(self._multiply_factor) - self._multiply_factor = 1.0 - - def multiply_grads(self, c): - """Multiplies grads by a constant *c*.""" - self._multiply_factor *= c - - def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): - """Clips gradient norm and updates dynamic loss scaler.""" - max_norm = float(max_norm) - grad_norm = self._multiply_factor * self.wrapped_optimizer.clip_grad_norm( - 0, aggregate_norm_fn - ) - - if self.scaler is not None: - grad_norm_cpu = float(grad_norm) - if grad_norm_cpu > max_norm > 0.0: - self._multiply_factor *= max_norm / grad_norm_cpu - - # detect overflow and adjust loss scale - self.scaler.check_overflow(grad_norm_cpu) - elif max_norm > 0.0: - clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1) - self._multiply_factor *= clip_coef - - return grad_norm - - def step(self, closure=None, groups=None): - """Performs a single optimization step.""" - if getattr(self, "supports_step_with_scale", False): - # NOTE(msb) optimizer divides by scale factor - self.wrapped_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups) - else: - self._unscale_grads() - self.wrapped_optimizer.step(closure, groups=groups) - - if self.scaler is not None: - self.scaler.update() - - def zero_grad(self): - """Clears the gradients of all optimized parameters.""" - self.wrapped_optimizer.zero_grad() - if self.scaler is not None: - self._multiply_factor = 1.0 / float(self.scaler.loss_scale) - else: - self._multiply_factor = 1.0 - - @property - def supports_flat_params(self): - return self.wrapped_optimizer.supports_flat_params - - -class MemoryEfficientFP16Optimizer( - _MemoryEfficientFP16OptimizerMixin, optim.FairseqOptimizer -): - """ - Wrap 
an *optimizer* to support FP16 (mixed precision) training. - - Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not - maintain an FP32 copy of the model. We instead expect the optimizer to - convert the gradients to FP32 internally and sync the results back to the - FP16 model params. This significantly reduces memory usage but slightly - increases the time spent in the optimizer. - - Since this wrapper depends on specific functionality in the wrapped - optimizer (i.e., on-the-fly conversion of grads to FP32), only certain - optimizers can be wrapped. This is determined by the - *supports_memory_efficient_fp16* property. - """ - - def __init__( - self, cfg: DictConfig, params, optimizer, allow_unsupported=False, **kwargs - ): - if not allow_unsupported and not optimizer.supports_memory_efficient_fp16: - raise ValueError( - "Unsupported optimizer: {}".format(optimizer.__class__.__name__) - ) - - super().__init__(getattr(cfg, "optimizer", None)) - self.wrapped_optimizer = optimizer - - if getattr(cfg.common, "fp16_scale_window", None) is None: - if len(cfg.optimization.update_freq) > 1: - raise ValueError( - "--fp16-scale-window must be given explicitly when using a " - "custom --update-freq schedule" - ) - data_parallel_size = int( - cfg.distributed_training.distributed_world_size - / cfg.common.model_parallel_size - ) - scale_window = int( - 2 ** 14 / data_parallel_size / cfg.optimization.update_freq[0] - ) - else: - scale_window = cfg.common.fp16_scale_window - - if not getattr(cfg.common, "bf16", False): - self.scaler = DynamicLossScaler( - init_scale=cfg.common.fp16_init_scale, - scale_window=scale_window, - tolerance=cfg.common.fp16_scale_tolerance, - threshold=cfg.common.threshold_loss_scale, - min_loss_scale=cfg.common.min_loss_scale, - ) - else: - # disable loss scaling for bfloat16 - self.scaler = None - - @classmethod - def build_optimizer(cls, cfg: DictConfig, params, **kwargs): - """ - Args: - args (argparse.Namespace): fairseq args - params (iterable): iterable of parameters to optimize - """ - fp16_optimizer = optim.build_optimizer(cfg.optimizer, params) - return cls(cfg, params, fp16_optimizer, **kwargs) - - @property - def optimizer(self): - return self.wrapped_optimizer.optimizer - - @optimizer.setter - def optimizer(self, optimizer): - self.wrapped_optimizer.optimizer = optimizer - - @property - def optimizer_config(self): - return self.wrapped_optimizer.optimizer_config - - @property - def lr_scheduler(self): - return getattr(self.wrapped_optimizer, "lr_scheduler", None) - - def get_lr(self): - return self.wrapped_optimizer.get_lr() - - def set_lr(self, lr): - self.wrapped_optimizer.set_lr(lr) - - def all_reduce_grads(self, module): - self.wrapped_optimizer.all_reduce_grads(module) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_activation_checkpointing.py b/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_activation_checkpointing.py deleted file mode 100644 index 647a9572886f8aff09a4aadc0b21e1d5817ff38e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_activation_checkpointing.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import unittest - -import torch -import torch.nn as nn -from fairseq.modules.checkpoint_activations import checkpoint_wrapper -from torch.utils.checkpoint import checkpoint - - -class Model(nn.Module): - def __init__( - self, use_pytorch_checkpoint=False, use_fairseq_checkpoint=False, **kwargs - ): - super().__init__() - torch.manual_seed(0) - self.use_pytorch_checkpoint = use_pytorch_checkpoint - self.ffn = nn.Sequential( - nn.Linear(32, 128), - # add a Dropout layer to test RNG save/restore - nn.Dropout(p=0.5), - nn.Linear(128, 32), - ) - if use_fairseq_checkpoint: - self.ffn = checkpoint_wrapper(self.ffn, **kwargs) - self.out = nn.Linear(32, 1) - - def forward(self, x): - if self.use_pytorch_checkpoint: - x = checkpoint(self.ffn, x) - else: - x = self.ffn(x) - return self.out(x) - - -class TestActivationCheckpointing(unittest.TestCase): - def _test_checkpoint_wrapper(self, device, log_memory_usage=False): - def get_loss_and_gnorm(model): - torch.manual_seed(1) - input = torch.rand(2, 16, 32).requires_grad_(True).to(device) - model.zero_grad() - loss = model(input).sum() - loss.backward() - gnorm = torch.norm( - torch.stack([torch.norm(p.grad.detach()) for p in model.parameters()]) - ) - return {"loss": loss, "gnorm": gnorm} - - model = Model().to(device) - no_cpt = get_loss_and_gnorm(model) - - model = Model(use_pytorch_checkpoint=True).to(device) - pyt_cpt = get_loss_and_gnorm(model) - torch.testing.assert_allclose(no_cpt["loss"], pyt_cpt["loss"]) - torch.testing.assert_allclose(no_cpt["gnorm"], pyt_cpt["gnorm"]) - - model = Model(use_fairseq_checkpoint=True).to(device) - fairseq_cpt = get_loss_and_gnorm(model) - torch.testing.assert_allclose(no_cpt["loss"], fairseq_cpt["loss"]) - torch.testing.assert_allclose(no_cpt["gnorm"], fairseq_cpt["gnorm"]) - - model = Model(use_fairseq_checkpoint=True, offload_to_cpu=True).to(device) - fairseq_cpt_offload = get_loss_and_gnorm(model) - torch.testing.assert_allclose(no_cpt["loss"], fairseq_cpt_offload["loss"]) - torch.testing.assert_allclose(no_cpt["gnorm"], fairseq_cpt_offload["gnorm"]) - - def test_checkpoint_wrapper_cpu(self): - self._test_checkpoint_wrapper(device=torch.device("cpu")) - - @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") - def test_checkpoint_wrapper_cuda(self): - self._test_checkpoint_wrapper(device=torch.device("cuda")) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/ORI-Muchim/MarinTTS/text/__init__.py b/spaces/ORI-Muchim/MarinTTS/text/__init__.py deleted file mode 100644 index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000 --- a/spaces/ORI-Muchim/MarinTTS/text/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/OgiKazus/vits-uma-genshin-honkai/text/cleaners.py b/spaces/OgiKazus/vits-uma-genshin-honkai/text/cleaners.py deleted file mode 100644 index d26581deb399609163518054718ad80ecca5d934..0000000000000000000000000000000000000000 --- a/spaces/OgiKazus/vits-uma-genshin-honkai/text/cleaners.py +++ /dev/null @@ -1,475 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba, cn2an - - -# This is a list of Korean classifiers preceded by pure Korean numerals. -_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
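# A small usage sketch of the symbol lookup in text_to_sequence above (the symbol set here is
# made up for illustration and the cleaner step is omitted; real configs define their own
# symbols and cleaner names): characters missing from the table are silently dropped.
symbols = list(" abcdefghijklmnopqrstuvwxyz")
_symbol_to_id = {s: i for i, s in enumerate(symbols)}

def to_sequence(clean_text):
    return [_symbol_to_id[ch] for ch in clean_text if ch in _symbol_to_id]

print(to_sequence("hello world!"))   # '!' is skipped, everything else maps to its index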
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if 
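# Minimal sketch of how the simple regex cleaners above compose (the abbreviation table here
# is a two-entry stand-in for the full English list): each cleaner is a pure str -> str
# function, and _clean_text applies them in the order given.
import re

_ws_re = re.compile(r"\s+")
_abbrev = [(re.compile(r"\b%s\." % k, re.IGNORECASE), v)
           for k, v in [("mr", "mister"), ("dr", "doctor")]]

def basic_clean(text):
    for regex, replacement in _abbrev:
        text = regex.sub(replacement, text)
    return _ws_re.sub(" ", text.lower())

print(basic_clean("Dr.  Who met  Mr. Smith"))   # doctor who met mister smith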
re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i len(dataset): - batch_size_all = len(dataset) - prog_bar.completed - for _ in range(batch_size_all): - prog_bar.update() - - # collect results from all ranks - if gpu_collect: - results = collect_results_gpu(results, len(dataset)) - else: - results = collect_results_cpu(results, len(dataset), tmpdir) - return results - - -def collect_results_cpu(result_part, size, tmpdir=None): - """Collect results under cpu mode. - - On cpu mode, this function will save the results on different gpus to - ``tmpdir`` and collect them by the rank 0 worker. - - Args: - result_part (list): Result list containing result parts - to be collected. - size (int): Size of the results, commonly equal to length of - the results. - tmpdir (str | None): temporal directory for collected results to - store. If set to None, it will create a random temporal directory - for it. - - Returns: - list: The collected results. - """ - rank, world_size = get_dist_info() - # create a tmp dir if it is not specified - if tmpdir is None: - MAX_LEN = 512 - # 32 is whitespace - dir_tensor = torch.full((MAX_LEN, ), - 32, - dtype=torch.uint8, - device='cuda') - if rank == 0: - mmcv.mkdir_or_exist('.dist_test') - tmpdir = tempfile.mkdtemp(dir='.dist_test') - tmpdir = torch.tensor( - bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') - dir_tensor[:len(tmpdir)] = tmpdir - dist.broadcast(dir_tensor, 0) - tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() - else: - mmcv.mkdir_or_exist(tmpdir) - # dump the part result to the dir - mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) - dist.barrier() - # collect all parts - if rank != 0: - return None - else: - # load results of all parts from tmp dir - part_list = [] - for i in range(world_size): - part_file = osp.join(tmpdir, f'part_{i}.pkl') - part_result = mmcv.load(part_file) - # When data is severely insufficient, an empty part_result - # on a certain gpu could makes the overall outputs empty. - if part_result: - part_list.append(part_result) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - # remove tmp dir - shutil.rmtree(tmpdir) - return ordered_results - - -def collect_results_gpu(result_part, size): - """Collect results under gpu mode. - - On gpu mode, this function will encode results to gpu tensors and use gpu - communication for results collection. - - Args: - result_part (list): Result list containing result parts - to be collected. 
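# Self-contained sketch (CPU-only, no distributed setup) of the serialization trick these
# result-collection helpers rely on: arbitrary Python results are pickled into a uint8 tensor,
# which collective ops such as all_gather/broadcast can move, padded to a shared length, and
# un-pickled on the receiving side.
import pickle
import torch

def to_byte_tensor(obj):
    return torch.tensor(bytearray(pickle.dumps(obj)), dtype=torch.uint8)

def from_byte_tensor(t, length):
    return pickle.loads(t[:length].numpy().tobytes())

part = to_byte_tensor([{"score": 0.9}, {"score": 0.7}])
padded = torch.zeros(part.numel() + 16, dtype=torch.uint8)   # stand-in for padding to the max shape
padded[:part.numel()] = part
print(from_byte_tensor(padded, part.numel()))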
- size (int): Size of the results, commonly equal to length of - the results. - - Returns: - list: The collected results. - """ - rank, world_size = get_dist_info() - # dump result part to tensor with pickle - part_tensor = torch.tensor( - bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') - # gather all result part tensor shape - shape_tensor = torch.tensor(part_tensor.shape, device='cuda') - shape_list = [shape_tensor.clone() for _ in range(world_size)] - dist.all_gather(shape_list, shape_tensor) - # padding result part tensor to max length - shape_max = torch.tensor(shape_list).max() - part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') - part_send[:shape_tensor[0]] = part_tensor - part_recv_list = [ - part_tensor.new_zeros(shape_max) for _ in range(world_size) - ] - # gather all result part - dist.all_gather(part_recv_list, part_send) - - if rank == 0: - part_list = [] - for recv, shape in zip(part_recv_list, shape_list): - part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()) - # When data is severely insufficient, an empty part_result - # on a certain gpu could makes the overall outputs empty. - if part_result: - part_list.append(part_result) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - return ordered_results diff --git a/spaces/PSLD/PSLD/stable-diffusion/ldm/models/diffusion/ddpm.py b/spaces/PSLD/PSLD/stable-diffusion/ldm/models/diffusion/ddpm.py deleted file mode 100644 index b3092c7005eb1b8370bda8d716f666ad66a9ee7e..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/stable-diffusion/ldm/models/diffusion/ddpm.py +++ /dev/null @@ -1,1445 +0,0 @@ -""" -wild mixture of -https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py -https://github.com/CompVis/taming-transformers --- merci -""" - -import torch -import torch.nn as nn -import numpy as np -import pytorch_lightning as pl -from torch.optim.lr_scheduler import LambdaLR -from einops import rearrange, repeat -from contextlib import contextmanager -from functools import partial -from tqdm import tqdm -from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only - -from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config -from ldm.modules.ema import LitEma -from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution -from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL -from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like -from ldm.models.diffusion.ddim import DDIMSampler - - -__conditioning_keys__ = {'concat': 'c_concat', - 'crossattn': 'c_crossattn', - 'adm': 'y'} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -def uniform_on_device(r1, r2, shape, device): - return (r1 - r2) * torch.rand(*shape, device=device) + r2 - - -class DDPM(pl.LightningModule): - # classic DDPM with Gaussian diffusion, in image space - def __init__(self, - unet_config, - 
timesteps=1000, - beta_schedule="linear", - loss_type="l2", - ckpt_path=None, - ignore_keys=[], - load_only_unet=False, - monitor="val/loss", - use_ema=True, - first_stage_key="image", - image_size=256, - channels=3, - log_every_t=100, - clip_denoised=True, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - given_betas=None, - original_elbo_weight=0., - v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta - l_simple_weight=1., - conditioning_key=None, - parameterization="eps", # all assuming fixed variance schedules - scheduler_config=None, - use_positional_encodings=False, - learn_logvar=False, - logvar_init=0., - ): - super().__init__() - assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' - self.parameterization = parameterization - print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") - self.cond_stage_model = None - self.clip_denoised = clip_denoised - self.log_every_t = log_every_t - self.first_stage_key = first_stage_key - self.image_size = image_size # try conv? - self.channels = channels - self.use_positional_encodings = use_positional_encodings - self.model = DiffusionWrapper(unet_config, conditioning_key) - count_params(self.model, verbose=True) - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self.model) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - self.use_scheduler = scheduler_config is not None - if self.use_scheduler: - self.scheduler_config = scheduler_config - - self.v_posterior = v_posterior - self.original_elbo_weight = original_elbo_weight - self.l_simple_weight = l_simple_weight - - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) - - self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, - linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) - - self.loss_type = loss_type - - self.learn_logvar = learn_logvar - self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) - if self.learn_logvar: - self.logvar = nn.Parameter(self.logvar, requires_grad=True) - - - def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if exists(given_betas): - betas = given_betas - else: - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, - cosine_s=cosine_s) - alphas = 1. - betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. 
- alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( - 1. - alphas_cumprod) + self.v_posterior * betas - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer('posterior_variance', to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) - self.register_buffer('posterior_mean_coef1', to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) - self.register_buffer('posterior_mean_coef2', to_torch( - (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) - - if self.parameterization == "eps": - lvlb_weights = self.betas ** 2 / ( - 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) - elif self.parameterization == "x0": - lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) - else: - raise NotImplementedError("mu not supported") - # TODO how to choose this term - lvlb_weights[0] = lvlb_weights[1] - self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) - assert not torch.isnan(self.lvlb_weights).all() - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.model.parameters()) - self.model_ema.copy_to(self.model) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.model.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - def q_mean_variance(self, x_start, t): - """ - Get the distribution q(x_t | x_0). - :param x_start: the [N x C x ...] tensor of noiseless inputs. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. - :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
- """ - mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) - variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + - extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, clip_denoised: bool): - model_out = self.model(x, t) - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - if clip_denoised: - x_recon.clamp_(-1., 1.) - - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - # @torch.no_grad() - def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - # @torch.no_grad() - def p_sample_loop(self, shape, return_intermediates=False): - device = self.betas.device - b = shape[0] - img = torch.randn(shape, device=device) - intermediates = [img] - for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): - img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), - clip_denoised=self.clip_denoised) - if i % self.log_every_t == 0 or i == self.num_timesteps - 1: - intermediates.append(img) - if return_intermediates: - return img, intermediates - return img - - # @torch.no_grad() - def sample(self, batch_size=16, return_intermediates=False): - image_size = self.image_size - channels = self.channels - return self.p_sample_loop((batch_size, channels, image_size, image_size), - return_intermediates=return_intermediates) - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - def get_loss(self, pred, target, mean=True): - if self.loss_type == 'l1': - loss = (target - pred).abs() - if mean: - loss = loss.mean() - elif self.loss_type == 'l2': - if mean: - loss = torch.nn.functional.mse_loss(target, pred) - else: - loss = torch.nn.functional.mse_loss(target, pred, reduction='none') - else: - raise NotImplementedError("unknown loss type '{loss_type}'") - - return loss - - def p_losses(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) 
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_out = self.model(x_noisy, t) - - loss_dict = {} - if self.parameterization == "eps": - target = noise - elif self.parameterization == "x0": - target = x_start - else: - raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") - - loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) - - log_prefix = 'train' if self.training else 'val' - - loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) - loss_simple = loss.mean() * self.l_simple_weight - - loss_vlb = (self.lvlb_weights[t] * loss).mean() - loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) - - loss = loss_simple + self.original_elbo_weight * loss_vlb - - loss_dict.update({f'{log_prefix}/loss': loss}) - - return loss, loss_dict - - def forward(self, x, *args, **kwargs): - # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size - # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - return self.p_losses(x, t, *args, **kwargs) - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = x.to(memory_format=torch.contiguous_format).float() - return x - - def shared_step(self, batch): - x = self.get_input(batch, self.first_stage_key) - loss, loss_dict = self(x) - return loss, loss_dict - - def training_step(self, batch, batch_idx): - loss, loss_dict = self.shared_step(batch) - - self.log_dict(loss_dict, prog_bar=True, - logger=True, on_step=True, on_epoch=True) - - self.log("global_step", self.global_step, - prog_bar=True, logger=True, on_step=True, on_epoch=False) - - if self.use_scheduler: - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) - - return loss - - # @torch.no_grad() - def validation_step(self, batch, batch_idx): - _, loss_dict_no_ema = self.shared_step(batch) - with self.ema_scope(): - _, loss_dict_ema = self.shared_step(batch) - loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} - self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self.model) - - def _get_rows_from_list(self, samples): - n_imgs_per_row = len(samples) - denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - # @torch.no_grad() - def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): - log = dict() - x = self.get_input(batch, self.first_stage_key) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - x = x.to(self.device)[:N] - log["inputs"] = x - - # get diffusion row - diffusion_row = list() - x_start = x[:n_row] - - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(x_start) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - diffusion_row.append(x_noisy) - - log["diffusion_row"] = self._get_rows_from_list(diffusion_row) - - if sample: - # get 
denoise row - with self.ema_scope("Plotting"): - samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) - - log["samples"] = samples - log["denoise_row"] = self._get_rows_from_list(denoise_row) - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.learn_logvar: - params = params + [self.logvar] - opt = torch.optim.AdamW(params, lr=lr) - return opt - - -class LatentDiffusion(DDPM): - """main class""" - def __init__(self, - first_stage_config, - cond_stage_config, - num_timesteps_cond=None, - cond_stage_key="image", - cond_stage_trainable=False, - concat_mode=True, - cond_stage_forward=None, - conditioning_key=None, - scale_factor=1.0, - scale_by_std=False, - *args, **kwargs): - self.num_timesteps_cond = default(num_timesteps_cond, 1) - self.scale_by_std = scale_by_std - assert self.num_timesteps_cond <= kwargs['timesteps'] - # for backwards compatibility after implementation of DiffusionWrapper - if conditioning_key is None: - conditioning_key = 'concat' if concat_mode else 'crossattn' - if cond_stage_config == '__is_unconditional__': - conditioning_key = None - ckpt_path = kwargs.pop("ckpt_path", None) - ignore_keys = kwargs.pop("ignore_keys", []) - super().__init__(conditioning_key=conditioning_key, *args, **kwargs) - self.concat_mode = concat_mode - self.cond_stage_trainable = cond_stage_trainable - self.cond_stage_key = cond_stage_key - try: - self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: - self.num_downs = 0 - if not scale_by_std: - self.scale_factor = scale_factor - else: - self.register_buffer('scale_factor', torch.tensor(scale_factor)) - self.instantiate_first_stage(first_stage_config) - self.instantiate_cond_stage(cond_stage_config) - self.cond_stage_forward = cond_stage_forward - self.clip_denoised = False - self.bbox_tokenizer = None - - self.restarted_from_ckpt = False - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys) - self.restarted_from_ckpt = True - - def make_cond_schedule(self, ): - self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) - ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() - self.cond_ids[:self.num_timesteps_cond] = ids - - @rank_zero_only - # @torch.no_grad() - def on_train_batch_start(self, batch, batch_idx, dataloader_idx): - # only for very first batch - if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: - assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' - # set rescale weight to 1./std of encodings - print("### USING STD-RESCALING ###") - x = super().get_input(batch, self.first_stage_key) - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - del self.scale_factor - self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) - print(f"setting self.scale_factor to {self.scale_factor}") - print("### USING STD-RESCALING ###") - - def register_schedule(self, - given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) - - self.shorten_cond_schedule = self.num_timesteps_cond > 1 - if self.shorten_cond_schedule: - self.make_cond_schedule() - - def instantiate_first_stage(self, config): - model = instantiate_from_config(config) - self.first_stage_model = model.eval() - self.first_stage_model.train = disabled_train - for param in self.first_stage_model.parameters(): - param.requires_grad = False - - def instantiate_cond_stage(self, config): - if not self.cond_stage_trainable: - if config == "__is_first_stage__": - print("Using first stage also as cond stage.") - self.cond_stage_model = self.first_stage_model - elif config == "__is_unconditional__": - print(f"Training {self.__class__.__name__} as an unconditional model.") - self.cond_stage_model = None - # self.be_unconditional = True - else: - model = instantiate_from_config(config) - self.cond_stage_model = model.eval() - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - else: - assert config != '__is_first_stage__' - assert config != '__is_unconditional__' - model = instantiate_from_config(config) - self.cond_stage_model = model - - def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): - denoise_row = [] - for zd in tqdm(samples, desc=desc): - denoise_row.append(self.decode_first_stage(zd.to(self.device), - force_not_quantize=force_no_decoder_quantization)) - n_imgs_per_row = len(denoise_row) - denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W - denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - def get_first_stage_encoding(self, encoder_posterior): - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample() - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") - return self.scale_factor * z - - def get_learned_conditioning(self, c): - if self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): - c = self.cond_stage_model.encode(c) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - else: - c = self.cond_stage_model(c) - else: - assert hasattr(self.cond_stage_model, self.cond_stage_forward) - c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) - return c - - def meshgrid(self, h, w): - y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) - x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) - - arr = torch.cat([y, x], dim=-1) - return arr - - def delta_border(self, h, w): - """ - :param h: height - :param w: width - :return: normalized distance to image border, - wtith min distance = 0 at border and max dist = 0.5 at image center - """ - lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) - arr = self.meshgrid(h, w) / lower_right_corner - dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] - dist_right_down = 
torch.min(1 - arr, dim=-1, keepdims=True)[0] - edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] - return edge_dist - - def get_weighting(self, h, w, Ly, Lx, device): - weighting = self.delta_border(h, w) - weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], - self.split_input_params["clip_max_weight"], ) - weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) - - if self.split_input_params["tie_braker"]: - L_weighting = self.delta_border(Ly, Lx) - L_weighting = torch.clip(L_weighting, - self.split_input_params["clip_min_tie_weight"], - self.split_input_params["clip_max_tie_weight"]) - - L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) - weighting = weighting * L_weighting - return weighting - - def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code - """ - :param x: img of size (bs, c, h, w) - :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) - """ - bs, nc, h, w = x.shape - - # number of crops in image - Ly = (h - kernel_size[0]) // stride[0] + 1 - Lx = (w - kernel_size[1]) // stride[1] + 1 - - if uf == 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) - - weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) - - elif uf > 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), - dilation=1, padding=0, - stride=(stride[0] * uf, stride[1] * uf)) - fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) - - elif df > 1 and uf == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), - dilation=1, padding=0, - stride=(stride[0] // df, stride[1] // df)) - fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) - - else: - raise NotImplementedError - - return fold, unfold, normalization, weighting - - # @torch.no_grad() - def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, - cond_key=None, return_original_cond=False, bs=None): - x = super().get_input(batch, k) - if bs is not None: - x = x[:bs] - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - - if self.model.conditioning_key is not None: - if cond_key is None: - cond_key = 
self.cond_stage_key - if cond_key != self.first_stage_key: - if cond_key in ['caption', 'coordinates_bbox']: - xc = batch[cond_key] - elif cond_key == 'class_label': - xc = batch - else: - xc = super().get_input(batch, cond_key).to(self.device) - else: - xc = x - if not self.cond_stage_trainable or force_c_encode: - if isinstance(xc, dict) or isinstance(xc, list): - # import pudb; pudb.set_trace() - c = self.get_learned_conditioning(xc) - else: - c = self.get_learned_conditioning(xc.to(self.device)) - else: - c = xc - if bs is not None: - c = c[:bs] - - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - ckey = __conditioning_keys__[self.model.conditioning_key] - c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} - - else: - c = None - xc = None - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - c = {'pos_x': pos_x, 'pos_y': pos_y} - out = [z, c] - if return_first_stage_outputs: - xrec = self.decode_first_stage(z) - out.extend([x, xrec]) - if return_original_cond: - out.append(xc) - return out - - # @torch.no_grad() - def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. 
reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - # same as above but without decorator - def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - # @torch.no_grad() - def encode_first_stage(self, x): - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. 
(64, 64) - df = self.split_input_params["vqf"] - self.split_input_params['original_image_size'] = x.shape[-2:] - bs, nc, h, w = x.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) - z = unfold(x) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) - o = o * weighting - - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization - return decoded - - else: - return self.first_stage_model.encode(x) - else: - return self.first_stage_model.encode(x) - - def shared_step(self, batch, **kwargs): - x, c = self.get_input(batch, self.first_stage_key) - loss = self(x, c) - return loss - - def forward(self, x, c, *args, **kwargs): - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - if self.model.conditioning_key is not None: - assert c is not None - if self.cond_stage_trainable: - c = self.get_learned_conditioning(c) - if self.shorten_cond_schedule: # TODO: drop this option - tc = self.cond_ids[t].to(self.device) - c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) - return self.p_losses(x, c, t, *args, **kwargs) - - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - - def apply_model(self, x_noisy, t, cond, return_ids=False): - - if isinstance(cond, dict): - # hybrid case, cond is exptected to be a dict - pass - else: - if not isinstance(cond, list): - cond = [cond] - key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' - cond = {key: cond} - - if hasattr(self, "split_input_params"): - assert len(cond) == 1 # todo can only deal with one conditioning atm - assert not return_ids - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. 
(64, 64) - - h, w = x_noisy.shape[-2:] - - fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) - - z = unfold(x_noisy) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] - - if self.cond_stage_key in ["image", "LR_image", "segmentation", - 'bbox_img'] and self.model.conditioning_key: # todo check for completeness - c_key = next(iter(cond.keys())) # get key - c = next(iter(cond.values())) # get value - assert (len(c) == 1) # todo extend to list with more than one elem - c = c[0] # get element - - c = unfold(c) - c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] - - elif self.cond_stage_key == 'coordinates_bbox': - assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' - - # assuming padding of unfold is always 0 and its dilation is always 1 - n_patches_per_row = int((w - ks[0]) / stride[0] + 1) - full_img_h, full_img_w = self.split_input_params['original_image_size'] - # as we are operating on latents, we need the factor from the original image size to the - # spatial latent size to properly rescale the crops for regenerating the bbox annotations - num_downs = self.first_stage_model.encoder.num_resolutions - 1 - rescale_latent = 2 ** (num_downs) - - # get top left postions of patches as conforming for the bbbox tokenizer, therefore we - # need to rescale the tl patch coordinates to be in between (0,1) - tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, - rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) - for patch_nr in range(z.shape[-1])] - - # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) - patch_limits = [(x_tl, y_tl, - rescale_latent * ks[0] / full_img_w, - rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] - # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] - - # tokenize crop coordinates for the bounding boxes of the respective patches - patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) - for bbox in patch_limits] # list of length l with tensors of shape (1, 2) - print(patch_limits_tknzd[0].shape) - # cut tknzd crop position from conditioning - assert isinstance(cond, dict), 'cond must be dict to be fed into model' - cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) - print(cut_cond.shape) - - adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) - adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') - print(adapted_cond.shape) - adapted_cond = self.get_learned_conditioning(adapted_cond) - print(adapted_cond.shape) - adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) - print(adapted_cond.shape) - - cond_list = [{'c_crossattn': [e]} for e in adapted_cond] - - else: - cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient - - # apply model by loop over crops - output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] - assert not isinstance(output_list[0], - tuple) # todo cant deal with multiple model outputs check this never happens - - o = torch.stack(output_list, 
axis=-1) - o = o * weighting - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - x_recon = fold(o) / normalization - - else: - x_recon = self.model(x_noisy, t, **cond) - - if isinstance(x_recon, tuple) and not return_ids: - return x_recon[0] - else: - return x_recon - - def _predict_eps_from_xstart(self, x_t, t, pred_xstart): - return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) - - def _prior_bpd(self, x_start): - """ - Get the prior KL term for the variational lower-bound, measured in - bits-per-dim. - This term can't be optimized, as it only depends on the encoder. - :param x_start: the [N x C x ...] tensor of inputs. - :return: a batch of [N] KL values (in bits), one per batch element. - """ - batch_size = x_start.shape[0] - t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) - qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) - kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) - return mean_flat(kl_prior) / np.log(2.0) - - def p_losses(self, x_start, cond, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_output = self.apply_model(x_noisy, t, cond) - - loss_dict = {} - prefix = 'train' if self.training else 'val' - - if self.parameterization == "x0": - target = x_start - elif self.parameterization == "eps": - target = noise - else: - raise NotImplementedError() - - loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) - loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) - - logvar_t = self.logvar[t].to(self.device) - loss = loss_simple / torch.exp(logvar_t) + logvar_t - # loss = loss_simple / torch.exp(self.logvar) + self.logvar - if self.learn_logvar: - loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) - loss_dict.update({'logvar': self.logvar.data.mean()}) - - loss = self.l_simple_weight * loss.mean() - - loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) - loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() - loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) - loss += (self.original_elbo_weight * loss_vlb) - loss_dict.update({f'{prefix}/loss': loss}) - - return loss, loss_dict - - def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, - return_x0=False, score_corrector=None, corrector_kwargs=None): - t_in = t - model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) - - if score_corrector is not None: - assert self.parameterization == "eps" - model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) - - if return_codebook_ids: - model_out, logits = model_out - - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - else: - raise NotImplementedError() - - if clip_denoised: - x_recon.clamp_(-1., 1.) 
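# --- Editor's illustrative aside (not part of the deleted file): what
# predict_start_from_noise computes a few lines above under the "eps"
# parameterization, x_0 = sqrt(1/abar_t) * x_t - sqrt(1/abar_t - 1) * eps.
# The toy schedule and shapes are assumptions made only for this sketch.
import torch
_betas = torch.linspace(1e-4, 2e-2, 1000)
_abar = torch.cumprod(1.0 - _betas, dim=0)
def _toy_predict_x0(x_t, t, eps):
    c1 = (1.0 / _abar[t]).sqrt().view(-1, 1, 1, 1)        # sqrt_recip_alphas_cumprod
    c2 = (1.0 / _abar[t] - 1.0).sqrt().view(-1, 1, 1, 1)  # sqrt_recipm1_alphas_cumprod
    return c1 * x_t - c2 * eps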
- if quantize_denoised: - x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - if return_codebook_ids: - return model_mean, posterior_variance, posterior_log_variance, logits - elif return_x0: - return model_mean, posterior_variance, posterior_log_variance, x_recon - else: - return model_mean, posterior_variance, posterior_log_variance - - # @torch.no_grad() - def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, - return_codebook_ids=False, quantize_denoised=False, return_x0=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): - b, *_, device = *x.shape, x.device - outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, - return_codebook_ids=return_codebook_ids, - quantize_denoised=quantize_denoised, - return_x0=return_x0, - score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if return_codebook_ids: - raise DeprecationWarning("Support dropped.") - model_mean, _, model_log_variance, logits = outputs - elif return_x0: - model_mean, _, model_log_variance, x0 = outputs - else: - model_mean, _, model_log_variance = outputs - - noise = noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - - if return_codebook_ids: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) - if return_x0: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 - else: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - # @torch.no_grad() - def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, - img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., - score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, - log_every_t=None): - if not log_every_t: - log_every_t = self.log_every_t - timesteps = self.num_timesteps - if batch_size is not None: - b = batch_size if batch_size is not None else shape[0] - shape = [batch_size] + list(shape) - else: - b = batch_size = shape[0] - if x_T is None: - img = torch.randn(shape, device=self.device) - else: - img = x_T - intermediates = [] - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', - total=timesteps) if verbose else reversed( - range(0, timesteps)) - if type(temperature) == float: - temperature = [temperature] * timesteps - - for i in iterator: - ts = torch.full((b,), i, device=self.device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img, x0_partial = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised, return_x0=True, - temperature=temperature[i], noise_dropout=noise_dropout, - 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) - return img, intermediates - - # @torch.no_grad() - def p_sample_loop(self, cond, shape, return_intermediates=False, - x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, start_T=None, - log_every_t=None): - - if not log_every_t: - log_every_t = self.log_every_t - device = self.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - intermediates = [img] - if timesteps is None: - timesteps = self.num_timesteps - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( - range(0, timesteps)) - - if mask is not None: - assert x0 is not None - assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match - - for i in iterator: - ts = torch.full((b,), i, device=device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised) - if mask is not None: - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) - - if return_intermediates: - return img, intermediates - return img - - # @torch.no_grad() - def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, - verbose=True, timesteps=None, quantize_denoised=False, - mask=None, x0=None, shape=None,**kwargs): - if shape is None: - shape = (batch_size, self.channels, self.image_size, self.image_size) - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - return self.p_sample_loop(cond, - shape, - return_intermediates=return_intermediates, x_T=x_T, - verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, - mask=mask, x0=x0) - - # @torch.no_grad() - def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): - - if ddim: - ddim_sampler = DDIMSampler(self) - shape = (self.channels, self.image_size, self.image_size) - samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, - shape,cond,verbose=False,**kwargs) - - else: - samples, intermediates = self.sample(cond=cond, batch_size=batch_size, - return_intermediates=True,**kwargs) - - return samples, intermediates - - - # @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, - plot_diffusion_rows=True, **kwargs): - - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, - 
return_first_stage_outputs=True, - force_c_encode=True, - return_original_cond=True, - bs=N) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) - log["conditioning"] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( - self.first_stage_model, IdentityFirstStage): - # also display when quantizing x0 while sampling - with self.ema_scope("Plotting Quantized Denoised"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta, - quantize_denoised=True) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, - # quantize_denoised=True) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_x0_quantized"] = x_samples - - if inpaint: - # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] - mask = torch.ones(N, h, w).to(self.device) - # zeros will be filled in - mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. - mask = mask[:, None, ...] 
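# --- Editor's illustrative aside (not part of the deleted file): the mask built
# just above keeps the border (mask == 1) and regenerates the centre square
# (mask == 0); during sampling, p_sample_loop blends each step as
# img = q_sample(x0, t) * mask + (1 - mask) * img. Shapes here are assumptions.
import torch
_h = _w = 8
_mask = torch.ones(1, _h, _w)
_mask[:, _h // 4:3 * _h // 4, _w // 4:3 * _w // 4] = 0.0   # zeros will be filled in
_mask = _mask[:, None, ...]                                # (N, 1, H, W), as above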
- with self.ema_scope("Plotting Inpaint"): - - samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_inpainting"] = x_samples - log["mask"] = mask - - # outpaint - with self.ema_scope("Plotting Outpaint"): - samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_outpainting"] = x_samples - - if plot_progressive_rows: - with self.ema_scope("Plotting Progressives"): - img, progressives = self.progressive_denoising(c, - shape=(self.channels, self.image_size, self.image_size), - batch_size=N) - prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") - log["progressive_row"] = prog_row - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.cond_stage_trainable: - print(f"{self.__class__.__name__}: Also optimizing conditioner params!") - params = params + list(self.cond_stage_model.parameters()) - if self.learn_logvar: - print('Diffusion model optimizing logvar') - params.append(self.logvar) - opt = torch.optim.AdamW(params, lr=lr) - if self.use_scheduler: - assert 'target' in self.scheduler_config - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [opt], scheduler - return opt - - # @torch.no_grad() - def to_rgb(self, x): - x = x.float() - if not hasattr(self, "colorize"): - self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) - x = nn.functional.conv2d(x, weight=self.colorize) - x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
- return x - - -class DiffusionWrapper(pl.LightningModule): - def __init__(self, diff_model_config, conditioning_key): - super().__init__() - self.diffusion_model = instantiate_from_config(diff_model_config) - self.conditioning_key = conditioning_key - assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm'] - - def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): - if self.conditioning_key is None: - out = self.diffusion_model(x, t) - elif self.conditioning_key == 'concat': - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t) - elif self.conditioning_key == 'crossattn': - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(x, t, context=cc) - elif self.conditioning_key == 'hybrid': - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc) - elif self.conditioning_key == 'adm': - cc = c_crossattn[0] - out = self.diffusion_model(x, t, y=cc) - else: - raise NotImplementedError() - - return out - - -class Layout2ImgDiffusion(LatentDiffusion): - # TODO: move all layout-specific hacks to this class - def __init__(self, cond_stage_key, *args, **kwargs): - assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' - super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) - - def log_images(self, batch, N=8, *args, **kwargs): - logs = super().log_images(batch=batch, N=N, *args, **kwargs) - - key = 'train' if self.training else 'validation' - dset = self.trainer.datamodule.datasets[key] - mapper = dset.conditional_builders[self.cond_stage_key] - - bbox_imgs = [] - map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno)) - for tknzd_bbox in batch[self.cond_stage_key][:N]: - bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) - bbox_imgs.append(bboximg) - - cond_img = torch.stack(bbox_imgs, dim=0) - logs['bbox_image'] = cond_img - return logs diff --git a/spaces/PascalNotin/Tranception_design/tranception/utils/__init__.py b/spaces/PascalNotin/Tranception_design/tranception/utils/__init__.py deleted file mode 100644 index 90ad8879addca5701c02d6544c0c035f831ae93d..0000000000000000000000000000000000000000 --- a/spaces/PascalNotin/Tranception_design/tranception/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from . import scoring_utils, msa_utils \ No newline at end of file diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/bin/lilysong.py b/spaces/Pattr/DrumClassification/lilypond-2.24.2/bin/lilysong.py deleted file mode 100644 index bb025fce0526c1075a93f17cc477423a714a84a9..0000000000000000000000000000000000000000 --- a/spaces/Pattr/DrumClassification/lilypond-2.24.2/bin/lilysong.py +++ /dev/null @@ -1,273 +0,0 @@ -#!/home/lily/lilypond-2.24.2/release/binaries/dependencies/install/Python-3.10.8/bin/python3.10 - -# Copyright (c) 2006--2022 Brailcom, o.p.s. -# -# Author: Milan Zamazal -# -# This file is part of LilyPond, the GNU music typesetter. -# -# LilyPond is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# LilyPond is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with LilyPond. If not, see . - - -import codecs -import optparse -import os -import subprocess -import sys -import tempfile - -""" - -# relocate-preamble.py.in -# -# This file is part of LilyPond, the GNU music typesetter. -# -# Copyright (C) 2007--2022 Han-Wen Nienhuys -# -# LilyPond is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# LilyPond is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with LilyPond. If not, see . -# - -This is generic code, used for all python scripts. - -The quotes are to ensure that the source .py file can still be -run as a python script, but does not include any sys.path handling. -Otherwise, the lilypond-book calls inside the build -might modify installed .pyc files. - -""" - -# This is needed for installations with a non-default layout, ie where share/ -# is not next to bin/. -sys.path.insert (0, os.path.join ('/home/lily/lilypond-2.24.2/release/binaries/mingw/lilypond/install/share/lilypond/2.24.2', 'python')) - -# Dynamic relocation, for installations with a default layout including GUB, -# but also for execution from the build directory. -bindir = os.path.abspath (os.path.dirname (sys.argv[0])) -topdir = os.path.dirname (bindir) -if bindir.endswith (r'/scripts/out'): - topdir = os.path.join (os.path.dirname (topdir), 'out') -datadir = os.path.abspath (os.path.join (topdir, 'share', 'lilypond')) -for v in [ 'current', '2.24.2' ]: - sys.path.insert (0, os.path.join (datadir, v, 'python')) - -""" -""" - - -FESTIVAL_COMMAND = ['festival', '--pipe'] -VOICE_CODINGS = {'voice_czech_ph': 'iso-8859-2'} - -_USAGE = """lilysong [-p PLAY-PROGRAM] FILE.xml [LANGUAGE-CODE-OR-VOICE [SPEEDUP]] - lilysong FILE.ly [LANGUAGE-CODE-OR-VOICE] - lilysong --list-voices - lilysong --list-languages -""" - - -def usage(): - print('Usage:', _USAGE) - sys.exit(2) - - -def process_options(args): - parser = optparse.OptionParser(usage=_USAGE, version="2.24.2") - parser.add_option('', '--list-voices', action='store_true', dest='list_voices', - help="list available Festival voices") - parser.add_option('', '--list-languages', action='store_true', dest='list_languages', - help="list available Festival languages") - parser.add_option('-p', '--play-program', metavar='PROGRAM', - action='store', type='string', dest='play_program', - help="use PROGRAM to play song immediately") - options, args = parser.parse_args(args) - return options, args - - -def call_festival(scheme_code): - p = subprocess.Popen(FESTIVAL_COMMAND, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, close_fds=True) - p.stdin.write(scheme_code) - p.stdin.close() - answer = '' - while True: - process_output = p.stdout.read() - if not process_output: - break - answer = answer + process_output - return answer - - -def select_voice(language_or_voice): - if language_or_voice[:6] == 'voice_': - voice = language_or_voice - else: - voice = call_festival(''' -(let ((candidates '())) - (mapcar (lambda (v) - (if (eq (cadr (assoc 'language (cadr (voice.description v)))) '%s) - (set! 
candidates (cons v candidates)))) - (append (voice.list) (mapcar car Voice_descriptions))) - (if candidates - (format t "voice_%%s" (car candidates)) - (format t "nil"))) -''' % (language_or_voice,)) - if voice == 'nil': - voice = None - return voice - - -def list_voices(): - print(call_festival(''' -(let ((voices (voice.list)) - (print-voice (lambda (v) (format t "voice_%s\n" v)))) - (mapcar print-voice voices) - (mapcar (lambda (v) (if (not (member v voices)) (print-voice v))) - (mapcar car Voice_descriptions))) -''')) - - -def list_languages(): - print(call_festival(''' -(let ((languages '())) - (let ((voices (voice.list)) - (print-language (lambda (v) - (let ((language (cadr (assoc 'language (cadr (voice.description v)))))) - (if (and language (not (member language languages))) - (begin - (set! languages (cons language languages)) - (print language))))))) - (mapcar print-language voices) - (mapcar (lambda (v) (if (not (member v voices)) (print-language v))) - (mapcar car Voice_descriptions)))) -''')) - - -def process_xml_file(file_name, voice, speedup, play_program): - if speedup == 1: - speedup = None - coding = (VOICE_CODINGS.get(voice) or 'iso-8859-1') - _, xml_temp_file = tempfile.mkstemp('.xml') - try: - # recode the XML file - recodep = (coding != 'utf-8') - if recodep: - decode = codecs.getdecoder('utf-8') - encode = codecs.getencoder(coding) - input = open(file_name, encoding='utf-8') - output = open(xml_temp_file, 'w', encoding='utf-8') - while True: - data = input.read() - if not data: - break - if recodep: - data = encode(decode(data)[0])[0] - output.write(data) - output.close() - # synthesize - wav_file = file_name[:-3] + 'wav' - if speedup: - _, wav_temp_file = tempfile.mkstemp('.wav') - else: - wav_temp_file = wav_file - try: - print("text2wave -eval '(%s)' -mode singing '%s' -o '%s'" % - (voice, xml_temp_file, wav_temp_file,)) - result = os.system("text2wave -eval '(%s)' -mode singing '%s' -o '%s'" % - (voice, xml_temp_file, wav_temp_file,)) - if result: - sys.stdout.write("Festival processing failed.\n") - return - if speedup: - result = os.system("sox '%s' '%s' speed '%f'" % - (wav_temp_file, wav_file, speedup,)) - if result: - sys.stdout.write("Festival processing failed.\n") - return - finally: - if speedup: - try: - os.delete(wav_temp_file) - except OSError: - pass - sys.stdout.write("%s created.\n" % (wav_file,)) - # play - if play_program: - os.system("%s '%s' >/dev/null" % (play_program, wav_file,)) - finally: - try: - os.delete(xml_temp_file) - except OSError: - pass - - -def process_ly_file(file_name, voice): - result = os.system("lilypond '%s'" % (file_name,)) - if result: - return - xml_file = None - for f in os.listdir(os.path.dirname(file_name) or '.'): - if (f[-4:] == '.xml' and - (not xml_file or os.stat.st_mtime(f) > os.stat.st_mtime(xml_file))): - xml_file = f - if xml_file: - process_xml_file(xml_file, voice, None, None) - else: - sys.stderr.write("No XML file found\n") - - -def go(): - options, args = process_options(sys.argv[1:]) - if options.list_voices: - list_voices() - elif options.list_languages: - list_languages() - else: - arglen = len(args) - if arglen < 1: - usage() - file_name = args[0] - if arglen > 1: - language_or_voice = args[1] - voice = select_voice(language_or_voice) - else: - voice = None - if file_name[-3:] == '.ly': - if arglen > 2: - usage() - process_ly_file(file_name, voice) - else: - if arglen > 3: - usage() - elif arglen == 3: - try: - speedup = float(args[2]) - except ValueError: - usage() - else: - speedup = None - 
process_xml_file(file_name, voice, speedup, options.play_program) - - -if __name__ == '__main__': - go() diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/rnrs/hashtables.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/rnrs/hashtables.go deleted file mode 100644 index 817944d54447c7a69162d131dd97e6485eef7e83..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/rnrs/hashtables.go and /dev/null differ diff --git a/spaces/Plashkar/test-gradio-sdk/README.md b/spaces/Plashkar/test-gradio-sdk/README.md deleted file mode 100644 index 804053d9f30d4c808157ff37b090dea3fddff54e..0000000000000000000000000000000000000000 --- a/spaces/Plashkar/test-gradio-sdk/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Test Gradio Sdk -emoji: 👁 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.3 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Pranjal12345/Text_to_Speech/tortoise/models/arch_util.py b/spaces/Pranjal12345/Text_to_Speech/tortoise/models/arch_util.py deleted file mode 100644 index f678a0290cc16901b68bb46191a9f7df1001772a..0000000000000000000000000000000000000000 --- a/spaces/Pranjal12345/Text_to_Speech/tortoise/models/arch_util.py +++ /dev/null @@ -1,373 +0,0 @@ -import os -import functools -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchaudio -from tortoise.models.xtransformers import ContinuousTransformerWrapper, RelativePositionBias - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -class GroupNorm32(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - - -def normalization(channels): - """ - Make a standard normalization layer. - - :param channels: number of input channels. - :return: an nn.Module for normalization. - """ - groups = 32 - if channels <= 16: - groups = 8 - elif channels <= 64: - groups = 16 - while channels % groups != 0: - groups = int(groups / 2) - assert groups > 2 - return GroupNorm32(groups, channels) - - -class QKVAttentionLegacy(nn.Module): - """ - A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv, mask=None, rel_pos=None): - """ - Apply QKV attention. - - :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. - """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = torch.einsum( - "bct,bcs->bts", q * scale, k * scale - ) # More stable with f16 than dividing afterwards - if rel_pos is not None: - weight = rel_pos(weight.reshape(bs, self.n_heads, weight.shape[-2], weight.shape[-1])).reshape(bs * self.n_heads, weight.shape[-2], weight.shape[-1]) - weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) - if mask is not None: - # The proper way to do this is to mask before the softmax using -inf, but that doesn't work properly on CPUs. 
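- # multiplying the post-softmax weights by a 0/1 mask zeroes out masked positions without renormalizing the remaining weights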
- mask = mask.repeat(self.n_heads, 1).unsqueeze(1) - weight = weight * mask - a = torch.einsum("bts,bcs->bct", weight, v) - - return a.reshape(bs, -1, length) - - -class AttentionBlock(nn.Module): - """ - An attention block that allows spatial positions to attend to each other. - - Originally ported from here, but adapted to the N-d case. - https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. - """ - - def __init__( - self, - channels, - num_heads=1, - num_head_channels=-1, - do_checkpoint=True, - relative_pos_embeddings=False, - ): - super().__init__() - self.channels = channels - self.do_checkpoint = do_checkpoint - if num_head_channels == -1: - self.num_heads = num_heads - else: - assert ( - channels % num_head_channels == 0 - ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" - self.num_heads = channels // num_head_channels - self.norm = normalization(channels) - self.qkv = nn.Conv1d(channels, channels * 3, 1) - # split heads before split qkv - self.attention = QKVAttentionLegacy(self.num_heads) - - self.proj_out = zero_module(nn.Conv1d(channels, channels, 1)) - if relative_pos_embeddings: - self.relative_pos_embeddings = RelativePositionBias(scale=(channels // self.num_heads) ** .5, causal=False, heads=num_heads, num_buckets=32, max_distance=64) - else: - self.relative_pos_embeddings = None - - def forward(self, x, mask=None): - b, c, *spatial = x.shape - x = x.reshape(b, c, -1) - qkv = self.qkv(self.norm(x)) - h = self.attention(qkv, mask, self.relative_pos_embeddings) - h = self.proj_out(h) - return (x + h).reshape(b, c, *spatial) - - -class Upsample(nn.Module): - """ - An upsampling layer with an optional convolution. - - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - """ - - def __init__(self, channels, use_conv, out_channels=None, factor=4): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.factor = factor - if use_conv: - ksize = 5 - pad = 2 - self.conv = nn.Conv1d(self.channels, self.out_channels, ksize, padding=pad) - - def forward(self, x): - assert x.shape[1] == self.channels - x = F.interpolate(x, scale_factor=self.factor, mode="nearest") - if self.use_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - """ - A downsampling layer with an optional convolution. - - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. 
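- :param factor: downsampling factor, used as the stride of the convolution or as the kernel size and stride of the average pooling.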
- """ - - def __init__(self, channels, use_conv, out_channels=None, factor=4, ksize=5, pad=2): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - - stride = factor - if use_conv: - self.op = nn.Conv1d( - self.channels, self.out_channels, ksize, stride=stride, padding=pad - ) - else: - assert self.channels == self.out_channels - self.op = nn.AvgPool1d(kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.op(x) - - -class ResBlock(nn.Module): - def __init__( - self, - channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - up=False, - down=False, - kernel_size=3, - ): - super().__init__() - self.channels = channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_scale_shift_norm = use_scale_shift_norm - padding = 1 if kernel_size == 3 else 2 - - self.in_layers = nn.Sequential( - normalization(channels), - nn.SiLU(), - nn.Conv1d(channels, self.out_channels, kernel_size, padding=padding), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False) - self.x_upd = Upsample(channels, False) - elif down: - self.h_upd = Downsample(channels, False) - self.x_upd = Downsample(channels, False) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.out_layers = nn.Sequential( - normalization(self.out_channels), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module( - nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding) - ), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = nn.Conv1d( - channels, self.out_channels, kernel_size, padding=padding - ) - else: - self.skip_connection = nn.Conv1d(channels, self.out_channels, 1) - - def forward(self, x): - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AudioMiniEncoder(nn.Module): - def __init__(self, - spec_dim, - embedding_dim, - base_channels=128, - depth=2, - resnet_blocks=2, - attn_blocks=4, - num_attn_heads=4, - dropout=0, - downsample_factor=2, - kernel_size=3): - super().__init__() - self.init = nn.Sequential( - nn.Conv1d(spec_dim, base_channels, 3, padding=1) - ) - ch = base_channels - res = [] - for l in range(depth): - for r in range(resnet_blocks): - res.append(ResBlock(ch, dropout, kernel_size=kernel_size)) - res.append(Downsample(ch, use_conv=True, out_channels=ch*2, factor=downsample_factor)) - ch *= 2 - self.res = nn.Sequential(*res) - self.final = nn.Sequential( - normalization(ch), - nn.SiLU(), - nn.Conv1d(ch, embedding_dim, 1) - ) - attn = [] - for a in range(attn_blocks): - attn.append(AttentionBlock(embedding_dim, num_attn_heads,)) - self.attn = nn.Sequential(*attn) - self.dim = embedding_dim - - def forward(self, x): - h = self.init(x) - h = self.res(h) - h = self.final(h) - h = self.attn(h) - return h[:, :, 0] - - -DEFAULT_MEL_NORM_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/mel_norms.pth') - - -class TorchMelSpectrogram(nn.Module): - def __init__(self, filter_length=1024, hop_length=256, win_length=1024, n_mel_channels=80, mel_fmin=0, mel_fmax=8000, - sampling_rate=22050, normalize=False, mel_norm_file=DEFAULT_MEL_NORM_FILE): - super().__init__() - # These 
are the default tacotron values for the MEL spectrogram. - self.filter_length = filter_length - self.hop_length = hop_length - self.win_length = win_length - self.n_mel_channels = n_mel_channels - self.mel_fmin = mel_fmin - self.mel_fmax = mel_fmax - self.sampling_rate = sampling_rate - self.mel_stft = torchaudio.transforms.MelSpectrogram(n_fft=self.filter_length, hop_length=self.hop_length, - win_length=self.win_length, power=2, normalized=normalize, - sample_rate=self.sampling_rate, f_min=self.mel_fmin, - f_max=self.mel_fmax, n_mels=self.n_mel_channels, - norm="slaney") - self.mel_norm_file = mel_norm_file - if self.mel_norm_file is not None: - self.mel_norms = torch.load(self.mel_norm_file) - else: - self.mel_norms = None - - def forward(self, inp): - if len(inp.shape) == 3: # Automatically squeeze out the channels dimension if it is present (assuming mono-audio) - inp = inp.squeeze(1) - assert len(inp.shape) == 2 - if torch.backends.mps.is_available(): - inp = inp.to('cpu') - self.mel_stft = self.mel_stft.to(inp.device) - mel = self.mel_stft(inp) - # Perform dynamic range compression - mel = torch.log(torch.clamp(mel, min=1e-5)) - if self.mel_norms is not None: - self.mel_norms = self.mel_norms.to(mel.device) - mel = mel / self.mel_norms.unsqueeze(0).unsqueeze(-1) - return mel - - -class CheckpointedLayer(nn.Module): - """ - Wraps a module. When forward() is called, passes kwargs that require_grad through torch.checkpoint() and bypasses - checkpoint for all other args. - """ - def __init__(self, wrap): - super().__init__() - self.wrap = wrap - - def forward(self, x, *args, **kwargs): - for k, v in kwargs.items(): - assert not (isinstance(v, torch.Tensor) and v.requires_grad) # This would screw up checkpointing. - partial = functools.partial(self.wrap, **kwargs) - return partial(x, *args) - - -class CheckpointedXTransformerEncoder(nn.Module): - """ - Wraps a ContinuousTransformerWrapper and applies CheckpointedLayer to each layer and permutes from channels-mid - to channels-last that XTransformer expects. - """ - def __init__(self, needs_permute=True, exit_permute=True, checkpoint=True, **xtransformer_kwargs): - super().__init__() - self.transformer = ContinuousTransformerWrapper(**xtransformer_kwargs) - self.needs_permute = needs_permute - self.exit_permute = exit_permute - - if not checkpoint: - return - for i in range(len(self.transformer.attn_layers.layers)): - n, b, r = self.transformer.attn_layers.layers[i] - self.transformer.attn_layers.layers[i] = nn.ModuleList([n, CheckpointedLayer(b), r]) - - def forward(self, x, **kwargs): - if self.needs_permute: - x = x.permute(0,2,1) - h = self.transformer(x, **kwargs) - if self.exit_permute: - h = h.permute(0,2,1) - return h \ No newline at end of file diff --git a/spaces/PurplePanda00/plant-leaf-detection/app.py b/spaces/PurplePanda00/plant-leaf-detection/app.py deleted file mode 100644 index 8c0f90f546f7f046a376a79eced412041354e052..0000000000000000000000000000000000000000 --- a/spaces/PurplePanda00/plant-leaf-detection/app.py +++ /dev/null @@ -1,27 +0,0 @@ -import tensorflow as tf -import gradio as gr - -model = tf.keras.models.load_model('50epoch.48-0.06.h5') -labels = ['Diseased', 'Healthy'] - -def classify_images(inp): - inp = inp[None, ...] 
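- # add a leading batch dimension: (H, W, C) -> (1, H, W, C)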
- inp = tf.keras.applications.resnet.preprocess_input(inp) - prediction = model.predict(inp).flatten() - return {labels[i]: float(prediction[i]) for i in range(len(labels))} - -image = gr.Image(shape=(224, 224)) -label = gr.Label(num_top_classes=3) - -examples = [ - ["aug_Durian diseased__0_9801.jpg"], - ["aug_Durian healthy__0_163.jpg"], - ["aug_Guava___diseased__0_596.jpg"], - ["aug_Guava___healthy__0_1324.jpg"], - ["aug_Mango___diseased__0_980.jpg"], - ["aug_Mango___healthy__0_3867.jpg"], - ["aug_Rambutan diseased__0_388.jpg"], - ["aug_Rambutan healthy__0_3427.jpg"], -] - -gr.Interface(fn=classify_images, inputs=image, outputs=label, interpretation="default",examples=examples).launch(share=False) \ No newline at end of file diff --git a/spaces/QINGCHE/TSA/util.py b/spaces/QINGCHE/TSA/util.py deleted file mode 100644 index ebbdae9b4a863356f75ddb063ae8909c02fb4aea..0000000000000000000000000000000000000000 --- a/spaces/QINGCHE/TSA/util.py +++ /dev/null @@ -1,96 +0,0 @@ -import json -import jieba -import re -import requests -import backoff -import time - - -@backoff.on_exception(backoff.expo, requests.exceptions.RequestException) -def post_url(url, headers, payload): - time.sleep(1) - response = requests.request("POST", url, headers=headers, data=payload) - return response - - -def seg(text): - text = text.replace('\n', " ") - sentences = re.split(r'(?<=[。!?.!?: ])\s*', text) - sentences = [string for string in sentences if string != ''] - return sentences - - -def clean_text(text): - text = text.replace('\n', "") - text = re.sub(r"-", " ", text) - text = re.sub(r"\d+/\d+/\d+", "", text) # 日期 - text = re.sub(r"[0-2]?[0-9]:[0-6][0-9]", "", text) # 时间 - text = re.sub( - r"/[a-zA-Z]*[:\//\]*[A-Za-z0-9\-_]+\.+[A-Za-z0-9\.\/%&=\?\-_]+/i", "", text) # 网址 - pure_text = '' - for letter in text: - if letter.isalpha() or letter == ' ': - pure_text += letter - - text = ' '.join(word for word in pure_text.split() if len(word) > 1) - return text - - -def article_to_group(groups, topics): - para = {} - for i in groups: - if not i[1] in para: - para[i[1]] = i[0] - else: - para[i[1]] = para[i[1]] + i[0] - return para - - -def generation(para, max_length): - API_KEY = "IZt1uK9PAI0LiqleqT0cE30b" - SECRET_KEY = "Xv5kHB8eyhNuI1B1G7fRgm2SIPdlxGxs" - - def get_access_token(): - - url = "https://aip.baidubce.com/oauth/2.0/token" - params = {"grant_type": "client_credentials", - "client_id": API_KEY, "client_secret": SECRET_KEY} - return str(requests.post(url, params=params).json().get("access_token")) - - url = "https://aip.baidubce.com/rpc/2.0/nlp/v1/news_summary?charset=UTF-8&access_token=" + get_access_token() - topic = {} - Ai_abstract = [] - for i, (j, k) in enumerate(para.items()): - input_text = k - # print(k) - payload = json.dumps({ - "content": k, - "max_summary_len": max_length - }) - headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/json' - } - - # response = post_url(url, headers, payload) - # text_dict = json.loads(response.text) - # print(text_dict) - # while('summary' not in text_dict.keys()): - # response = post_url(url, headers, payload) - # text_dict = json.loads(response.text) - # print("ReTrying") - - # topic[text_dict['summary']] = (j, k) - # Ai_abstract.append(text_dict['summary']) - topic[j] = (j, k) - Ai_abstract.append(j) - return topic,Ai_abstract -def formate_text(title_dict,outline_list): - formated = [] - for each in outline_list: - if(each not in title_dict.keys()): - formated.append(f"# {each}") - if(each in title_dict.keys()): - formated.append(f"## 
{each}") - formated.append(title_dict[each][1]) - return formated \ No newline at end of file diff --git a/spaces/Qiukai/gpt/README.md b/spaces/Qiukai/gpt/README.md deleted file mode 100644 index 01bac90e809880f1ae2f10527edaede5a0535b51..0000000000000000000000000000000000000000 --- a/spaces/Qiukai/gpt/README.md +++ /dev/null @@ -1,274 +0,0 @@ ---- -title: ChatImprovement -emoji: 😻 -colorFrom: blue -colorTo: blue -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -duplicated_from: qingxu98/gpt-academic ---- - - -# ChatGPT 学术优化 - -**如果喜欢这个项目,请给它一个Star;如果你发明了更好用的快捷键或函数插件,欢迎发issue或者pull requests(dev分支)** - -If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request (to `dev` branch). - -``` -代码中参考了很多其他优秀项目中的设计,主要包括: - -# 借鉴项目1:借鉴了ChuanhuChatGPT中读取OpenAI json的方法、记录历史问询记录的方法以及gradio queue的使用技巧 -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# 借鉴项目2:借鉴了mdtex2html中公式处理的方法 -https://github.com/polarwinkel/mdtex2html - -项目使用OpenAI的gpt-3.5-turbo模型,期待gpt-4早点放宽门槛😂 -``` - -> **Note** -> -> 1.请注意只有“红颜色”标识的函数插件(按钮)才支持读取文件。目前对pdf/word格式文件的支持插件正在逐步完善中,需要更多developer的帮助。 -> -> 2.本项目中每个文件的功能都在自译解[`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题汇总在[`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)当中。 -> -> 3.如果您不太习惯部分中文命名的函数、注释或者界面,您可以随时点击相关函数插件,调用ChatGPT一键生成纯英文的项目源代码。 - -
-
-Feature | Description
---- | ---
-One-click polishing | Supports one-click polishing and one-click grammar checking for papers
-One-click Chinese-English translation | One-click translation between Chinese and English
-One-click code explanation | Displays and explains code correctly
-Custom shortcut keys | Supports user-defined shortcut keys
-Proxy server configuration | Supports configuring a proxy server
-Modular design | Supports custom high-level experimental features and [function plugins]; plugins support [hot reload](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-Program self-analysis | [Function plugin] One-click walkthrough of this project's own source code
-Project analysis | [Function plugin] One-click analysis of other Python/C/C++/Java project trees
-Paper reading | [Function plugin] One-click interpretation of a full LaTeX paper with abstract generation
-Batch comment generation | [Function plugin] One-click batch generation of function comments
-Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
-arxiv assistant | [Function plugin] Enter an arxiv article URL to translate the abstract and download the PDF in one click
-Formula display | Shows formulas in both their TeX form and their rendered form
-Image display | Can display images in markdown
-Multi-threaded function plugin support | Supports multi-threaded calls to chatgpt, processing large amounts of text or code in one click
-Markdown tables in GPT output | Can render markdown tables produced by GPT
-…… | ……
-
-
        - - -- 新界面 -
        - -
        - - -- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放粘贴板 -
        - -
        - -- 润色/纠错 -
        - -
        - - -- 支持GPT输出的markdown表格 -
        - -
        - -- 如果输出包含公式,会同时以tex形式和渲染形式显示,方便复制和阅读 -
        - -
        - - -- 懒得看项目代码?整个工程直接给chatgpt炫嘴里 -
        - -
-
-## Run directly (Windows, Linux or MacOS)
-
-### 1. Download the project
-```sh
-git clone https://github.com/binary-husky/chatgpt_academic.git
-cd chatgpt_academic
-```
-
-### 2. Configure API_KEY and proxy settings
-
-In `config.py`, configure the overseas proxy and your OpenAI API KEY, as explained below
-```
-1. If you are inside mainland China, you need an overseas proxy to use the OpenAI API smoothly; read config.py carefully for the setup (1. set USE_PROXY to True; 2. edit proxies as described there).
-2. Configure the OpenAI API KEY. You need to register on the OpenAI website and obtain an API KEY; once you have it, set it in the config.py file.
-3. Issues related to proxy networks (timeouts, proxy not working) are collected at https://github.com/binary-husky/chatgpt_academic/issues/1
-```
-(P.S. At startup the program first checks whether a private configuration file named `config_private.py` exists and uses its settings to override the same-named settings in `config.py`. If you understand this loading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and moving (copying) the settings from `config.py` into it. `config_private.py` is not tracked by git, which keeps your private information safer.)
-
-
-### 3. Install dependencies
-```sh
-# (Option 1) Recommended
-python -m pip install -r requirements.txt
-
-# (Option 2) If you use anaconda, the steps are similar:
-# (Option 2.1) conda create -n gptac_venv python=3.11
-# (Option 2.2) conda activate gptac_venv
-# (Option 2.3) python -m pip install -r requirements.txt
-
-# Note: use the official pip index or the Aliyun mirror; other mirrors (such as the Tsinghua mirror) may cause problems. To switch temporarily:
-# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-```
-
-### 4. Run
-```sh
-python main.py
-```
-
-### 5. Try the experimental features
-```
-- Test C++ project header analysis
-  Enter `./crazy_functions/test_project/cpp/libJPG` in the input area, then click "[实验] 解析整个C++项目(input输入项目根路径)"
-- Test writing an abstract for a LaTeX project
-  Enter `./crazy_functions/test_project/latex/attention` in the input area, then click "[实验] 读tex论文写摘要(input输入项目根路径)"
-- Test Python project analysis
-  Enter `./crazy_functions/test_project/python/dqn` in the input area, then click "[实验] 解析整个py项目(input输入项目根路径)"
-- Test self code interpretation
-  Click "[实验] 请解析并解构此项目本身"
-- Test the experimental function template (asks GPT what happened in history on this day); you can use it as a template for more complex features
-  Click "[实验] 实验功能函数模板"
-```
-
-## Use docker (Linux)
-
-``` sh
-# Download the project
-git clone https://github.com/binary-husky/chatgpt_academic.git
-cd chatgpt_academic
-# Configure the overseas proxy and the OpenAI API KEY
-Edit config.py with any text editor
-# Build
-docker build -t gpt-academic .
-# Run
-docker run --rm -it --net=host gpt-academic
-
-# Try the experimental features
-## Test self code interpretation
-Click "[实验] 请解析并解构此项目本身"
-## Test the experimental function template (asks GPT what happened in history on this day); you can use it as a template for more complex features
-Click "[实验] 实验功能函数模板"
-## (Note: when running inside docker, pay extra attention to the program's file access permissions)
-## Test C++ project header analysis
-Enter ./crazy_functions/test_project/cpp/libJPG in the input area, then click "[实验] 解析整个C++项目(input输入项目根路径)"
-## Test writing an abstract for a LaTeX project
-Enter ./crazy_functions/test_project/latex/attention in the input area, then click "[实验] 读tex论文写摘要(input输入项目根路径)"
-## Test Python project analysis
-Enter ./crazy_functions/test_project/python/dqn in the input area, then click "[实验] 解析整个py项目(input输入项目根路径)"
-
-```
-
-## Other deployment options
-- Using WSL2 (Windows Subsystem for Linux)
-See [deployment wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-- Remote deployment behind nginx
-See [deployment wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E7%9A%84%E6%8C%87%E5%AF%BC)
-
-
-## Define new convenience buttons (customize academic shortcut keys)
-Open functional.py, add an entry as shown below, and restart the program. (If the button has already been added and is visible, the prefix and suffix support hot modification and take effect without restarting the program.)
-For example
-```
-"超级英译中": {
-
-    # Prefix: added before your input, e.g. to describe your request, such as translation, code explanation, or polishing
-    "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n",
-
-    # Suffix: added after your input, e.g. used together with the prefix to wrap your input in quotes
-    "Suffix": "",
-
-},
-```
-
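For orientation, an additional entry of the same shape could look like the sketch below. The button name and the prompt wording are invented for illustration; only the `Prefix`/`Suffix` keys and the dictionary layout come from the example above.

```python
# Hypothetical extra button, written as a standalone dict for illustration;
# in the project itself the inner entry would be added to the dictionary in functional.py.
extra_buttons = {
    "英文学术润色 (Polish academic English)": {
        # Prepended before the user's input: states the task for the model.
        "Prefix": (
            "Please polish the following paragraph into fluent academic English "
            "and list the changes you made:\n\n"
        ),
        # Appended after the user's input; empty here, but it could close a quote opened by the prefix.
        "Suffix": "",
    },
}
```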
        - -
-
-
-If you come up with a more useful academic shortcut key, feel free to open an issue or a pull request!
-
-## Configure the proxy
-### Method 1: the regular way
-In ```config.py```, change the port so that it matches your proxy software (a sketch follows below).
-
-
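A rough sketch of what that edit usually involves is shown below. Only `USE_PROXY` and `proxies` are named in the instructions above; the protocol, address, and port here are placeholders to be replaced with whatever your own proxy client exposes.

```python
# Hypothetical excerpt of config.py -- replace the protocol and port with your proxy client's settings.
USE_PROXY = True
proxies = {
    # requests-style proxy map: scheme -> URL of the local proxy endpoint
    "http":  "socks5h://localhost:11284",
    "https": "socks5h://localhost:11284",
}
```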
        - - -
-
-Once the configuration is done, you can test whether the proxy works with the command below; if everything is normal, the code will print the location of your proxy server:
-```
-python check_proxy.py
-```
-### Method 2: tutorial for complete beginners
-[Beginner tutorial](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)
-
-## Compatibility tests
-
-### Image display:
-
-
        - -
        - - -### 如果一个程序能够读懂并剖析自己: - -
        - -
        - -
        - -
        - -### 其他任意Python/Cpp项目剖析: -
        - -
        - -
        - -
        - -### Latex论文一键阅读理解与摘要生成 -
        - -
        - -### 自动报告生成 -
        - - - -
        - -### 模块化功能设计 -
        - - -
        - -## Todo: - -- (Top Priority) 调用另一个开源项目text-generation-webui的web接口,使用其他llm模型 -- 总结大工程源代码时,文本过长、token溢出的问题(目前的方法是直接二分丢弃处理溢出,过于粗暴,有效信息大量丢失) - - diff --git a/spaces/RMXK/RVC_HFF/infer/lib/uvr5_pack/lib_v5/dataset.py b/spaces/RMXK/RVC_HFF/infer/lib/uvr5_pack/lib_v5/dataset.py deleted file mode 100644 index cfd01a174978d97180a897e40cb59ecadec1d12e..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/infer/lib/uvr5_pack/lib_v5/dataset.py +++ /dev/null @@ -1,183 +0,0 @@ -import os -import random - -import numpy as np -import torch -import torch.utils.data -from tqdm import tqdm - -from . import spec_utils - - -class VocalRemoverValidationSet(torch.utils.data.Dataset): - def __init__(self, patch_list): - self.patch_list = patch_list - - def __len__(self): - return len(self.patch_list) - - def __getitem__(self, idx): - path = self.patch_list[idx] - data = np.load(path) - - X, y = data["X"], data["y"] - - X_mag = np.abs(X) - y_mag = np.abs(y) - - return X_mag, y_mag - - -def make_pair(mix_dir, inst_dir): - input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"] - - X_list = sorted( - [ - os.path.join(mix_dir, fname) - for fname in os.listdir(mix_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - y_list = sorted( - [ - os.path.join(inst_dir, fname) - for fname in os.listdir(inst_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - - filelist = list(zip(X_list, y_list)) - - return filelist - - -def train_val_split(dataset_dir, split_mode, val_rate, val_filelist): - if split_mode == "random": - filelist = make_pair( - os.path.join(dataset_dir, "mixtures"), - os.path.join(dataset_dir, "instruments"), - ) - - random.shuffle(filelist) - - if len(val_filelist) == 0: - val_size = int(len(filelist) * val_rate) - train_filelist = filelist[:-val_size] - val_filelist = filelist[-val_size:] - else: - train_filelist = [ - pair for pair in filelist if list(pair) not in val_filelist - ] - elif split_mode == "subdirs": - if len(val_filelist) != 0: - raise ValueError( - "The `val_filelist` option is not available in `subdirs` mode" - ) - - train_filelist = make_pair( - os.path.join(dataset_dir, "training/mixtures"), - os.path.join(dataset_dir, "training/instruments"), - ) - - val_filelist = make_pair( - os.path.join(dataset_dir, "validation/mixtures"), - os.path.join(dataset_dir, "validation/instruments"), - ) - - return train_filelist, val_filelist - - -def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha): - perm = np.random.permutation(len(X)) - for i, idx in enumerate(tqdm(perm)): - if np.random.uniform() < reduction_rate: - y[idx] = spec_utils.reduce_vocal_aggressively( - X[idx], y[idx], reduction_mask - ) - - if np.random.uniform() < 0.5: - # swap channel - X[idx] = X[idx, ::-1] - y[idx] = y[idx, ::-1] - if np.random.uniform() < 0.02: - # mono - X[idx] = X[idx].mean(axis=0, keepdims=True) - y[idx] = y[idx].mean(axis=0, keepdims=True) - if np.random.uniform() < 0.02: - # inst - X[idx] = y[idx] - - if np.random.uniform() < mixup_rate and i < len(perm) - 1: - lam = np.random.beta(mixup_alpha, mixup_alpha) - X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]] - y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]] - - return X, y - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset): - len_dataset = patches * 
len(filelist) - - X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches) - ends = starts + cropsize - for j in range(patches): - idx = i * patches + j - X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]] - y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]] - - return X_dataset, y_dataset - - -def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset): - patch_list = [] - patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format( - cropsize, sr, hop_length, n_fft, offset - ) - os.makedirs(patch_dir, exist_ok=True) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - basename = os.path.splitext(os.path.basename(X_path))[0] - - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - len_dataset = int(np.ceil(X.shape[2] / roi_size)) - for j in range(len_dataset): - outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j)) - start = j * roi_size - if not os.path.exists(outpath): - np.savez( - outpath, - X=X_pad[:, :, start : start + cropsize], - y=y_pad[:, :, start : start + cropsize], - ) - patch_list.append(outpath) - - return VocalRemoverValidationSet(patch_list) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/core.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/core.py deleted file mode 100644 index de13978f02aa85ac70aa49a0d39178cbba913199..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/core.py +++ /dev/null @@ -1,291 +0,0 @@ -"""distutils.core - -The only module that needs to be imported to use the Distutils; provides -the 'setup' function (which is to be called from the setup script). Also -indirectly provides the Distribution and Command classes, although they are -really defined in distutils.dist and distutils.cmd. -""" - -import os -import sys -import tokenize - -from distutils.debug import DEBUG -from distutils.errors import ( - DistutilsSetupError, - DistutilsError, - CCompilerError, - DistutilsArgError, -) - -# Mainly import these so setup scripts can "from distutils.core import" them. -from distutils.dist import Distribution -from distutils.cmd import Command -from distutils.config import PyPIRCCommand -from distutils.extension import Extension - - -__all__ = ['Distribution', 'Command', 'PyPIRCCommand', 'Extension', 'setup'] - -# This is a barebones help message generated displayed when the user -# runs the setup script with no arguments at all. More useful help -# is generated with various --help options: global help, list commands, -# and per-command help. 
-USAGE = """\ -usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...] - or: %(script)s --help [cmd1 cmd2 ...] - or: %(script)s --help-commands - or: %(script)s cmd --help -""" - - -def gen_usage(script_name): - script = os.path.basename(script_name) - return USAGE % locals() - - -# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'. -_setup_stop_after = None -_setup_distribution = None - -# Legal keyword arguments for the setup() function -setup_keywords = ( - 'distclass', - 'script_name', - 'script_args', - 'options', - 'name', - 'version', - 'author', - 'author_email', - 'maintainer', - 'maintainer_email', - 'url', - 'license', - 'description', - 'long_description', - 'keywords', - 'platforms', - 'classifiers', - 'download_url', - 'requires', - 'provides', - 'obsoletes', -) - -# Legal keyword arguments for the Extension constructor -extension_keywords = ( - 'name', - 'sources', - 'include_dirs', - 'define_macros', - 'undef_macros', - 'library_dirs', - 'libraries', - 'runtime_library_dirs', - 'extra_objects', - 'extra_compile_args', - 'extra_link_args', - 'swig_opts', - 'export_symbols', - 'depends', - 'language', -) - - -def setup(**attrs): # noqa: C901 - """The gateway to the Distutils: do everything your setup script needs - to do, in a highly flexible and user-driven way. Briefly: create a - Distribution instance; find and parse config files; parse the command - line; run each Distutils command found there, customized by the options - supplied to 'setup()' (as keyword arguments), in config files, and on - the command line. - - The Distribution instance might be an instance of a class supplied via - the 'distclass' keyword argument to 'setup'; if no such class is - supplied, then the Distribution class (in dist.py) is instantiated. - All other arguments to 'setup' (except for 'cmdclass') are used to set - attributes of the Distribution instance. - - The 'cmdclass' argument, if supplied, is a dictionary mapping command - names to command classes. Each command encountered on the command line - will be turned into a command class, which is in turn instantiated; any - class found in 'cmdclass' is used in place of the default, which is - (for command 'foo_bar') class 'foo_bar' in module - 'distutils.command.foo_bar'. The command class must provide a - 'user_options' attribute which is a list of option specifiers for - 'distutils.fancy_getopt'. Any command-line options between the current - and the next command are used to set attributes of the current command - object. - - When the entire command-line has been successfully parsed, calls the - 'run()' method on each command object in turn. This method will be - driven entirely by the Distribution object (which each command object - has a reference to, thanks to its constructor), and the - command-specific options that became attributes of each command - object. - """ - - global _setup_stop_after, _setup_distribution - - # Determine the distribution class -- either caller-supplied or - # our Distribution (see below). - klass = attrs.get('distclass') - if klass: - del attrs['distclass'] - else: - klass = Distribution - - if 'script_name' not in attrs: - attrs['script_name'] = os.path.basename(sys.argv[0]) - if 'script_args' not in attrs: - attrs['script_args'] = sys.argv[1:] - - # Create the Distribution instance, using the remaining arguments - # (ie. 
everything except distclass) to initialize it - try: - _setup_distribution = dist = klass(attrs) - except DistutilsSetupError as msg: - if 'name' not in attrs: - raise SystemExit("error in setup command: %s" % msg) - else: - raise SystemExit("error in {} setup command: {}".format(attrs['name'], msg)) - - if _setup_stop_after == "init": - return dist - - # Find and parse the config file(s): they will override options from - # the setup script, but be overridden by the command line. - dist.parse_config_files() - - if DEBUG: - print("options (after parsing config files):") - dist.dump_option_dicts() - - if _setup_stop_after == "config": - return dist - - # Parse the command line and override config files; any - # command-line errors are the end user's fault, so turn them into - # SystemExit to suppress tracebacks. - try: - ok = dist.parse_command_line() - except DistutilsArgError as msg: - raise SystemExit(gen_usage(dist.script_name) + "\nerror: %s" % msg) - - if DEBUG: - print("options (after parsing command line):") - dist.dump_option_dicts() - - if _setup_stop_after == "commandline": - return dist - - # And finally, run all the commands found on the command line. - if ok: - return run_commands(dist) - - return dist - - -# setup () - - -def run_commands(dist): - """Given a Distribution object run all the commands, - raising ``SystemExit`` errors in the case of failure. - - This function assumes that either ``sys.argv`` or ``dist.script_args`` - is already set accordingly. - """ - try: - dist.run_commands() - except KeyboardInterrupt: - raise SystemExit("interrupted") - except OSError as exc: - if DEBUG: - sys.stderr.write("error: {}\n".format(exc)) - raise - else: - raise SystemExit("error: {}".format(exc)) - - except (DistutilsError, CCompilerError) as msg: - if DEBUG: - raise - else: - raise SystemExit("error: " + str(msg)) - - return dist - - -def run_setup(script_name, script_args=None, stop_after="run"): - """Run a setup script in a somewhat controlled environment, and - return the Distribution instance that drives things. This is useful - if you need to find out the distribution meta-data (passed as - keyword args from 'script' to 'setup()', or the contents of the - config files or command-line. - - 'script_name' is a file that will be read and run with 'exec()'; - 'sys.argv[0]' will be replaced with 'script' for the duration of the - call. 'script_args' is a list of strings; if supplied, - 'sys.argv[1:]' will be replaced by 'script_args' for the duration of - the call. - - 'stop_after' tells 'setup()' when to stop processing; possible - values: - init - stop after the Distribution instance has been created and - populated with the keyword arguments to 'setup()' - config - stop after config files have been parsed (and their data - stored in the Distribution instance) - commandline - stop after the command-line ('sys.argv[1:]' or 'script_args') - have been parsed (and the data stored in the Distribution) - run [default] - stop after all commands have been run (the same as if 'setup()' - had been called in the usual way - - Returns the Distribution instance, which provides all information - used to drive the Distutils. 
- """ - if stop_after not in ('init', 'config', 'commandline', 'run'): - raise ValueError("invalid value for 'stop_after': {!r}".format(stop_after)) - - global _setup_stop_after, _setup_distribution - _setup_stop_after = stop_after - - save_argv = sys.argv.copy() - g = {'__file__': script_name, '__name__': '__main__'} - try: - try: - sys.argv[0] = script_name - if script_args is not None: - sys.argv[1:] = script_args - # tokenize.open supports automatic encoding detection - with tokenize.open(script_name) as f: - code = f.read().replace(r'\r\n', r'\n') - exec(code, g) - finally: - sys.argv = save_argv - _setup_stop_after = None - except SystemExit: - # Hmm, should we do something if exiting with a non-zero code - # (ie. error)? - pass - - if _setup_distribution is None: - raise RuntimeError( - ( - "'distutils.core.setup()' was never called -- " - "perhaps '%s' is not a Distutils setup script?" - ) - % script_name - ) - - # I wonder if the setup script's namespace -- g and l -- would be of - # any interest to callers? - # print "_setup_distribution:", _setup_distribution - return _setup_distribution - - -# run_setup () diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py deleted file mode 100644 index 71f66bd03cb713a2190853bdf7170c4ea80d2425..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py +++ /dev/null @@ -1,104 +0,0 @@ -import types -import functools - - -# from jaraco.functools 3.3 -def method_cache(method, cache_wrapper=None): - """ - Wrap lru_cache to support storing the cache data in the object instances. - - Abstracts the common paradigm where the method explicitly saves an - underscore-prefixed protected property on first call and returns that - subsequently. - - >>> class MyClass: - ... calls = 0 - ... - ... @method_cache - ... def method(self, value): - ... self.calls += 1 - ... return value - - >>> a = MyClass() - >>> a.method(3) - 3 - >>> for x in range(75): - ... res = a.method(x) - >>> a.calls - 75 - - Note that the apparent behavior will be exactly like that of lru_cache - except that the cache is stored on each instance, so values in one - instance will not flush values from another, and when an instance is - deleted, so are the cached values for that instance. - - >>> b = MyClass() - >>> for x in range(35): - ... res = b.method(x) - >>> b.calls - 35 - >>> a.method(0) - 0 - >>> a.calls - 75 - - Note that if method had been decorated with ``functools.lru_cache()``, - a.calls would have been 76 (due to the cached value of 0 having been - flushed by the 'b' instance). - - Clear the cache with ``.cache_clear()`` - - >>> a.method.cache_clear() - - Same for a method that hasn't yet been called. - - >>> c = MyClass() - >>> c.method.cache_clear() - - Another cache wrapper may be supplied: - - >>> cache = functools.lru_cache(maxsize=2) - >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) - >>> a = MyClass() - >>> a.method2() - 3 - - Caution - do not subsequently wrap the method with another decorator, such - as ``@property``, which changes the semantics of the function. - - See also - http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ - for another implementation and additional justification. 
- """ - cache_wrapper = cache_wrapper or functools.lru_cache() - - def wrapper(self, *args, **kwargs): - # it's the first call, replace the method with a cached, bound method - bound_method = types.MethodType(method, self) - cached_method = cache_wrapper(bound_method) - setattr(self, method.__name__, cached_method) - return cached_method(*args, **kwargs) - - # Support cache clear even before cache has been created. - wrapper.cache_clear = lambda: None - - return wrapper - - -# From jaraco.functools 3.3 -def pass_none(func): - """ - Wrap func so it's not called if its first param is None - - >>> print_text = pass_none(print) - >>> print_text('text') - text - >>> print_text(None) - """ - - @functools.wraps(func) - def wrapper(param, *args, **kwargs): - if param is not None: - return func(param, *args, **kwargs) - - return wrapper diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools_rust/command.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools_rust/command.py deleted file mode 100644 index bbd042dfaea9b3d59fd8515cd1119097bee69dc5..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools_rust/command.py +++ /dev/null @@ -1,112 +0,0 @@ -from abc import ABC, abstractmethod -from distutils import log -from distutils.cmd import Command -from distutils.errors import DistutilsPlatformError -from typing import List, Optional - -from setuptools.dist import Distribution - -from .extension import RustExtension -from .rustc_info import get_rust_version - - -class RustCommand(Command, ABC): - """Abstract base class for commands which interact with Rust Extensions.""" - - # Types for distutils variables which exist on all commands but seem to be - # missing from https://github.com/python/typeshed/blob/master/stdlib/distutils/cmd.pyi - distribution: Distribution - verbose: int - - def initialize_options(self) -> None: - self.extensions: List[RustExtension] = [] - - def finalize_options(self) -> None: - extensions: Optional[List[RustExtension]] = getattr( - self.distribution, "rust_extensions", None - ) - if extensions is None: - # extensions is None if the setup.py file did not contain - # rust_extensions keyword; just no-op if this is the case. - return - - if not isinstance(extensions, list): - ty = type(extensions) - raise ValueError( - "expected list of RustExtension objects for rust_extensions " - f"argument to setup(), got `{ty}`" - ) - for (i, extension) in enumerate(extensions): - - if not isinstance(extension, RustExtension): - ty = type(extension) - raise ValueError( - "expected RustExtension object for rust_extensions " - f"argument to setup(), got `{ty}` at position {i}" - ) - # Extensions have been verified to be at the correct type - self.extensions = extensions - - def run(self) -> None: - if not self.extensions: - log.info("%s: no rust_extensions defined", self.get_command_name()) - return - - all_optional = all(ext.optional for ext in self.extensions) - try: - version = get_rust_version() - if version is None: - min_version = max( # type: ignore[type-var] - filter( - lambda version: version is not None, - (ext.get_rust_version() for ext in self.extensions), - ), - default=None, - ) - raise DistutilsPlatformError( - "can't find Rust compiler\n\n" - "If you are using an outdated pip version, it is possible a " - "prebuilt wheel is available for this package but pip is not able " - "to install from it. 
Installing from the wheel would avoid the " - "need for a Rust compiler.\n\n" - "To update pip, run:\n\n" - " pip install --upgrade pip\n\n" - "and then retry package installation.\n\n" - "If you did intend to build this package from source, try " - "installing a Rust compiler from your system package manager and " - "ensure it is on the PATH during installation. Alternatively, " - "rustup (available at https://rustup.rs) is the recommended way " - "to download and update the Rust compiler toolchain." - + ( - f"\n\nThis package requires Rust {min_version}." - if min_version is not None - else "" - ) - ) - except DistutilsPlatformError as e: - if not all_optional: - raise - else: - print(str(e)) - return - - for ext in self.extensions: - try: - rust_version = ext.get_rust_version() - if rust_version is not None and version not in rust_version: - raise DistutilsPlatformError( - f"Rust {version} does not match extension requirement {rust_version}" - ) - - self.run_for_extension(ext) - except Exception as e: - if not ext.optional: - raise - else: - command_name = self.get_command_name() - print(f"{command_name}: optional Rust extension {ext.name} failed") - print(str(e)) - - @abstractmethod - def run_for_extension(self, extension: RustExtension) -> None: - ... diff --git a/spaces/Realcat/image-matching-webui/hloc/utils/io.py b/spaces/Realcat/image-matching-webui/hloc/utils/io.py deleted file mode 100644 index 1cd55d4c30b41c3754634a164312dc5e8c294274..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/hloc/utils/io.py +++ /dev/null @@ -1,77 +0,0 @@ -from typing import Tuple -from pathlib import Path -import numpy as np -import cv2 -import h5py - -from .parsers import names_to_pair, names_to_pair_old - - -def read_image(path, grayscale=False): - if grayscale: - mode = cv2.IMREAD_GRAYSCALE - else: - mode = cv2.IMREAD_COLOR - image = cv2.imread(str(path), mode) - if image is None: - raise ValueError(f"Cannot read image {path}.") - if not grayscale and len(image.shape) == 3: - image = image[:, :, ::-1] # BGR to RGB - return image - - -def list_h5_names(path): - names = [] - with h5py.File(str(path), "r", libver="latest") as fd: - - def visit_fn(_, obj): - if isinstance(obj, h5py.Dataset): - names.append(obj.parent.name.strip("/")) - - fd.visititems(visit_fn) - return list(set(names)) - - -def get_keypoints( - path: Path, name: str, return_uncertainty: bool = False -) -> np.ndarray: - with h5py.File(str(path), "r", libver="latest") as hfile: - dset = hfile[name]["keypoints"] - p = dset.__array__() - uncertainty = dset.attrs.get("uncertainty") - if return_uncertainty: - return p, uncertainty - return p - - -def find_pair(hfile: h5py.File, name0: str, name1: str): - pair = names_to_pair(name0, name1) - if pair in hfile: - return pair, False - pair = names_to_pair(name1, name0) - if pair in hfile: - return pair, True - # older, less efficient format - pair = names_to_pair_old(name0, name1) - if pair in hfile: - return pair, False - pair = names_to_pair_old(name1, name0) - if pair in hfile: - return pair, True - raise ValueError( - f"Could not find pair {(name0, name1)}... " - "Maybe you matched with a different list of pairs? 
" - ) - - -def get_matches(path: Path, name0: str, name1: str) -> Tuple[np.ndarray]: - with h5py.File(str(path), "r", libver="latest") as hfile: - pair, reverse = find_pair(hfile, name0, name1) - matches = hfile[pair]["matches0"].__array__() - scores = hfile[pair]["matching_scores0"].__array__() - idx = np.where(matches != -1)[0] - matches = np.stack([idx, matches[idx]], -1) - if reverse: - matches = np.flip(matches, -1) - scores = scores[idx] - return matches, scores diff --git a/spaces/Riksarkivet/htr_demo/helper/utils.py b/spaces/Riksarkivet/htr_demo/helper/utils.py deleted file mode 100644 index 9d3316150657df7910b460bd4903cc6d0906617d..0000000000000000000000000000000000000000 --- a/spaces/Riksarkivet/htr_demo/helper/utils.py +++ /dev/null @@ -1,97 +0,0 @@ -import hashlib -import os -import shutil -import sqlite3 -import uuid -from datetime import datetime - -import gradio as gr -import huggingface_hub -import pandas as pd -import pytz -from apscheduler.schedulers.background import BackgroundScheduler - - -class TrafficDataHandler: - _DB_FILE_PATH = "./traffic_data.db" - _DB_TEMP_PATH = "./data/traffic_data.db" - _TOKEN = os.environ.get("HUB_TOKEN") - _TZ = "Europe/Stockholm" - _INTERVAL_MIN_UPDATE = 30 - _repo = huggingface_hub.Repository( - local_dir="data", repo_type="dataset", clone_from="Riksarkivet/traffic_demo_data", use_auth_token=_TOKEN - ) - _session_uuid = None - - @classmethod - def _pull_repo_data(cls): - cls._repo.git_pull() - shutil.copyfile(cls._DB_TEMP_PATH, cls._DB_FILE_PATH) - - @staticmethod - def _hash_ip(ip_address): - return hashlib.sha256(ip_address.encode()).hexdigest() - - @classmethod - def _current_time_in_sweden(cls): - swedish_tz = pytz.timezone(cls._TZ) - return datetime.now(swedish_tz).strftime("%Y-%m-%d %H:%M:%S") - - @classmethod - def onload_store_metric_data(cls, request: gr.Request): - cls._session_uuid = str(uuid.uuid1()) - cls._setup_database() - hashed_host = cls._hash_ip(request.client.host) - cls._backup_and_update_database(hashed_host, "load") - - @classmethod - def store_metric_data(cls, action, request: gr.Request): - hashed_host = cls._hash_ip(request.client.host) - cls._backup_and_update_database(hashed_host, action) - - @classmethod - def _commit_host_to_database(cls, hashed_host, action): - with sqlite3.connect(cls._DB_FILE_PATH) as db: - db.execute( - "INSERT INTO ip_data(current_time, hashed_ip, session_uuid, action) VALUES(?,?,?,?)", - [cls._current_time_in_sweden(), hashed_host, cls._session_uuid, action], - ) - - @classmethod - def _setup_database(cls): - with sqlite3.connect(cls._DB_FILE_PATH) as db: - try: - db.execute("SELECT * FROM ip_data").fetchall() - except sqlite3.OperationalError: - db.execute( - """ - CREATE TABLE ip_data (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, - current_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, - hashed_ip TEXT, - session_uuid TEXT, - action TEXT) - """ - ) - cls._pull_repo_data() - - @classmethod - def _backup_and_update_database(cls, hashed_host, action): - cls._commit_host_to_database(hashed_host, action) - shutil.copyfile(cls._DB_FILE_PATH, cls._DB_TEMP_PATH) - - with sqlite3.connect(cls._DB_FILE_PATH) as db: - ip_data = db.execute("SELECT * FROM ip_data").fetchall() - pd.DataFrame(ip_data, columns=["id", "current_time", "hashed_ip", "session_uuid", "action"]).to_csv( - "./data/ip_data.csv", index=False - ) - - cls._repo.push_to_hub(blocking=False, commit_message=f"Updating data at {datetime.now()}") - - @classmethod - def _initialize_and_schedule_backup(cls, hashed_host, action): - 
cls._backup_and_update_database(hashed_host, action) - scheduler = BackgroundScheduler() - scheduler.add_job( - cls._backup_and_update_database, "interval", minutes=cls._INTERVAL_MIN_UPDATE, args=(hashed_host, action) - ) - scheduler.start() diff --git a/spaces/SFP/ImCap/README.md b/spaces/SFP/ImCap/README.md deleted file mode 100644 index d08a9443c97e3f700d7b9d6c16d46d83f09b4233..0000000000000000000000000000000000000000 --- a/spaces/SFP/ImCap/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ImCap V1 -emoji: 👀 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/SIGGRAPH2022/Text2Human/Text2Human/models/losses/segmentation_loss.py b/spaces/SIGGRAPH2022/Text2Human/Text2Human/models/losses/segmentation_loss.py deleted file mode 100644 index 85cb46e4eea5510a95da23996fdd357bd8f8e743..0000000000000000000000000000000000000000 --- a/spaces/SIGGRAPH2022/Text2Human/Text2Human/models/losses/segmentation_loss.py +++ /dev/null @@ -1,25 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F - - -class BCELoss(nn.Module): - - def forward(self, prediction, target): - loss = F.binary_cross_entropy_with_logits(prediction, target) - return loss, {} - - -class BCELossWithQuant(nn.Module): - - def __init__(self, codebook_weight=1.): - super().__init__() - self.codebook_weight = codebook_weight - - def forward(self, qloss, target, prediction, split): - bce_loss = F.binary_cross_entropy_with_logits(prediction, target) - loss = bce_loss + self.codebook_weight * qloss - return loss, { - "{}/total_loss".format(split): loss.clone().detach().mean(), - "{}/bce_loss".format(split): bce_loss.detach().mean(), - "{}/quant_loss".format(split): qloss.detach().mean() - } diff --git a/spaces/Salesforce/EDICT/my_diffusers/pipelines/ddim/__init__.py b/spaces/Salesforce/EDICT/my_diffusers/pipelines/ddim/__init__.py deleted file mode 100644 index 8fd31868a88ac0d9ec7118574f21a9d8a1d4069b..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_diffusers/pipelines/ddim/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# flake8: noqa -from .pipeline_ddim import DDIMPipeline diff --git a/spaces/SeViLA/SeViLA/docs/conf.py b/spaces/SeViLA/SeViLA/docs/conf.py deleted file mode 100644 index ad4f8ab1af27d21f1a1a51e57e5bb49cc4485c05..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/docs/conf.py +++ /dev/null @@ -1,56 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) - - -# -- Project information ----------------------------------------------------- - -project = "LAVIS" -copyright = "2022, salesforce.com inc." -author = ( - "Dongxu Li, Junnan Li, Hung Le, Guangsen Wang, Silvio Savarese, Steven C.H. 
Hoi" -) - - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = ["nbsphinx"] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [] - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -# html_theme = "alabaster" -html_theme = "sphinx_rtd_theme" - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# pygments_style = "sphinx" diff --git a/spaces/ServerX/PorcoDiaz/infer/lib/infer_pack/modules.py b/spaces/ServerX/PorcoDiaz/infer/lib/infer_pack/modules.py deleted file mode 100644 index 2201a58bee9b7808d386b3ef9ac2d1f9630e56ef..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/infer/lib/infer_pack/modules.py +++ /dev/null @@ -1,521 +0,0 @@ -import copy -import math - -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d -from torch.nn import functional as F -from torch.nn.utils import remove_weight_norm, weight_norm - -from infer.lib.infer_pack import commons -from infer.lib.infer_pack.commons import get_padding, init_weights -from infer.lib.infer_pack.transforms import piecewise_rational_quadratic_transform - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - 
n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = 
torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/SrRaptor/Imagy/setup.sh b/spaces/SrRaptor/Imagy/setup.sh deleted file mode 100644 index e221c60655cf9d06bd304bc6395c60f761ef174d..0000000000000000000000000000000000000000 --- a/spaces/SrRaptor/Imagy/setup.sh +++ /dev/null @@ -1,2 +0,0 @@ -export GRADIO_SERVER_NAME=0.0.0.0 -export GRADIO_SERVER_PORT="$PORT" diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/tests/test_page.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/tests/test_page.py deleted file mode 100644 index 9f6a3742495912f0eb357cd4a7c0411f5210d92a..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/tests/test_page.py +++ /dev/null @@ -1,20 +0,0 @@ -#----------------------------------------------------------------------------- -# Copyright (C) 2010-2011 The IPython Development Team. -# -# Distributed under the terms of the BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- -import io - -# N.B. For the test suite, page.page is overridden (see IPython.testing.globalipapp) -from IPython.core import page - -def test_detect_screen_size(): - """Simple smoketest for page._detect_screen_size.""" - try: - page._detect_screen_size(True, 25) - except (TypeError, io.UnsupportedOperation): - # This can happen in the test suite, because stdout may not have a - # fileno. 
- pass diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/util_annotation.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/util_annotation.py deleted file mode 100644 index b647543461c7d9922421466157a0bd0323d9c34e..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/util_annotation.py +++ /dev/null @@ -1,17 +0,0 @@ -from __future__ import annotations - -import textwrap -import types - - -def get_code(source, *, filename="", function=False): - source = textwrap.dedent(source).strip() - code = compile(source, filename, "exec") - if function: - sub_code = [ - const for const in code.co_consts if isinstance(const, types.CodeType) - ] - if len(sub_code) != 1: - raise ValueError("unable to find function code") - code = sub_code[0] - return code diff --git a/spaces/Superlang/ImageProcessor/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/__init__.py b/spaces/Superlang/ImageProcessor/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/__init__.py deleted file mode 100644 index 2e441a5838d1e972823b9668ac8d459445f6f6ce..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .gen_efficientnet import * -from .mobilenetv3 import * -from .model_factory import create_model -from .config import is_exportable, is_scriptable, set_exportable, set_scriptable -from .activations import * \ No newline at end of file diff --git a/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/cost.py b/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/cost.py deleted file mode 100644 index 10558ddf728842f519f3cbf6e2c47e4e5f4ede44..0000000000000000000000000000000000000000 --- a/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/cost.py +++ /dev/null @@ -1,539 +0,0 @@ -from dataclasses import dataclass -from typing import Any, Callable, Optional, Tuple - -from mmcv import Config -import numpy as np -import torch -from torch import Tensor - - -def masked_min_torch(x, mask=None, dim=None): - if mask is not None: - x = torch.masked_fill(x, torch.logical_not(mask), float("inf")) - if dim is None: - return torch.min(x) - else: - return torch.min(x, dim=dim)[0] - - -def masked_max_torch(x, mask=None, dim=None): - if mask is not None: - x = torch.masked_fill(x, torch.logical_not(mask), float("-inf")) - if dim is None: - return torch.max(x) - else: - return torch.max(x, dim=dim)[0] - - -def get_masked_discounted_mean_torch(discount_factor=0.95): - def masked_discounted_mean_torch(x, mask=None, dim=None): - discount_tensor = torch.full(x.shape, discount_factor, device=x.device) - discount_tensor = torch.cumprod(discount_tensor, dim=-2) - if mask is not None: - x = torch.masked_fill(x, torch.logical_not(mask), 0) - if dim is None: - assert mask.any() - return (x * discount_tensor).sum() / (mask * discount_tensor).sum() - else: - return (x * discount_tensor).sum(dim) / (mask * discount_tensor).sum( - dim - ).clamp_min(1) - else: - if dim is None: - return (x * discount_tensor).sum() / discount_tensor.sum() - else: - return (x * discount_tensor).sum(dim) / discount_tensor.sum(dim) - - return masked_discounted_mean_torch - - -def masked_mean_torch(x, mask=None, dim=None): - if mask is not 
None: - x = torch.masked_fill(x, torch.logical_not(mask), 0) - if dim is None: - assert mask.any() - return x.sum() / mask.sum() - else: - return x.sum(dim) / mask.sum(dim).clamp_min(1) - else: - if dim is None: - return x.mean() - else: - return x.mean(dim) - - -def get_discounted_mean_np(discount_factor=0.95): - def discounted_mean_np(x, axis=None): - discount_tensor = np.full(x.shape, discount_factor) - discount_tensor = np.cumprod(discount_tensor, axis=-2) - if axis is None: - return (x * discount_tensor).sum() / discount_tensor.sum() - else: - return (x * discount_tensor).sum(axis) / discount_tensor.sum(axis) - - return discounted_mean_np - - -def get_masked_reduce_np(reduce_function): - def masked_reduce_np(x, mask=None, axis=None): - if mask is not None: - x = np.ma.array(x, mask=np.logical_not(mask)) - return reduce_function(x, axis=axis) - else: - return reduce_function(x, axis=axis) - - return masked_reduce_np - - -@dataclass -class CostParams: - scale: float - reduce: str - discount_factor: float - - @staticmethod - def from_config(cfg: Config): - return CostParams( - scale=cfg.cost_scale, - reduce=cfg.cost_reduce, - discount_factor=cfg.discount_factor, - ) - - -class BaseCostTorch: - """Base cost class defining reduce strategy and basic parameters. - Its __call__ definition is only a dummy example returning zeros, this class is intended to be - inherited from and __call__ redefined with an actual cost between the inputs. - """ - - def __init__(self, params: CostParams) -> None: - super().__init__() - self._reduce_fun = params.reduce - self.scale = params.scale - - reduce_fun_torch_dict = { - "min": masked_min_torch, - "max": masked_max_torch, - "mean": masked_mean_torch, - "discounted_mean": get_masked_discounted_mean_torch(params.discount_factor), - "now": lambda *args, **kwargs: args[0][..., 0], - "final": lambda *args, **kwargs: args[0][..., -1], - } - - self._reduce_fun = reduce_fun_torch_dict[params.reduce] - - @property - def distance_bandwidth(self): - return 1 - - @property - def time_bandwidth(self): - return 1 - - def __call__( - self, - x1: Tensor, - x2: Tensor, - v1: Tensor, - v2: Tensor, - mask: Optional[Tensor] = None, - ) -> Tuple[Tensor, Any]: - """Compute the cost from given positions x1, x2 and velocities v1, v2 - The base cost only returns 0 cost, use costs that inherit from this to compute an actual cost. - - Args: - x1 (some shape, num_steps, 2): positions of the first agent - x2 (some shape, num_steps, 2): positions of the second agent - v1 (some shape, num_steps, 2): velocities of the first agent - v2 (some shape, num_steps, 2): velocities of the second agent - mask (some_shape, num_steps, 2): mask set to True where the cost should be computed - - Returns: - (some_shape) cost for the compared states of agent 1 and agent 2, as well as any - supplementary cost-related information - """ - return ( - self._reduce_fun(torch.zeros_like(x2[..., 0]), mask, dim=-1), - None, - ) - - -class BaseCostNumpy: - """Base cost class defining reduce strategy and basic parameters. - Its __call__ definition is only a dummy example returning zeros, this class is intended to be - inherited from and __call__ redefined with an actual cost between the inputs. 
- """ - - def __init__(self, params: CostParams) -> None: - super().__init__() - self._reduce_fun = params.reduce - self.scale = params.scale - - reduce_fun_np_dict = { - "min": get_masked_reduce_np(np.min), - "max": get_masked_reduce_np(np.max), - "mean": get_masked_reduce_np(np.mean), - "discounted_mean": get_masked_reduce_np( - get_discounted_mean_np(params.discount_factor) - ), - "now": get_masked_reduce_np(lambda *args, **kwargs: args[0][..., 0]), - "final": get_masked_reduce_np(lambda *args, **kwargs: args[0][..., -1]), - } - self._reduce_fun = reduce_fun_np_dict[params.reduce] - - @property - def distance_bandwidth(self): - return 1 - - @property - def time_bandwidth(self): - return 1 - - def __call__( - self, - x1: np.ndarray, - x2: np.ndarray, - v1: np.ndarray, - v2: np.ndarray, - mask: Optional[np.ndarray] = None, - ) -> Tuple[np.ndarray, Any]: - """Compute the cost from given positions x1, x2 and velocities v1, v2 - The base cost only returns 0 cost, use costs that inherit from this to compute an actual cost. - - Args: - x1 (some shape, num_steps, 2): positions of the first agent - x2 (some shape, num_steps, 2): positions of the second agent - v1 (some shape, num_steps, 2): velocities of the first agent - v2 (some shape, num_steps, 2): velocities of the second agent - mask (some_shape, num_steps, 2): mask set to True where the cost should be computed - - Returns: - (some_shape) cost for the compared states of agent 1 and agent 2, as well as any - supplementary cost-related information - """ - return ( - self._reduce_fun(np.zeros_like(x2[..., 0]), mask, axis=-1), - None, - ) - - -@dataclass -class DistanceCostParams(CostParams): - bandwidth: float - - @staticmethod - def from_config(cfg: Config): - return DistanceCostParams( - scale=cfg.cost_scale, - reduce=cfg.cost_reduce, - bandwidth=cfg.distance_bandwidth, - discount_factor=cfg.discount_factor, - ) - - -class DistanceCostTorch(BaseCostTorch): - def __init__(self, params: DistanceCostParams) -> None: - super().__init__(params) - self._bandwidth = params.bandwidth - - @property - def distance_bandwidth(self): - return self._bandwidth - - def __call__( - self, x1: Tensor, x2: Tensor, *args, mask: Optional[Tensor] = None, **kwargs - ) -> Tuple[Tensor, Tensor]: - """ - Returns a cost estimation based on distance. Also returns distances between ego and pedestrians. - Args: - x1: First agent trajectory - x2: Second agent trajectory - mask: True where cost should be computed - Returns: - cost, distance_to_collision - """ - - dist = torch.square(x2 - x1).sum(-1) - if mask is not None: - dist = torch.masked_fill(dist, torch.logical_not(mask), 1e9) - cost = torch.exp(-dist / (2 * self._bandwidth)) - return self.scale * self._reduce_fun(cost, mask=mask, dim=-1), dist - - -class DistanceCostNumpy(BaseCostNumpy): - def __init__(self, params: DistanceCostParams) -> None: - super().__init__(params) - self._bandwidth = params.bandwidth - - @property - def distance_bandwidth(self): - return self._bandwidth - - def __call__( - self, - x1: np.ndarray, - x2: np.ndarray, - *args, - mask: Optional[np.ndarray] = None, - **kwargs - ) -> Tuple[np.ndarray, np.ndarray]: - """ - Returns a cost estimation based on distance. Also returns distances between ego and pedestrians. 
- Args: - x1: First agent trajectory - x2: Second agent trajectory - mask: True where cost should be computed - Returns: - cost, distance_to_collision - """ - dist = np.square(x2 - x1).sum(-1) - if mask is not None: - dist = np.where(mask, dist, 1e9) - cost = np.exp(-dist / (2 * self._bandwidth)) - return self.scale * self._reduce_fun(cost, mask=mask, axis=-1), dist - - -@dataclass -class TTCCostParams(CostParams): - distance_bandwidth: float - time_bandwidth: float - min_velocity_diff: float - - @staticmethod - def from_config(cfg: Config): - return TTCCostParams( - scale=cfg.cost_scale, - reduce=cfg.cost_reduce, - distance_bandwidth=cfg.distance_bandwidth, - time_bandwidth=cfg.time_bandwidth, - min_velocity_diff=cfg.min_velocity_diff, - discount_factor=cfg.discount_factor, - ) - - -class TTCCostTorch(BaseCostTorch): - def __init__(self, params: TTCCostParams) -> None: - super().__init__(params) - self._d_bw = params.distance_bandwidth - self._t_bw = params.time_bandwidth - self._min_v = params.min_velocity_diff - - @property - def distance_bandwidth(self): - return self._d_bw - - @property - def time_bandwidth(self): - return self._t_bw - - def __call__( - self, - x1: Tensor, - x2: Tensor, - v1: Tensor, - v2: Tensor, - *args, - mask: Optional[Tensor] = None, - **kwargs - ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: - """ - Returns a cost estimation based on time to collision and distance to collision. - Also returns the estimated time to collision, and the imaginary part of the time to collision. - Args: - x1: (some_shape, sequence_length, feature_shape) Initial position of the first agent - x2: (some_shape, sequence_length, feature_shape) Initial position of the second agent - v1: (some_shape, sequence_length, feature_shape) Velocity of the first agent - v2: (some_shape, sequence_length, feature_shape) Velocity of the second agent - mask: (some_shape, sequence_length) True where cost should be computed - Returns: - cost, (time_to_collision, distance_to_collision) - """ - pos_diff = x1 - x2 - velocity_diff = v1 - v2 - - dx = pos_diff[..., 0] - dy = pos_diff[..., 1] - vx = velocity_diff[..., 0] - vy = velocity_diff[..., 1] - - speed_diff = ( - torch.square(velocity_diff).sum(-1).clamp(self._min_v * self._min_v, None) - ) - - TTC = -(dx * vx + dy * vy) / speed_diff - - distance_TTC = torch.where( - TTC < 0, - torch.sqrt(dx * dx + dy * dy), - torch.abs(vy * dx - vx * dy) / torch.sqrt(speed_diff), - ) - TTC = torch.relu(TTC) - if mask is not None: - TTC = torch.masked_fill(TTC, torch.logical_not(mask), 1e9) - distance_TTC = torch.masked_fill(distance_TTC, torch.logical_not(mask), 1e9) - - cost = self.scale * self._reduce_fun( - torch.exp( - -torch.square(TTC) / (2 * self._t_bw) - - torch.square(distance_TTC) / (2 * self._d_bw) - ), - mask=mask, - dim=-1, - ) - - return cost, (TTC, distance_TTC) - - -class TTCCostNumpy(BaseCostNumpy): - def __init__(self, params: TTCCostParams) -> None: - super().__init__(params) - self._d_bw = params.distance_bandwidth - self._t_bw = params.time_bandwidth - self._min_v = params.min_velocity_diff - - @property - def distance_bandwidth(self): - return self._d_bw - - @property - def time_bandwidth(self): - return self._t_bw - - def __call__( - self, - x1: np.ndarray, - x2: np.ndarray, - v1: np.ndarray, - v2: np.ndarray, - *args, - mask: Optional[np.ndarray] = None, - **kwargs - ) -> Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray]]: - """ - Returns a cost estimation based on time to collision and distance to collision. 
- Also returns the estimated time to collision, and the imaginary part of the time to collision. - Args: - x1: (some_shape, sequence_length, feature_shape) Initial position of the first agent - x2: (some_shape, sequence_length, feature_shape) Initial position of the second agent - v1: (some_shape, sequence_length, feature_shape) Velocity of the first agent - v2: (some_shape, sequence_length, feature_shape) Velocity of the second agent - mask: (some_shape, sequence_length) True where cost should be computed - Returns: - cost, (time_to_collision, distance_to_collision) - """ - pos_diff = x1 - x2 - velocity_diff = v1 - v2 - - dx = pos_diff[..., 0] - dy = pos_diff[..., 1] - vx = velocity_diff[..., 0] - vy = velocity_diff[..., 1] - - speed_diff = np.maximum( - np.square(velocity_diff).sum(-1), self._min_v * self._min_v - ) - - TTC = -(dx * vx + dy * vy) / speed_diff - distance_TTC = np.where( - TTC < 0, - np.sqrt(dx * dx + dy * dy), - np.abs(vy * dx - vx * dy) / np.sqrt(speed_diff), - ) - TTC = np.where( - TTC < 0, - 0, - TTC, - ) - if mask is not None: - TTC = np.where(mask, TTC, 1e9) - distance_TTC = np.where(mask, TTC, 1e9) - - cost = self.scale * self._reduce_fun( - np.exp( - -np.square(TTC) / (2 * self._t_bw) - - np.square(distance_TTC) / (2 * self._d_bw) - ), - mask=mask, - axis=-1, - ) - return cost, (TTC, distance_TTC) - - -def compute_v_from_x(x: Tensor, y: Tensor, dt: float): - """ - Computes the velocity from the position and the time difference. - Args: - x: (some_shape, past_time_sequence, features) Past positions of the agents - y: (some_shape, future_time_sequence, features) Future positions of the agents - dt: Time difference - Returns: - v: (some_shape, future_time_sequence, features) Velocity of the agents - """ - v = (y[..., 1:, :2] - y[..., :-1, :2]) / dt - v_0 = (y[..., 0:1, :2] - x[..., -1:, :2]) / dt - v = torch.cat((v_0, v), -2) - return v - - -def get_cost( - cost_function: BaseCostTorch, - x: torch.Tensor, - y_samples: torch.Tensor, - offset: torch.Tensor, - x_ego: torch.Tensor, - y_ego: torch.Tensor, - dt: float, - unnormalizer: Callable[[torch.Tensor, torch.Tensor], torch.Tensor], - mask: Optional[torch.Tensor] = None, -) -> torch.Tensor: - """Compute cost samples from predicted future trajectories - - Args: - cost_function: Cost function to use - x: (batch_size, n_agents, num_steps, state_dim) normalized tensor of history - y_samples: (batch_size, n_agents, n_samples, num_steps_future, state_dim) normalized tensor of predicted - future trajectory samples - offset: (batch_size, n_agents, state_dim) offset position from ego - x_ego: (batch_size, 1, num_steps, state_dim) tensor of ego history - y_ego: (batch_size, 1, num_steps_future, state_dim) tensor of ego future trajectory - dt: time step in trajectories - unnormalizer: function that takes in a trajectory and an offset and that outputs the - unnormalized trajectory - mask: tensor indicating where to compute the cost - Returns: - torch.Tensor: (batch_size, n_agents, n_samples) cost tensor - """ - x = unnormalizer(x, offset) - y_samples = unnormalizer(y_samples, offset) - if offset.shape[1] > 1: - x_ego = unnormalizer(x_ego, offset[:, 0:1]) - y_ego = unnormalizer(y_ego, offset[:, 0:1]) - - min_dim = min(x.shape[-1], y_samples.shape[-1]) - x = x[..., :min_dim] - y_samples = y_samples[..., :min_dim] - x_ego = x_ego[..., :min_dim] - y_ego = y_ego[..., :min_dim] - assert x_ego.ndim == y_ego.ndim - if y_samples.shape[-1] < 5: - v_samples = compute_v_from_x(x.unsqueeze(-3), y_samples, dt) - else: - v_samples = 
y_samples[..., 3:5] - - if y_ego.shape[-1] < 5: - v_ego = compute_v_from_x(x_ego, y_ego, dt) - else: - v_ego = y_ego[..., 3:5] - if mask is not None: - mask = torch.cat( - (mask[..., 0:1], torch.logical_and(mask[..., 1:], mask[..., :-1])), -1 - ) - - cost, _ = cost_function( - x1=y_ego.unsqueeze(-3), - x2=y_samples, - v1=v_ego.unsqueeze(-3), - v2=v_samples, - mask=mask, - ) - return cost diff --git a/spaces/TRI-ML/risk_biased_prediction/tests/runtests.py b/spaces/TRI-ML/risk_biased_prediction/tests/runtests.py deleted file mode 100644 index 54d68f220d38fd50b886299409525093b9f3f3ad..0000000000000000000000000000000000000000 --- a/spaces/TRI-ML/risk_biased_prediction/tests/runtests.py +++ /dev/null @@ -1,3 +0,0 @@ -import pytest - -pytest.main() diff --git a/spaces/TabPFN/TabPFNPrediction/TabPFN/scripts/tabular_metrics.py b/spaces/TabPFN/TabPFNPrediction/TabPFN/scripts/tabular_metrics.py deleted file mode 100644 index 54d36523a7937ca14d7a6d97e675e1aa0f860165..0000000000000000000000000000000000000000 --- a/spaces/TabPFN/TabPFNPrediction/TabPFN/scripts/tabular_metrics.py +++ /dev/null @@ -1,212 +0,0 @@ -""" -=============================== -Metrics calculation -=============================== -Includes a few metric as well as functions composing metrics on results files. - -""" - - - -import numpy as np -import torch -from sklearn.metrics import roc_auc_score, accuracy_score, balanced_accuracy_score, average_precision_score, mean_squared_error, mean_absolute_error, r2_score -from scipy.stats import rankdata -import pandas as pd - -def root_mean_squared_error_metric(target, pred): - target = torch.tensor(target) if not torch.is_tensor(target) else target - pred = torch.tensor(pred) if not torch.is_tensor(pred) else pred - return torch.sqrt(torch.nn.functional.mse_loss(target, pred)) - -def mean_squared_error_metric(target, pred): - target = torch.tensor(target) if not torch.is_tensor(target) else target - pred = torch.tensor(pred) if not torch.is_tensor(pred) else pred - return torch.nn.functional.mse_loss(target, pred) - -def mean_absolute_error_metric(target, pred): - target = torch.tensor(target) if not torch.is_tensor(target) else target - pred = torch.tensor(pred) if not torch.is_tensor(pred) else pred - return torch.tensor(mean_absolute_error(target, pred)) - -""" -=============================== -Metrics calculation -=============================== -""" -def auc_metric(target, pred, multi_class='ovo', numpy=False): - lib = np if numpy else torch - try: - if not numpy: - target = torch.tensor(target) if not torch.is_tensor(target) else target - pred = torch.tensor(pred) if not torch.is_tensor(pred) else pred - if len(lib.unique(target)) > 2: - if not numpy: - return torch.tensor(roc_auc_score(target, pred, multi_class=multi_class)) - return roc_auc_score(target, pred, multi_class=multi_class) - else: - if len(pred.shape) == 2: - pred = pred[:, 1] - if not numpy: - return torch.tensor(roc_auc_score(target, pred)) - return roc_auc_score(target, pred) - except ValueError as e: - print(e) - return np.nan if numpy else torch.tensor(np.nan) - -def accuracy_metric(target, pred): - target = torch.tensor(target) if not torch.is_tensor(target) else target - pred = torch.tensor(pred) if not torch.is_tensor(pred) else pred - if len(torch.unique(target)) > 2: - return torch.tensor(accuracy_score(target, torch.argmax(pred, -1))) - else: - return torch.tensor(accuracy_score(target, pred[:, 1] > 0.5)) - -def average_precision_metric(target, pred): - target = torch.tensor(target) if not 
torch.is_tensor(target) else target - pred = torch.tensor(pred) if not torch.is_tensor(pred) else pred - if len(torch.unique(target)) > 2: - return torch.tensor(average_precision_score(target, torch.argmax(pred, -1))) - else: - return torch.tensor(average_precision_score(target, pred[:, 1] > 0.5)) - -def balanced_accuracy_metric(target, pred): - target = torch.tensor(target) if not torch.is_tensor(target) else target - pred = torch.tensor(pred) if not torch.is_tensor(pred) else pred - if len(torch.unique(target)) > 2: - return torch.tensor(balanced_accuracy_score(target, torch.argmax(pred, -1))) - else: - return torch.tensor(balanced_accuracy_score(target, pred[:, 1] > 0.5)) - -def cross_entropy(target, pred): - target = torch.tensor(target) if not torch.is_tensor(target) else target - pred = torch.tensor(pred) if not torch.is_tensor(pred) else pred - if len(torch.unique(target)) > 2: - ce = torch.nn.CrossEntropyLoss() - return ce(pred.float(), target.long()) - else: - bce = torch.nn.BCELoss() - return bce(pred[:, 1].float(), target.float()) - -def r2_metric(target, pred): - target = torch.tensor(target) if not torch.is_tensor(target) else target - pred = torch.tensor(pred) if not torch.is_tensor(pred) else pred - return torch.tensor(neg_r2(target, pred)) - -def neg_r2(target, pred): - return -r2_score(pred.float(), target.float()) - -def is_classification(metric_used): - if metric_used == auc_metric or metric_used == cross_entropy: - return True - return False - -def time_metric(): - """ - Dummy function, will just be used as a handler. - """ - pass - -def count_metric(x, y): - """ - Dummy function, returns one count per dataset. - """ - return 1 - -""" -=============================== -Metrics composition -=============================== -""" -def calculate_score_per_method(metric, name:str, global_results:dict, ds:list, eval_positions:list[int], aggregator:str='mean'): - """ - Calculates the metric given by 'metric' and saves it under 'name' in the 'global_results' - - :param metric: Metric function - :param name: Name of metric in 'global_results' - :param global_results: Dicrtonary containing the results for current method for a collection of datasets - :param ds: Dataset to calculate metrics on, a list of dataset properties - :param eval_positions: List of positions to calculate metrics on - :param aggregator: Specifies way to aggregate results across evaluation positions - :return: - """ - aggregator_f = np.nanmean if aggregator == 'mean' else np.nansum - for pos in eval_positions: - valid_positions = 0 - for d in ds: - if f'{d[0]}_outputs_at_{pos}' in global_results: - preds = global_results[f'{d[0]}_outputs_at_{pos}'] - y = global_results[f'{d[0]}_ys_at_{pos}'] - - preds, y = preds.detach().cpu().numpy() if torch.is_tensor( - preds) else preds, y.detach().cpu().numpy() if torch.is_tensor(y) else y - - try: - if metric == time_metric: - global_results[f'{d[0]}_{name}_at_{pos}'] = global_results[f'{d[0]}_time_at_{pos}'] - valid_positions = valid_positions + 1 - else: - global_results[f'{d[0]}_{name}_at_{pos}'] = aggregator_f( - [metric(y[split], preds[split]) for split in range(y.shape[0])]) - valid_positions = valid_positions + 1 - except Exception as err: - print(f'Error calculating metric with {err}, {type(err)} at {d[0]} {pos} {name}') - global_results[f'{d[0]}_{name}_at_{pos}'] = np.nan - else: - global_results[f'{d[0]}_{name}_at_{pos}'] = np.nan - - if valid_positions > 0: - global_results[f'{aggregator}_{name}_at_{pos}'] = 
aggregator_f([global_results[f'{d[0]}_{name}_at_{pos}'] for d in ds]) - else: - global_results[f'{aggregator}_{name}_at_{pos}'] = np.nan - - for d in ds: - metrics = [global_results[f'{d[0]}_{name}_at_{pos}'] for pos in eval_positions] - metrics = [m for m in metrics if not np.isnan(m)] - global_results[f'{d[0]}_{aggregator}_{name}'] = aggregator_f(metrics) if len(metrics) > 0 else np.nan - - metrics = [global_results[f'{aggregator}_{name}_at_{pos}'] for pos in eval_positions] - metrics = [m for m in metrics if not np.isnan(m)] - global_results[f'{aggregator}_{name}'] = aggregator_f(metrics) if len(metrics) > 0 else np.nan - - -def calculate_score(metric, name, global_results, ds, eval_positions, aggregator='mean', limit_to=''): - """ - Calls calculate_metrics_by_method with a range of methods. See arguments of that method. - :param limit_to: This method will not get metric calculations. - """ - for m in global_results: - if limit_to not in m: - continue - calculate_score_per_method(metric, name, global_results[m], ds, eval_positions, aggregator=aggregator) - - -def make_metric_matrix(global_results, methods, pos, name, ds): - result = [] - for m in global_results: - try: - result += [[global_results[m][d[0] + '_' + name + '_at_' + str(pos)] for d in ds]] - except Exception as e: - result += [[np.nan]] - result = np.array(result) - result = pd.DataFrame(result.T, index=[d[0] for d in ds], columns=[k for k in list(global_results.keys())]) - - matrix_means, matrix_stds = [], [] - - for method in methods: - matrix_means += [result.iloc[:, [c.startswith(method+'_time') for c in result.columns]].mean(axis=1)] - matrix_stds += [result.iloc[:, [c.startswith(method+'_time') for c in result.columns]].std(axis=1)] - - matrix_means = pd.DataFrame(matrix_means, index=methods).T - matrix_stds = pd.DataFrame(matrix_stds, index=methods).T - - return matrix_means, matrix_stds - - -def make_ranks_and_wins_table(matrix): - for dss in matrix.T: - matrix.loc[dss] = rankdata(-matrix.round(3).loc[dss]) - ranks_acc = matrix.mean() - wins_acc = (matrix == 1).sum() - - return ranks_acc, wins_acc \ No newline at end of file diff --git a/spaces/TencentARC/VLog/models/grit_src/grit/custom_solver.py b/spaces/TencentARC/VLog/models/grit_src/grit/custom_solver.py deleted file mode 100644 index 87f7d61ed756acf9326b7ab4097a989a9e6c7532..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/grit/custom_solver.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -# Modified by Jialian Wu from https://github.com/facebookresearch/Detic/blob/main/detic/custom_solver.py -import itertools -from typing import Any, Callable, Dict, Iterable, List, Set, Type, Union -import torch - -from detectron2.config import CfgNode - -from detectron2.solver.build import maybe_add_gradient_clipping - - -def build_custom_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer: - params: List[Dict[str, Any]] = [] - memo: Set[torch.nn.parameter.Parameter] = set() - optimizer_type = cfg.SOLVER.OPTIMIZER - - for key, value in model.named_parameters(recurse=True): - if not value.requires_grad: - continue - # Avoid duplicating parameters - if value in memo: - continue - memo.add(value) - lr = cfg.SOLVER.BASE_LR - weight_decay = cfg.SOLVER.WEIGHT_DECAY - - if cfg.SOLVER.VIT_LAYER_DECAY: - lr = lr * get_vit_lr_decay_rate(key, cfg.SOLVER.VIT_LAYER_DECAY_RATE, cfg.MODEL.VIT_LAYERS) - - param = {"params": [value], "lr": lr} - if optimizer_type != 'ADAMW': - param['weight_decay'] = weight_decay - params += [param] - - def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class - # detectron2 doesn't have full model gradient clipping now - clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE - enable = ( - cfg.SOLVER.CLIP_GRADIENTS.ENABLED - and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" - and clip_norm_val > 0.0 - ) - - class FullModelGradientClippingOptimizer(optim): - def step(self, closure=None): - all_params = itertools.chain(*[x["params"] for x in self.param_groups]) - torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) - super().step(closure=closure) - - return FullModelGradientClippingOptimizer if enable else optim - - - if optimizer_type == 'SGD': - optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( - params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, - nesterov=cfg.SOLVER.NESTEROV - ) - elif optimizer_type == 'ADAMW': - optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( - params, cfg.SOLVER.BASE_LR, - weight_decay=cfg.SOLVER.WEIGHT_DECAY - ) - else: - raise NotImplementedError(f"no optimizer type {optimizer_type}") - if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": - optimizer = maybe_add_gradient_clipping(cfg, optimizer) - return optimizer - - -def get_vit_lr_decay_rate(name, lr_decay_rate=1.0, num_layers=12): - """ - Calculate lr decay rate for different ViT blocks. - Args: - name (string): parameter name. - lr_decay_rate (float): base lr decay rate. - num_layers (int): number of ViT blocks. - - Returns: - lr decay rate for the given parameter. - """ - layer_id = num_layers + 1 - if name.startswith("backbone"): - if ".pos_embed" in name or ".patch_embed" in name: - layer_id = 0 - elif ".blocks." in name and ".residual." 
not in name: - layer_id = int(name[name.find(".blocks.") :].split(".")[2]) + 1 - - return lr_decay_rate ** (num_layers + 1 - layer_id) \ No newline at end of file diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py deleted file mode 100644 index 40844ddeb8d47ff58a6af49ab35bad84e14f5721..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py +++ /dev/null @@ -1,8 +0,0 @@ -from ..common.optim import SGD as optimizer -from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier -from ..common.data.coco import dataloader -from ..common.models.mask_rcnn_fpn import model -from ..common.train import train - -model.backbone.bottom_up.freeze_at = 2 -train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" diff --git a/spaces/Theivaprakasham/yolov6/yolov6/models/reppan.py b/spaces/Theivaprakasham/yolov6/yolov6/models/reppan.py deleted file mode 100644 index 2571d687155fb754dbe013a0f0f9468af487f8ca..0000000000000000000000000000000000000000 --- a/spaces/Theivaprakasham/yolov6/yolov6/models/reppan.py +++ /dev/null @@ -1,108 +0,0 @@ -import torch -from torch import nn -from yolov6.layers.common import RepBlock, SimConv, Transpose - - -class RepPANNeck(nn.Module): - """RepPANNeck Module - EfficientRep is the default backbone of this model. - RepPANNeck has the balance of feature fusion ability and hardware efficiency. - """ - - def __init__( - self, - channels_list=None, - num_repeats=None - ): - super().__init__() - - assert channels_list is not None - assert num_repeats is not None - - self.Rep_p4 = RepBlock( - in_channels=channels_list[3] + channels_list[5], - out_channels=channels_list[5], - n=num_repeats[5], - ) - - self.Rep_p3 = RepBlock( - in_channels=channels_list[2] + channels_list[6], - out_channels=channels_list[6], - n=num_repeats[6] - ) - - self.Rep_n3 = RepBlock( - in_channels=channels_list[6] + channels_list[7], - out_channels=channels_list[8], - n=num_repeats[7], - ) - - self.Rep_n4 = RepBlock( - in_channels=channels_list[5] + channels_list[9], - out_channels=channels_list[10], - n=num_repeats[8] - ) - - self.reduce_layer0 = SimConv( - in_channels=channels_list[4], - out_channels=channels_list[5], - kernel_size=1, - stride=1 - ) - - self.upsample0 = Transpose( - in_channels=channels_list[5], - out_channels=channels_list[5], - ) - - self.reduce_layer1 = SimConv( - in_channels=channels_list[5], - out_channels=channels_list[6], - kernel_size=1, - stride=1 - ) - - self.upsample1 = Transpose( - in_channels=channels_list[6], - out_channels=channels_list[6] - ) - - self.downsample2 = SimConv( - in_channels=channels_list[6], - out_channels=channels_list[7], - kernel_size=3, - stride=2 - ) - - self.downsample1 = SimConv( - in_channels=channels_list[8], - out_channels=channels_list[9], - kernel_size=3, - stride=2 - ) - - def forward(self, input): - - (x2, x1, x0) = input - - fpn_out0 = self.reduce_layer0(x0) - upsample_feat0 = self.upsample0(fpn_out0) - f_concat_layer0 = torch.cat([upsample_feat0, x1], 1) - f_out0 = self.Rep_p4(f_concat_layer0) - - fpn_out1 = self.reduce_layer1(f_out0) - upsample_feat1 = self.upsample1(fpn_out1) - f_concat_layer1 = torch.cat([upsample_feat1, x2], 1) - pan_out2 = self.Rep_p3(f_concat_layer1) - - down_feat1 = 
self.downsample2(pan_out2) - p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1) - pan_out1 = self.Rep_n3(p_concat_layer1) - - down_feat0 = self.downsample1(pan_out1) - p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1) - pan_out0 = self.Rep_n4(p_concat_layer2) - - outputs = [pan_out2, pan_out1, pan_out0] - - return outputs diff --git a/spaces/Tomoniai/Demo_Mistral_Chat/app.py b/spaces/Tomoniai/Demo_Mistral_Chat/app.py deleted file mode 100644 index be203c1992c8061e67a1f38d4c27746bdf908a13..0000000000000000000000000000000000000000 --- a/spaces/Tomoniai/Demo_Mistral_Chat/app.py +++ /dev/null @@ -1,48 +0,0 @@ -from huggingface_hub import InferenceClient -import gradio as gr - -client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1") - -def format_prompt(message, history): - prompt = "" - for user_prompt, bot_response in history: - prompt += f"[INST] {user_prompt} [/INST]" - prompt += f" {bot_response} " - prompt += f"[INST] {message} [/INST]" - return prompt - -def generate( - prompt, history, temperature=0.3, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0, -): - temperature = float(temperature) - if temperature < 1e-2: - temperature = 1e-2 - top_p = float(top_p) - - generate_kwargs = dict( - temperature=temperature, - max_new_tokens=max_new_tokens, - top_p=top_p, - repetition_penalty=repetition_penalty, - do_sample=True, - seed=42, - ) - - formatted_prompt = format_prompt(prompt, history) - - stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False) - output = "" - - for response in stream: - output += response.token.text - yield output - return output - -demo = gr.ChatInterface(fn=generate, - title="Mistralai-Mistral-7B-Instruct Chat", - retry_btn=None, - undo_btn=None, - clear_btn=None - ) - -demo.queue().launch() \ No newline at end of file diff --git a/spaces/Tonic/QuranInUrdu/README.md b/spaces/Tonic/QuranInUrdu/README.md deleted file mode 100644 index d977f4cb2efa5b8327e976fb48449f09a0aedf1e..0000000000000000000000000000000000000000 --- a/spaces/Tonic/QuranInUrdu/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Quran in Urdu -emoji: 🌠🕌 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.46.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/UdayPrasad/fashion-mnist/README.md b/spaces/UdayPrasad/fashion-mnist/README.md deleted file mode 100644 index 32e8a31e9a53a9c224118009b0b2f4f3c3930217..0000000000000000000000000000000000000000 --- a/spaces/UdayPrasad/fashion-mnist/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Fashion Mnist -emoji: 🏢 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.0.19 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/UserXTheUnknown/stablediffusion-infinity/PyPatchMatch/csrc/nnf.h b/spaces/UserXTheUnknown/stablediffusion-infinity/PyPatchMatch/csrc/nnf.h deleted file mode 100644 index b5c144a4a58649906c9c87a40044b5118a00aa04..0000000000000000000000000000000000000000 --- a/spaces/UserXTheUnknown/stablediffusion-infinity/PyPatchMatch/csrc/nnf.h +++ /dev/null @@ -1,133 +0,0 @@ -#pragma once - -#include -#include "masked_image.h" - -class PatchDistanceMetric { -public: - PatchDistanceMetric(int patch_size) : m_patch_size(patch_size) {} - virtual ~PatchDistanceMetric() = default; - - inline int 
patch_size() const { return m_patch_size; } - virtual int operator()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const = 0; - static const int kDistanceScale; - -protected: - int m_patch_size; -}; - -class NearestNeighborField { -public: - NearestNeighborField() : m_source(), m_target(), m_field(), m_distance_metric(nullptr) { - // pass - } - NearestNeighborField(const MaskedImage &source, const MaskedImage &target, const PatchDistanceMetric *metric, int max_retry = 20) - : m_source(source), m_target(target), m_distance_metric(metric) { - m_field = cv::Mat(m_source.size(), CV_32SC3); - _randomize_field(max_retry); - } - NearestNeighborField(const MaskedImage &source, const MaskedImage &target, const PatchDistanceMetric *metric, const NearestNeighborField &other, int max_retry = 20) - : m_source(source), m_target(target), m_distance_metric(metric) { - m_field = cv::Mat(m_source.size(), CV_32SC3); - _initialize_field_from(other, max_retry); - } - - const MaskedImage &source() const { - return m_source; - } - const MaskedImage &target() const { - return m_target; - } - inline cv::Size source_size() const { - return m_source.size(); - } - inline cv::Size target_size() const { - return m_target.size(); - } - inline void set_source(const MaskedImage &source) { - m_source = source; - } - inline void set_target(const MaskedImage &target) { - m_target = target; - } - - inline int *mutable_ptr(int y, int x) { - return m_field.ptr(y, x); - } - inline const int *ptr(int y, int x) const { - return m_field.ptr(y, x); - } - - inline int at(int y, int x, int c) const { - return m_field.ptr(y, x)[c]; - } - inline int &at(int y, int x, int c) { - return m_field.ptr(y, x)[c]; - } - inline void set_identity(int y, int x) { - auto ptr = mutable_ptr(y, x); - ptr[0] = y, ptr[1] = x, ptr[2] = 0; - } - - void minimize(int nr_pass); - -private: - inline int _distance(int source_y, int source_x, int target_y, int target_x) { - return (*m_distance_metric)(m_source, source_y, source_x, m_target, target_y, target_x); - } - - void _randomize_field(int max_retry = 20, bool reset = true); - void _initialize_field_from(const NearestNeighborField &other, int max_retry); - void _minimize_link(int y, int x, int direction); - - MaskedImage m_source; - MaskedImage m_target; - cv::Mat m_field; // { y_target, x_target, distance_scaled } - const PatchDistanceMetric *m_distance_metric; -}; - - -class PatchSSDDistanceMetric : public PatchDistanceMetric { -public: - using PatchDistanceMetric::PatchDistanceMetric; - virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const; - static const int kSSDScale; -}; - -class DebugPatchSSDDistanceMetric : public PatchDistanceMetric { -public: - DebugPatchSSDDistanceMetric(int patch_size, int width, int height) : PatchDistanceMetric(patch_size), m_width(width), m_height(height) {} - virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const; -protected: - int m_width, m_height; -}; - -class RegularityGuidedPatchDistanceMetricV1 : public PatchDistanceMetric { -public: - RegularityGuidedPatchDistanceMetricV1(int patch_size, double dx1, double dy1, double dx2, double dy2, double weight) - : PatchDistanceMetric(patch_size), m_dx1(dx1), m_dy1(dy1), m_dx2(dx2), m_dy2(dy2), m_weight(weight) { - - assert(m_dy1 == 0); - assert(m_dx2 == 0); - m_scale = sqrt(m_dx1 * m_dx1 + m_dy2 * 
m_dy2) / 4; - } - virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const; - -protected: - double m_dx1, m_dy1, m_dx2, m_dy2; - double m_scale, m_weight; -}; - -class RegularityGuidedPatchDistanceMetricV2 : public PatchDistanceMetric { -public: - RegularityGuidedPatchDistanceMetricV2(int patch_size, cv::Mat ijmap, double weight) - : PatchDistanceMetric(patch_size), m_ijmap(ijmap), m_weight(weight) { - - } - virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const; - -protected: - cv::Mat m_ijmap; - double m_width, m_height, m_weight; -}; - diff --git a/spaces/Veer15/image-prompt-editing/app.py b/spaces/Veer15/image-prompt-editing/app.py deleted file mode 100644 index 4e0f7bd81f237d661e66ebe2087f20fab7a8cad6..0000000000000000000000000000000000000000 --- a/spaces/Veer15/image-prompt-editing/app.py +++ /dev/null @@ -1,131 +0,0 @@ -import io - -import numpy as np -import panel as pn -import param -import PIL -import requests -import torch - -from diffusers import StableDiffusionInstructPix2PixPipeline - -pn.extension('texteditor', template="bootstrap", sizing_mode='stretch_width') - -pn.state.template.param.update( - main_max_width="690px", - header_background="#F08080", -) - -model_id = "timbrooks/instruct-pix2pix" -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - -if 'pipe' in pn.state.cache: - pipe = pn.state.cache['pipe'] -else: - pipe = pn.state.cache['pipe'] = StableDiffusionInstructPix2PixPipeline.from_pretrained( - model_id, torch_dtype=torch.float16 - ).to(device) - pipe.enable_xformers_memory_efficient_attention() - pipe.unet.to(memory_format=torch.channels_last) - -def normalize_image(value, width): - """ - normalize image to RBG channels and to the same size - """ - b = io.BytesIO(value) - image = PIL.Image.open(b).convert("RGB") - aspect = image.size[1] / image.size[0] - height = int(aspect * width) - return image.resize((width, height), PIL.Image.LANCZOS) - -def new_image(prompt, image, img_guidance, guidance, steps, width=600): - """ - create a new image from the StableDiffusionInstructPix2PixPipeline model - """ - edit = pipe( - prompt, - image=image, - image_guidance_scale=img_guidance, - guidance_scale=guidance, - num_inference_steps=steps, - ).images[0] - return edit - -file_input = pn.widgets.FileInput(width=600) - -prompt = pn.widgets.TextEditor( - value="", placeholder="Enter image editing instruction here...", height=160, toolbar=False -) -img_guidance = pn.widgets.DiscreteSlider( - name="Image guidance scale", options=list(np.arange(1, 10.5, 0.5)), value=1.5 -) -guidance = pn.widgets.DiscreteSlider( - name="Guidance scale", options=list(np.arange(1, 10.5, 0.5)), value=7 -) -steps = pn.widgets.IntSlider( - name="Inference Steps", start=1, end=100, step=1, value=20 -) -run_button = pn.widgets.Button(name="Run!") - -widgets = pn.Row( - pn.Column(prompt, run_button, margin=5), - pn.Card( - pn.Column(img_guidance, guidance, steps), - title="Advanced settings", margin=10 - ), width=600 -) - -# define global variables to keep track of things -convos = [] # store all panel objects in a list -image = None -filename = None - -def get_conversations(_, img, img_guidance, guidance, steps, width=600): - """ - Get all the conversations in a Panel object - """ - global image, filename - prompt_text = prompt.value - prompt.value = "" - - # if the filename changes, open the image again - if filename 
!= file_input.filename: - filename = file_input.filename - image = normalize_image(file_input.value, width) - convos.clear() - - # if there is a prompt run output - if prompt_text: - image = new_image(prompt_text, image, img_guidance, guidance, steps) - convos.extend([ - pn.Row( - pn.panel("\U0001F60A", width=10), - prompt_text, - width=600 - ), - pn.Row( - pn.panel(image, align='end', width=500), - pn.panel("\U0001F916", width=10), - align='end' - ) - ]) - return pn.Column(*convos, margin=15, width=575) - -# bind widgets to functions -interactive_upload = pn.panel(pn.bind(pn.panel, file_input, width=575, min_height=400, margin=15)) - -interactive_conversation = pn.panel( - pn.bind( - get_conversations, run_button, file_input, img_guidance, guidance, steps - ), loading_indicator=True -) - - -# layout -pn.Column( - "## \U0001F60A Upload an image file and start editing!", - file_input, - interactive_upload, - interactive_conversation, - widgets -).servable(title="Panel Stable Diffusion InstructPix2pix Image Editing Chatbot") \ No newline at end of file diff --git a/spaces/Wootang01/next_sentence/app.py b/spaces/Wootang01/next_sentence/app.py deleted file mode 100644 index 6f6808c4fb2fb4193b9d3d50875691bcca3f6830..0000000000000000000000000000000000000000 --- a/spaces/Wootang01/next_sentence/app.py +++ /dev/null @@ -1,23 +0,0 @@ -import gradio as gr -#from gradio import inputs -#from gradio.inputs import Textbox -#from gradio import outputs -#from transformers import pipeline - -title = "Next Sentence Generator" -description = "Try this text generator!" -examples = [ - ["Zoe Kwan is a 20-year old singer and songwriter."], - ["Zoe Kwan only recently began writing songs."], - ["Zoe Kwan has taken Hong Kong’s music scene by storm."] -] - -generator2 = gr.load("huggingface/facebook/opt-1.3b") -generator3 = gr.load("huggingface/gpt2") -generator1 = gr.load("huggingface/bigscience/bloom-1b1") - -gr.Parallel(generator1, generator2, generator3, inputs=gr.inputs.Textbox(lines=5, label="Enter a sentence to get another sentence"), - title=title, description=description, examples=examples).launch(enable_queue=True) - -#gr.Parallel(generator1, generator2, generator3, inputs=gr.inputs.Textbox(lines=5, label="Enter a sentence to get another sentence."), -# title=title, description=description, examples=examples).launch(share=False, enable_queue=True) \ No newline at end of file diff --git a/spaces/Xenova/next-example-app/_next/static/VACoPNZgWP3gqVs1-SVcF/_ssgManifest.js b/spaces/Xenova/next-example-app/_next/static/VACoPNZgWP3gqVs1-SVcF/_ssgManifest.js deleted file mode 100644 index 5b3ff592fd46c8736892a12864fdf3fed8775202..0000000000000000000000000000000000000000 --- a/spaces/Xenova/next-example-app/_next/static/VACoPNZgWP3gqVs1-SVcF/_ssgManifest.js +++ /dev/null @@ -1 +0,0 @@ -self.__SSG_MANIFEST=new Set([]);self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB() \ No newline at end of file diff --git a/spaces/XzJosh/Azusa-Bert-VITS2/app.py b/spaces/XzJosh/Azusa-Bert-VITS2/app.py deleted file mode 100644 index 9e18d6c4abb43ddd8b02a245e65a2ac3925193c1..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Azusa-Bert-VITS2/app.py +++ /dev/null @@ -1,150 +0,0 @@ -import sys, os - -if sys.platform == "darwin": - os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" - -import logging - -logging.getLogger("numba").setLevel(logging.WARNING) -logging.getLogger("markdown_it").setLevel(logging.WARNING) -logging.getLogger("urllib3").setLevel(logging.WARNING) -logging.getLogger("matplotlib").setLevel(logging.WARNING) - 
-logging.basicConfig(level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s") - -logger = logging.getLogger(__name__) - -import torch -import argparse -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import cleaned_text_to_sequence, get_bert -from text.cleaner import clean_text -import gradio as gr -import webbrowser - - -net_g = None - - -def get_text(text, language_str, hps): - norm_text, phone, tone, word2ph = clean_text(text, language_str) - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert = get_bert(norm_text, word2ph, language_str) - del word2ph - - assert bert.shape[-1] == len(phone) - - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - - return bert, phone, tone, language - -def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid): - global net_g - bert, phones, tones, lang_ids = get_text(text, "ZH", hps) - with torch.no_grad(): - x_tst=phones.to(device).unsqueeze(0) - tones=tones.to(device).unsqueeze(0) - lang_ids=lang_ids.to(device).unsqueeze(0) - bert = bert.to(device).unsqueeze(0) - x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device) - del phones - speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device) - audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio - , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy() - del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers - return audio - -def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale): - with torch.no_grad(): - audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker) - return "Success", (hps.data.sampling_rate, audio) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--model_dir", default="./logs/Azusa/G_2800.pth", help="path of your model") - parser.add_argument("--config_dir", default="./configs/config.json", help="path of your config file") - parser.add_argument("--share", default=False, help="make link public") - parser.add_argument("-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log") - - args = parser.parse_args() - if args.debug: - logger.info("Enable DEBUG-LEVEL log") - logging.basicConfig(level=logging.DEBUG) - hps = utils.get_hparams_from_file(args.config_dir) - device = "cuda:0" if torch.cuda.is_available() else "cpu" - ''' - device = ( - "cuda:0" - if torch.cuda.is_available() - else ( - "mps" - if sys.platform == "darwin" and torch.backends.mps.is_available() - else "cpu" - ) - ) - ''' - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model).to(device) - _ = net_g.eval() - - _ = utils.load_checkpoint(args.model_dir, net_g, None, skip_optimizer=True) - - speaker_ids = hps.data.spk2id - speakers = list(speaker_ids.keys()) - with gr.Blocks() as app: - with gr.Row(): - with gr.Column(): - gr.Markdown(value=""" - 【AI阿梓】在线语音合成(Bert-Vits2)\n - 作者:Xz乔希 https://space.bilibili.com/5859321\n - 声音归属:阿梓从小就很可爱 
https://space.bilibili.com/7706705\n - Bert-VITS2项目:https://github.com/Stardust-minus/Bert-VITS2\n - 使用本模型请严格遵守法律法规!\n - 发布二创作品请标注本项目作者及链接、作品使用Bert-VITS2 AI生成!\n - """) - text = gr.TextArea(label="Text", placeholder="Input Text Here", - value="大家好呀我是阿梓。") - speaker = gr.Dropdown(choices=speakers, value=speakers[0], label='Speaker') - sdp_ratio = gr.Slider(minimum=0.1, maximum=1, value=0.2, step=0.01, label='SDP/DP混合比') - noise_scale = gr.Slider(minimum=0.1, maximum=1, value=0.5, step=0.01, label='感情调节') - noise_scale_w = gr.Slider(minimum=0.1, maximum=1, value=0.9, step=0.01, label='音素长度') - length_scale = gr.Slider(minimum=0.1, maximum=2, value=1, step=0.01, label='生成长度') - btn = gr.Button("点击生成", variant="primary") - with gr.Column(): - text_output = gr.Textbox(label="Message") - audio_output = gr.Audio(label="Output Audio") - gr.Markdown(value=""" - 【AI塔菲】https://huggingface.co/spaces/XzJosh/Taffy-Bert-VITS2\n - 【AI东雪莲】https://huggingface.co/spaces/XzJosh/Azuma-Bert-VITS2\n - 【AI奶绿】https://huggingface.co/spaces/XzJosh/LAPLACE-Bert-VITS2\n - 【AI尼奈】https://huggingface.co/spaces/XzJosh/nine1-Bert-VITS2\n - 【AI珈乐】https://huggingface.co/spaces/XzJosh/Carol-Bert-VITS2\n - 【AI电棍】https://huggingface.co/spaces/XzJosh/otto-Bert-VITS2\n - 【AI七海】https://huggingface.co/spaces/XzJosh/Nana7mi-Bert-VITS2\n - """) - btn.click(tts_fn, - inputs=[text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale], - outputs=[text_output, audio_output]) - -# webbrowser.open("http://127.0.0.1:6006") -# app.launch(server_port=6006, show_error=True) - - app.launch(show_error=True) diff --git a/spaces/XzJosh/Jianmo-Bert-VITS2/README.md b/spaces/XzJosh/Jianmo-Bert-VITS2/README.md deleted file mode 100644 index d251140633499250c637c08250163be9a1285fa2..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Jianmo-Bert-VITS2/README.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -license: mit -sdk: gradio -title: AI剑魔② ---- \ No newline at end of file diff --git a/spaces/XzJosh/LAPLACE-Bert-VITS2/resample.py b/spaces/XzJosh/LAPLACE-Bert-VITS2/resample.py deleted file mode 100644 index 2ed1685654a371c5722168e9987809b05b1cb224..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/LAPLACE-Bert-VITS2/resample.py +++ /dev/null @@ -1,42 +0,0 @@ -import os -import argparse -import librosa -import numpy as np -from multiprocessing import Pool, cpu_count - -import soundfile -from scipy.io import wavfile -from tqdm import tqdm - - -def process(item): - spkdir, wav_name, args = item - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - if os.path.exists(wav_path) and '.wav' in wav_path: - os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True) - wav, sr = librosa.load(wav_path, sr=args.sr) - soundfile.write( - os.path.join(args.out_dir, speaker, wav_name), - wav, - sr - ) - - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--sr", type=int, default=44100, help="sampling rate") - parser.add_argument("--in_dir", type=str, default="./raw", help="path to source dir") - parser.add_argument("--out_dir", type=str, default="./dataset", help="path to target dir") - args = parser.parse_args() - # processs = 8 - processs = cpu_count()-2 if cpu_count() >4 else 1 - pool = Pool(processes=processs) - - for speaker in os.listdir(args.in_dir): - spk_dir = os.path.join(args.in_dir, speaker) - if os.path.isdir(spk_dir): - print(spk_dir) - for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if 
i.endswith("wav")])): - pass diff --git a/spaces/XzJosh/Lumi-Bert-VITS2/utils.py b/spaces/XzJosh/Lumi-Bert-VITS2/utils.py deleted file mode 100644 index c6aa6cfc64c33e2eed33e9845239e831fc1c4a1a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Lumi-Bert-VITS2/utils.py +++ /dev/null @@ -1,293 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - elif optimizer is None and not skip_optimizer: - #else: #Disable this line if Infer ,and enable the line upper - new_opt_dict = optimizer.state_dict() - new_opt_dict_params = new_opt_dict['param_groups'][0]['params'] - new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups'] - new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params - optimizer.load_state_dict(new_opt_dict) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - #assert "emb_g" not in k - # print("load", k) - new_state_dict[k] = saved_state_dict[k] - assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape) - except: - print("error, %s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - print("load ") - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = 
logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, default="./OUTPUT_MODEL", - help='Model name') - parser.add_argument('--cont', dest='cont', action="store_true", default=False, help="whether to continue training on the latest checkpoint") - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.cont = args.cont - return hparams - - -def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True): - """Freeing up space by deleting saved ckpts - - Arguments: - path_to_models -- Path to the model directory - n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth - sort_by_time -- True -> chronologically delete ckpts - False -> lexicographically delete ckpts - """ - import re - ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))] - name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1))) - time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))) - sort_key = time_key if sort_by_time else name_key - x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], - key=sort_key) - to_del = 
[os.path.join(path_to_models, fn) for fn in - (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])] - del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}") - del_routine = lambda x: [os.remove(x), del_info(x)] - rs = [del_routine(fn) for fn in to_del] - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/YuAnthony/Audio-Caption/tools/yaml_loader.py b/spaces/YuAnthony/Audio-Caption/tools/yaml_loader.py deleted file mode 100644 index 7889b0a063b9e5caa19f1f558b0834e32b9971e4..0000000000000000000000000000000000000000 --- a/spaces/YuAnthony/Audio-Caption/tools/yaml_loader.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import yaml - -__author__ = 'Konstantinos Drossos -- Tampere University' -__docformat__ = 'reStructuredText' -__all__ = ['YAMLLoader'] - - -class YAMLLoader(yaml.SafeLoader): - """Custom YAML loader for adding the functionality\ - of including one YAML file inside another. 
- - Code after: https://stackoverflow.com/a/9577670 - """ - - def __init__(self, stream): - - self._root = os.path.split(stream.name)[0] - super(YAMLLoader, self).__init__(stream) - - def include(self, node): - filename = os.path.join(self._root, self.construct_scalar(node)) - with open(filename, 'r') as f: - return yaml.load(f, YAMLLoader) - - -YAMLLoader.add_constructor('!include', YAMLLoader.include) - -# EOF diff --git a/spaces/Yudha515/Rvc-Models/tests/modules/test_codebooks_patterns.py b/spaces/Yudha515/Rvc-Models/tests/modules/test_codebooks_patterns.py deleted file mode 100644 index b658f4779a369f9ec8dde692a61b7f0fe3485724..0000000000000000000000000000000000000000 --- a/spaces/Yudha515/Rvc-Models/tests/modules/test_codebooks_patterns.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.modules.codebooks_patterns import ( - DelayedPatternProvider, - ParallelPatternProvider, - Pattern, - UnrolledPatternProvider, -) - - -class TestParallelPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == s - 1 # account for the 1st empty step - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_max_delay(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == 0 - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestDelayedPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - delays = [ - list(range(n_q)), - [0] + [1] * (n_q - 1), - [0] + [4] * (n_q - 1), - ] - for delay in delays: - provider = DelayedPatternProvider(n_q, delay) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + max(delay) + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = DelayedPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == max(0, s - code.q - 1) - - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - @pytest.mark.parametrize("delay", [[0, 1, 2, 3], [0, 1, 1, 1], [0, 3, 3, 3], [0, 3]]) - def test_pattern_max_delay(self, timesteps: int, delay: list): - provider = DelayedPatternProvider(len(delay), delay) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == max(delay) - assert len(pattern.valid_layout) 
== len(pattern.layout) - pattern.max_delay - - -class TestUnrolledPatternProvider: - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_get_pattern(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = provider.get_pattern(timesteps) - assert len(pattern.layout) == provider.num_virtual_steps(timesteps) + max_delay - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_pattern_max_delay(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == max_delay - - -class TestPattern: - - def ref_build_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to build the sequence from the pattern without using fancy scatter.""" - bs, n_q, T = z.shape - z = z.cpu().numpy() - assert n_q == pattern.n_q - assert T <= pattern.timesteps - inp = torch.full((bs, n_q, len(pattern.layout)), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < T: - inp[:, q, s] = z[:, q, t] - return torch.from_numpy(inp) - - def ref_revert_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to revert the sequence from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, n_q, S = z.shape - assert pattern.n_q == n_q - inp = torch.full((bs, pattern.n_q, pattern.timesteps), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < pattern.timesteps: - inp[:, q, t] = z[:, q, s] - return torch.from_numpy(inp) - - def ref_revert_pattern_logits(self, z: torch.Tensor, pattern: Pattern, special_token: float): - """Reference method to revert the logits from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, card, n_q, S = z.shape - assert pattern.n_q == n_q - ref_layout = pattern.layout - inp = torch.full((bs, card, pattern.n_q, pattern.timesteps), special_token, dtype=torch.float).numpy() - inp[:] = special_token - for s, v in enumerate(ref_layout[1:]): - if s < S: - for (t, q) in v: - if t < pattern.timesteps: - inp[:, :, q, t] = z[:, :, q, s] - return torch.from_numpy(inp) - - def _get_pattern_providers(self, n_q: int): - pattern_provider_1 = ParallelPatternProvider(n_q) - pattern_provider_2 = DelayedPatternProvider(n_q, list(range(n_q))) - pattern_provider_3 = DelayedPatternProvider(n_q, [0] + [1] * (n_q - 1)) - pattern_provider_4 = UnrolledPatternProvider( - n_q, flattening=list(range(n_q)), delays=[0] * n_q - ) - pattern_provider_5 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] * n_q - ) - pattern_provider_6 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] + [5] * (n_q - 1) - ) - return [ - pattern_provider_1, - pattern_provider_2, - pattern_provider_3, - pattern_provider_4, - pattern_provider_5, - pattern_provider_6, - ] - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 
72]) - def test_build_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # we can correctly build the sequence from the pattern - z = torch.randint(0, card, (bs, n_q, timesteps)) - ref_res = self.ref_build_pattern_sequence(z, pattern, special_token) - res, indexes, mask = pattern.build_pattern_sequence(z, special_token) - assert (res == ref_res).float().mean() == 1.0 - - # expected assertion fails on the number of timesteps - invalid_timesteps = [timesteps + 1] - if pattern.num_sequence_steps != pattern.timesteps: - invalid_timesteps.append(pattern.num_sequence_steps) - for i_timesteps in invalid_timesteps: - z2 = torch.randint(0, card, (bs, n_q, i_timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z2, special_token) - - # expected assertion fails on the number of codebooks - invalid_qs = [0, n_q - 1, n_q + 1] - for i_q in invalid_qs: - z3 = torch.randint(0, card, (bs, i_q, timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z3, special_token) - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_revert_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - ref_out = self.ref_revert_pattern_sequence(s, pattern, special_token) - # ensure our reference script retrieve the original sequence - assert z.shape == ref_out.shape - assert (z == ref_out).float().mean() == 1.0 - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_sequence(s, special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - @pytest.mark.parametrize("card", [1, 2, 256, 1024]) - def test_revert_pattern_logits(self, n_q: int, timesteps: int, card: int): - bs = 2 - special_token = card - logits_special_token = float('nan') - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - logits = torch.randn((bs, card, n_q, s.shape[-1])) - ref_out = self.ref_revert_pattern_logits(logits, pattern, logits_special_token) - # ensure our reference script retrieve the original sequence - assert ref_out.shape == torch.Size([bs, card, n_q, timesteps]) - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_logits(logits, logits_special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 diff --git a/spaces/Yuelili/RealNagrse/realesrgan/models/realesrnet_model.py b/spaces/Yuelili/RealNagrse/realesrgan/models/realesrnet_model.py deleted file mode 100644 index d11668f3712bffcd062c57db14d22ca3a0e1e59d..0000000000000000000000000000000000000000 --- 
a/spaces/Yuelili/RealNagrse/realesrgan/models/realesrnet_model.py +++ /dev/null @@ -1,188 +0,0 @@ -import numpy as np -import random -import torch -from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt -from basicsr.data.transforms import paired_random_crop -from basicsr.models.sr_model import SRModel -from basicsr.utils import DiffJPEG, USMSharp -from basicsr.utils.img_process_util import filter2D -from basicsr.utils.registry import MODEL_REGISTRY -from torch.nn import functional as F - - -@MODEL_REGISTRY.register() -class RealESRNetModel(SRModel): - """RealESRNet Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. - - It is trained without GAN losses. - It mainly performs: - 1. randomly synthesize LQ images in GPU tensors - 2. optimize the networks without GAN training. - """ - - def __init__(self, opt): - super(RealESRNetModel, self).__init__(opt) - self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts - self.usm_sharpener = USMSharp().cuda() # do usm sharpening - self.queue_size = opt.get('queue_size', 180) - - @torch.no_grad() - def _dequeue_and_enqueue(self): - """It is the training pair pool for increasing the diversity in a batch. - - Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a - batch could not have different resize scaling factors. Therefore, we employ this training pair pool - to increase the degradation diversity in a batch. - """ - # initialize - b, c, h, w = self.lq.size() - if not hasattr(self, 'queue_lr'): - assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' - self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() - _, c, h, w = self.gt.size() - self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() - self.queue_ptr = 0 - if self.queue_ptr == self.queue_size: # the pool is full - # do dequeue and enqueue - # shuffle - idx = torch.randperm(self.queue_size) - self.queue_lr = self.queue_lr[idx] - self.queue_gt = self.queue_gt[idx] - # get first b samples - lq_dequeue = self.queue_lr[0:b, :, :, :].clone() - gt_dequeue = self.queue_gt[0:b, :, :, :].clone() - # update the queue - self.queue_lr[0:b, :, :, :] = self.lq.clone() - self.queue_gt[0:b, :, :, :] = self.gt.clone() - - self.lq = lq_dequeue - self.gt = gt_dequeue - else: - # only do enqueue - self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() - self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() - self.queue_ptr = self.queue_ptr + b - - @torch.no_grad() - def feed_data(self, data): - """Accept data from dataloader, and then add second-order degradations to obtain LQ images. 
- """ - if self.is_train and self.opt.get('high_order_degradation', True): - # training data synthesis - self.gt = data['gt'].to(self.device) - # USM sharpen the GT images - if self.opt['gt_usm'] is True: - self.gt = self.usm_sharpener(self.gt) - - self.kernel1 = data['kernel1'].to(self.device) - self.kernel2 = data['kernel2'].to(self.device) - self.sinc_kernel = data['sinc_kernel'].to(self.device) - - ori_h, ori_w = self.gt.size()[2:4] - - # ----------------------- The first degradation process ----------------------- # - # blur - out = filter2D(self.gt, self.kernel1) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, scale_factor=scale, mode=mode) - # add noise - gray_noise_prob = self.opt['gray_noise_prob'] - if np.random.uniform() < self.opt['gaussian_noise_prob']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) - out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts - out = self.jpeger(out, quality=jpeg_p) - - # ----------------------- The second degradation process ----------------------- # - # blur - if np.random.uniform() < self.opt['second_blur_prob']: - out = filter2D(out, self.kernel2) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range2'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range2'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate( - out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) - # add noise - gray_noise_prob = self.opt['gray_noise_prob2'] - if np.random.uniform() < self.opt['gaussian_noise_prob2']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range2'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - - # JPEG compression + the final sinc filter - # We also need to resize images to desired sizes. We group [resize back + sinc filter] together - # as one operation. - # We consider two orders: - # 1. [resize back + sinc filter] + JPEG compression - # 2. JPEG compression + [resize back + sinc filter] - # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines. 
- if np.random.uniform() < 0.5: - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - else: - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - - # clamp and round - self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255. - - # random crop - gt_size = self.opt['gt_size'] - self.gt, self.lq = paired_random_crop(self.gt, self.lq, gt_size, self.opt['scale']) - - # training pair pool - self._dequeue_and_enqueue() - self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract - else: - # for paired training or validation - self.lq = data['lq'].to(self.device) - if 'gt' in data: - self.gt = data['gt'].to(self.device) - self.gt_usm = self.usm_sharpener(self.gt) - - def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): - # do not use the synthetic process during validation - self.is_train = False - super(RealESRNetModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img) - self.is_train = True diff --git a/spaces/a-v-bely/russian-task-generator/app.py b/spaces/a-v-bely/russian-task-generator/app.py deleted file mode 100644 index f4fc945984243ae37cf8356c9047fc86cd87aac3..0000000000000000000000000000000000000000 --- a/spaces/a-v-bely/russian-task-generator/app.py +++ /dev/null @@ -1,35 +0,0 @@ -import warnings -import streamlit as st -from utilities.utils import is_valid_uuid -from utilities_database.user_database_widgets import LogIn - -warnings.filterwarnings('ignore') -st.header('Добро пожаловать!') -st.subheader('Вы используете инструмент по автоматической генерации лексико-грамматических заданий по ' - 'русскому языку!') -st.write('**Зарегистрируйтесь или войдите в аккаунт**') -__login__obj = LogIn(auth_token=st.secrets['COURIER_AUTH_TOKEN'], - company_name=st.secrets['COMPANY_NAME'], - width=200, height=200, - logout_button_name='Выйти', - hide_menu_bool=False, - hide_footer_bool=False, - lottie_url='https://assets2.lottiefiles.com/packages/lf20_jcikwtux.json') -LOGGED_IN = __login__obj.build_login_ui() -st.session_state['-LOGGED_IN-'] = False -# Check for username in cookies -if '-USER_NAME-' not in st.session_state: - if __login__obj.cookies.get('__streamlit_login_signup_ui_username__'): - if not is_valid_uuid(__login__obj.cookies['__streamlit_login_signup_ui_username__']): - st.session_state['-USER_NAME-'] = __login__obj.cookies['__streamlit_login_signup_ui_username__'] - st.session_state['-LOGGED_IN_BOOL-'] = True - -if LOGGED_IN: - st.session_state['-LOGGED_IN_BOOL-'] = True - # st.session_state['-USER_NAME-'] = - st.success('Можете переходить к следующим вкладкам!') - -st.markdown('*Автор-разработчик: А.В.Белый, кафедра математической лингвистики, филологический факультет СПбГУ,' - ' 3 курс, бакалавриат, "Прикладная, компьютерная и математическая лингвистика (английский язык)"*' - '\n\n*Научный 
руководитель: канд. филол. наук, доц. О.А.Митрофанова*') -st.markdown('*E-mail: st087202@student.spbu.ru*') diff --git a/spaces/a-v-bely/spanish-task-generator/utilities_option_menu/frontend/dist/css/chunk-vendors.df4470f9.css b/spaces/a-v-bely/spanish-task-generator/utilities_option_menu/frontend/dist/css/chunk-vendors.df4470f9.css deleted file mode 100644 index 00d4cab5675ba2f7e0b92c3b071b031f15b81653..0000000000000000000000000000000000000000 --- a/spaces/a-v-bely/spanish-task-generator/utilities_option_menu/frontend/dist/css/chunk-vendors.df4470f9.css +++ /dev/null @@ -1,6 +0,0 @@ -@charset "UTF-8";/*! - * Bootstrap v5.1.3 (https://getbootstrap.com/) - * Copyright 2011-2021 The Bootstrap Authors - * Copyright 2011-2021 Twitter, Inc. - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) - */:root{--bs-blue:#0d6efd;--bs-indigo:#6610f2;--bs-purple:#6f42c1;--bs-pink:#d63384;--bs-red:#dc3545;--bs-orange:#fd7e14;--bs-yellow:#ffc107;--bs-green:#198754;--bs-teal:#20c997;--bs-cyan:#0dcaf0;--bs-white:#fff;--bs-gray:#6c757d;--bs-gray-dark:#343a40;--bs-gray-100:#f8f9fa;--bs-gray-200:#e9ecef;--bs-gray-300:#dee2e6;--bs-gray-400:#ced4da;--bs-gray-500:#adb5bd;--bs-gray-600:#6c757d;--bs-gray-700:#495057;--bs-gray-800:#343a40;--bs-gray-900:#212529;--bs-primary:#0d6efd;--bs-secondary:#6c757d;--bs-success:#198754;--bs-info:#0dcaf0;--bs-warning:#ffc107;--bs-danger:#dc3545;--bs-light:#f8f9fa;--bs-dark:#212529;--bs-primary-rgb:13,110,253;--bs-secondary-rgb:108,117,125;--bs-success-rgb:25,135,84;--bs-info-rgb:13,202,240;--bs-warning-rgb:255,193,7;--bs-danger-rgb:220,53,69;--bs-light-rgb:248,249,250;--bs-dark-rgb:33,37,41;--bs-white-rgb:255,255,255;--bs-black-rgb:0,0,0;--bs-body-color-rgb:33,37,41;--bs-body-bg-rgb:255,255,255;--bs-font-sans-serif:system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans","Liberation Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--bs-font-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--bs-gradient:linear-gradient(180deg,hsla(0,0%,100%,0.15),hsla(0,0%,100%,0));--bs-body-font-family:var(--bs-font-sans-serif);--bs-body-font-size:1rem;--bs-body-font-weight:400;--bs-body-line-height:1.5;--bs-body-color:#212529;--bs-body-bg:#fff}*,:after,:before{box-sizing:border-box}@media (prefers-reduced-motion:no-preference){:root{scroll-behavior:smooth}}body{margin:0;font-family:var(--bs-body-font-family);font-size:var(--bs-body-font-size);font-weight:var(--bs-body-font-weight);line-height:var(--bs-body-line-height);color:var(--bs-body-color);text-align:var(--bs-body-text-align);background-color:var(--bs-body-bg);-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}hr{margin:1rem 0;color:inherit;background-color:currentColor;border:0;opacity:.25}hr:not([size]){height:1px}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem;font-weight:500;line-height:1.2}.h1,h1{font-size:calc(1.375rem + 1.5vw)}@media (min-width:1200px){.h1,h1{font-size:2.5rem}}.h2,h2{font-size:calc(1.325rem + .9vw)}@media (min-width:1200px){.h2,h2{font-size:2rem}}.h3,h3{font-size:calc(1.3rem + .6vw)}@media (min-width:1200px){.h3,h3{font-size:1.75rem}}.h4,h4{font-size:calc(1.275rem + .3vw)}@media (min-width:1200px){.h4,h4{font-size:1.5rem}}.h5,h5{font-size:1.25rem}.h6,h6{font-size:1rem}p{margin-top:0;margin-bottom:1rem}abbr[data-bs-original-title],abbr[title]{text-decoration:underline 
dotted;cursor:help;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul{padding-left:2rem}dl,ol,ul{margin-top:0;margin-bottom:1rem}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}.small,small{font-size:.875em}.mark,mark{padding:.2em;background-color:#fcf8e3}sub,sup{position:relative;font-size:.75em;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#0d6efd;text-decoration:underline}a:hover{color:#0a58ca}a:not([href]):not([class]),a:not([href]):not([class]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:var(--bs-font-monospace);font-size:1em;direction:ltr;unicode-bidi:bidi-override}pre{display:block;margin-top:0;margin-bottom:1rem;overflow:auto;font-size:.875em}pre code{font-size:inherit;color:inherit;word-break:normal}code{font-size:.875em;color:#d63384;word-wrap:break-word}a>code{color:inherit}kbd{padding:.2rem .4rem;font-size:.875em;color:#fff;background-color:#212529;border-radius:.2rem}kbd kbd{padding:0;font-size:1em;font-weight:700}figure{margin:0 0 1rem}img,svg{vertical-align:middle}table{caption-side:bottom;border-collapse:collapse}caption{padding-top:.5rem;padding-bottom:.5rem;color:#6c757d;text-align:left}th{text-align:inherit;text-align:-webkit-match-parent}tbody,td,tfoot,th,thead,tr{border-color:inherit;border-style:solid;border-width:0}label{display:inline-block}button{border-radius:0}button:focus:not(:focus-visible){outline:0}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,select{text-transform:none}[role=button]{cursor:pointer}select{word-wrap:normal}select:disabled{opacity:1}[list]::-webkit-calendar-picker-indicator{display:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}::-moz-focus-inner{padding:0;border-style:none}textarea{resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{float:left;width:100%;padding:0;margin-bottom:.5rem;font-size:calc(1.275rem + .3vw);line-height:inherit}@media (min-width:1200px){legend{font-size:1.5rem}}legend+*{clear:left}::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-fields-wrapper,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-minute,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-text,::-webkit-datetime-edit-year-field{padding:0}::-webkit-inner-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:textfield}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-color-swatch-wrapper{padding:0}::file-selector-button{font:inherit}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}iframe{border:0}summary{display:list-item;cursor:pointer}progress{vertical-align:baseline}[hidden]{display:none!important}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:calc(1.625rem + 4.5vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-1{font-size:5rem}}.display-2{font-size:calc(1.575rem + 3.9vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-2{font-size:4.5rem}}.display-3{font-size:calc(1.525rem + 3.3vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-3{font-size:4rem}}.display-4{font-size:calc(1.475rem + 2.7vw);font-weight:300;line-height:1.2}@media 
(min-width:1200px){.display-4{font-size:3.5rem}}.display-5{font-size:calc(1.425rem + 2.1vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-5{font-size:3rem}}.display-6{font-size:calc(1.375rem + 1.5vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-6{font-size:2.5rem}}.list-inline,.list-unstyled{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:.875em;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote>:last-child{margin-bottom:0}.blockquote-footer{margin-top:-1rem;margin-bottom:1rem;font-size:.875em;color:#6c757d}.blockquote-footer:before{content:"— "}.img-fluid,.img-thumbnail{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:#fff;border:1px solid #dee2e6;border-radius:.25rem}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{font-size:.875em;color:#6c757d}.container,.container-fluid,.container-lg,.container-md,.container-sm,.container-xl,.container-xxl{width:100%;padding-right:var(--bs-gutter-x,.75rem);padding-left:var(--bs-gutter-x,.75rem);margin-right:auto;margin-left:auto}@media (min-width:576px){.container,.container-sm{max-width:540px}}@media (min-width:768px){.container,.container-md,.container-sm{max-width:720px}}@media (min-width:992px){.container,.container-lg,.container-md,.container-sm{max-width:960px}}@media (min-width:1200px){.container,.container-lg,.container-md,.container-sm,.container-xl{max-width:1140px}}@media (min-width:1400px){.container,.container-lg,.container-md,.container-sm,.container-xl,.container-xxl{max-width:1320px}}.row{--bs-gutter-x:1.5rem;--bs-gutter-y:0;display:flex;flex-wrap:wrap;margin-top:calc(-1*var(--bs-gutter-y));margin-right:calc(-0.5*var(--bs-gutter-x));margin-left:calc(-0.5*var(--bs-gutter-x))}.row>*{flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--bs-gutter-x)*0.5);padding-left:calc(var(--bs-gutter-x)*0.5);margin-top:var(--bs-gutter-y)}.col{flex:1 0 0%}.row-cols-auto>*{flex:0 0 auto;width:auto}.row-cols-1>*{flex:0 0 auto;width:100%}.row-cols-2>*{flex:0 0 auto;width:50%}.row-cols-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-4>*{flex:0 0 auto;width:25%}.row-cols-5>*{flex:0 0 auto;width:20%}.row-cols-6>*{flex:0 0 auto;width:16.6666666667%}.col-auto{flex:0 0 auto;width:auto}.col-1{flex:0 0 auto;width:8.33333333%}.col-2{flex:0 0 auto;width:16.66666667%}.col-3{flex:0 0 auto;width:25%}.col-4{flex:0 0 auto;width:33.33333333%}.col-5{flex:0 0 auto;width:41.66666667%}.col-6{flex:0 0 auto;width:50%}.col-7{flex:0 0 auto;width:58.33333333%}.col-8{flex:0 0 auto;width:66.66666667%}.col-9{flex:0 0 auto;width:75%}.col-10{flex:0 0 auto;width:83.33333333%}.col-11{flex:0 0 auto;width:91.66666667%}.col-12{flex:0 0 
auto;width:100%}.offset-1{margin-left:8.33333333%}.offset-2{margin-left:16.66666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.33333333%}.offset-5{margin-left:41.66666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.33333333%}.offset-8{margin-left:66.66666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.33333333%}.offset-11{margin-left:91.66666667%}.g-0,.gx-0{--bs-gutter-x:0}.g-0,.gy-0{--bs-gutter-y:0}.g-1,.gx-1{--bs-gutter-x:0.25rem}.g-1,.gy-1{--bs-gutter-y:0.25rem}.g-2,.gx-2{--bs-gutter-x:0.5rem}.g-2,.gy-2{--bs-gutter-y:0.5rem}.g-3,.gx-3{--bs-gutter-x:1rem}.g-3,.gy-3{--bs-gutter-y:1rem}.g-4,.gx-4{--bs-gutter-x:1.5rem}.g-4,.gy-4{--bs-gutter-y:1.5rem}.g-5,.gx-5{--bs-gutter-x:3rem}.g-5,.gy-5{--bs-gutter-y:3rem}@media (min-width:576px){.col-sm{flex:1 0 0%}.row-cols-sm-auto>*{flex:0 0 auto;width:auto}.row-cols-sm-1>*{flex:0 0 auto;width:100%}.row-cols-sm-2>*{flex:0 0 auto;width:50%}.row-cols-sm-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-sm-4>*{flex:0 0 auto;width:25%}.row-cols-sm-5>*{flex:0 0 auto;width:20%}.row-cols-sm-6>*{flex:0 0 auto;width:16.6666666667%}.col-sm-auto{flex:0 0 auto;width:auto}.col-sm-1{flex:0 0 auto;width:8.33333333%}.col-sm-2{flex:0 0 auto;width:16.66666667%}.col-sm-3{flex:0 0 auto;width:25%}.col-sm-4{flex:0 0 auto;width:33.33333333%}.col-sm-5{flex:0 0 auto;width:41.66666667%}.col-sm-6{flex:0 0 auto;width:50%}.col-sm-7{flex:0 0 auto;width:58.33333333%}.col-sm-8{flex:0 0 auto;width:66.66666667%}.col-sm-9{flex:0 0 auto;width:75%}.col-sm-10{flex:0 0 auto;width:83.33333333%}.col-sm-11{flex:0 0 auto;width:91.66666667%}.col-sm-12{flex:0 0 auto;width:100%}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.33333333%}.offset-sm-2{margin-left:16.66666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.33333333%}.offset-sm-5{margin-left:41.66666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.33333333%}.offset-sm-8{margin-left:66.66666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.33333333%}.offset-sm-11{margin-left:91.66666667%}.g-sm-0,.gx-sm-0{--bs-gutter-x:0}.g-sm-0,.gy-sm-0{--bs-gutter-y:0}.g-sm-1,.gx-sm-1{--bs-gutter-x:0.25rem}.g-sm-1,.gy-sm-1{--bs-gutter-y:0.25rem}.g-sm-2,.gx-sm-2{--bs-gutter-x:0.5rem}.g-sm-2,.gy-sm-2{--bs-gutter-y:0.5rem}.g-sm-3,.gx-sm-3{--bs-gutter-x:1rem}.g-sm-3,.gy-sm-3{--bs-gutter-y:1rem}.g-sm-4,.gx-sm-4{--bs-gutter-x:1.5rem}.g-sm-4,.gy-sm-4{--bs-gutter-y:1.5rem}.g-sm-5,.gx-sm-5{--bs-gutter-x:3rem}.g-sm-5,.gy-sm-5{--bs-gutter-y:3rem}}@media (min-width:768px){.col-md{flex:1 0 0%}.row-cols-md-auto>*{flex:0 0 auto;width:auto}.row-cols-md-1>*{flex:0 0 auto;width:100%}.row-cols-md-2>*{flex:0 0 auto;width:50%}.row-cols-md-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-md-4>*{flex:0 0 auto;width:25%}.row-cols-md-5>*{flex:0 0 auto;width:20%}.row-cols-md-6>*{flex:0 0 auto;width:16.6666666667%}.col-md-auto{flex:0 0 auto;width:auto}.col-md-1{flex:0 0 auto;width:8.33333333%}.col-md-2{flex:0 0 auto;width:16.66666667%}.col-md-3{flex:0 0 auto;width:25%}.col-md-4{flex:0 0 auto;width:33.33333333%}.col-md-5{flex:0 0 auto;width:41.66666667%}.col-md-6{flex:0 0 auto;width:50%}.col-md-7{flex:0 0 auto;width:58.33333333%}.col-md-8{flex:0 0 auto;width:66.66666667%}.col-md-9{flex:0 0 auto;width:75%}.col-md-10{flex:0 0 auto;width:83.33333333%}.col-md-11{flex:0 0 auto;width:91.66666667%}.col-md-12{flex:0 0 
auto;width:100%}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.33333333%}.offset-md-2{margin-left:16.66666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.33333333%}.offset-md-5{margin-left:41.66666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.33333333%}.offset-md-8{margin-left:66.66666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.33333333%}.offset-md-11{margin-left:91.66666667%}.g-md-0,.gx-md-0{--bs-gutter-x:0}.g-md-0,.gy-md-0{--bs-gutter-y:0}.g-md-1,.gx-md-1{--bs-gutter-x:0.25rem}.g-md-1,.gy-md-1{--bs-gutter-y:0.25rem}.g-md-2,.gx-md-2{--bs-gutter-x:0.5rem}.g-md-2,.gy-md-2{--bs-gutter-y:0.5rem}.g-md-3,.gx-md-3{--bs-gutter-x:1rem}.g-md-3,.gy-md-3{--bs-gutter-y:1rem}.g-md-4,.gx-md-4{--bs-gutter-x:1.5rem}.g-md-4,.gy-md-4{--bs-gutter-y:1.5rem}.g-md-5,.gx-md-5{--bs-gutter-x:3rem}.g-md-5,.gy-md-5{--bs-gutter-y:3rem}}@media (min-width:992px){.col-lg{flex:1 0 0%}.row-cols-lg-auto>*{flex:0 0 auto;width:auto}.row-cols-lg-1>*{flex:0 0 auto;width:100%}.row-cols-lg-2>*{flex:0 0 auto;width:50%}.row-cols-lg-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-lg-4>*{flex:0 0 auto;width:25%}.row-cols-lg-5>*{flex:0 0 auto;width:20%}.row-cols-lg-6>*{flex:0 0 auto;width:16.6666666667%}.col-lg-auto{flex:0 0 auto;width:auto}.col-lg-1{flex:0 0 auto;width:8.33333333%}.col-lg-2{flex:0 0 auto;width:16.66666667%}.col-lg-3{flex:0 0 auto;width:25%}.col-lg-4{flex:0 0 auto;width:33.33333333%}.col-lg-5{flex:0 0 auto;width:41.66666667%}.col-lg-6{flex:0 0 auto;width:50%}.col-lg-7{flex:0 0 auto;width:58.33333333%}.col-lg-8{flex:0 0 auto;width:66.66666667%}.col-lg-9{flex:0 0 auto;width:75%}.col-lg-10{flex:0 0 auto;width:83.33333333%}.col-lg-11{flex:0 0 auto;width:91.66666667%}.col-lg-12{flex:0 0 auto;width:100%}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.33333333%}.offset-lg-2{margin-left:16.66666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.33333333%}.offset-lg-5{margin-left:41.66666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.33333333%}.offset-lg-8{margin-left:66.66666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.33333333%}.offset-lg-11{margin-left:91.66666667%}.g-lg-0,.gx-lg-0{--bs-gutter-x:0}.g-lg-0,.gy-lg-0{--bs-gutter-y:0}.g-lg-1,.gx-lg-1{--bs-gutter-x:0.25rem}.g-lg-1,.gy-lg-1{--bs-gutter-y:0.25rem}.g-lg-2,.gx-lg-2{--bs-gutter-x:0.5rem}.g-lg-2,.gy-lg-2{--bs-gutter-y:0.5rem}.g-lg-3,.gx-lg-3{--bs-gutter-x:1rem}.g-lg-3,.gy-lg-3{--bs-gutter-y:1rem}.g-lg-4,.gx-lg-4{--bs-gutter-x:1.5rem}.g-lg-4,.gy-lg-4{--bs-gutter-y:1.5rem}.g-lg-5,.gx-lg-5{--bs-gutter-x:3rem}.g-lg-5,.gy-lg-5{--bs-gutter-y:3rem}}@media (min-width:1200px){.col-xl{flex:1 0 0%}.row-cols-xl-auto>*{flex:0 0 auto;width:auto}.row-cols-xl-1>*{flex:0 0 auto;width:100%}.row-cols-xl-2>*{flex:0 0 auto;width:50%}.row-cols-xl-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-xl-4>*{flex:0 0 auto;width:25%}.row-cols-xl-5>*{flex:0 0 auto;width:20%}.row-cols-xl-6>*{flex:0 0 auto;width:16.6666666667%}.col-xl-auto{flex:0 0 auto;width:auto}.col-xl-1{flex:0 0 auto;width:8.33333333%}.col-xl-2{flex:0 0 auto;width:16.66666667%}.col-xl-3{flex:0 0 auto;width:25%}.col-xl-4{flex:0 0 auto;width:33.33333333%}.col-xl-5{flex:0 0 auto;width:41.66666667%}.col-xl-6{flex:0 0 auto;width:50%}.col-xl-7{flex:0 0 auto;width:58.33333333%}.col-xl-8{flex:0 0 auto;width:66.66666667%}.col-xl-9{flex:0 0 auto;width:75%}.col-xl-10{flex:0 0 auto;width:83.33333333%}.col-xl-11{flex:0 0 auto;width:91.66666667%}.col-xl-12{flex:0 0 
auto;width:100%}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.33333333%}.offset-xl-2{margin-left:16.66666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.33333333%}.offset-xl-5{margin-left:41.66666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.33333333%}.offset-xl-8{margin-left:66.66666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.33333333%}.offset-xl-11{margin-left:91.66666667%}.g-xl-0,.gx-xl-0{--bs-gutter-x:0}.g-xl-0,.gy-xl-0{--bs-gutter-y:0}.g-xl-1,.gx-xl-1{--bs-gutter-x:0.25rem}.g-xl-1,.gy-xl-1{--bs-gutter-y:0.25rem}.g-xl-2,.gx-xl-2{--bs-gutter-x:0.5rem}.g-xl-2,.gy-xl-2{--bs-gutter-y:0.5rem}.g-xl-3,.gx-xl-3{--bs-gutter-x:1rem}.g-xl-3,.gy-xl-3{--bs-gutter-y:1rem}.g-xl-4,.gx-xl-4{--bs-gutter-x:1.5rem}.g-xl-4,.gy-xl-4{--bs-gutter-y:1.5rem}.g-xl-5,.gx-xl-5{--bs-gutter-x:3rem}.g-xl-5,.gy-xl-5{--bs-gutter-y:3rem}}@media (min-width:1400px){.col-xxl{flex:1 0 0%}.row-cols-xxl-auto>*{flex:0 0 auto;width:auto}.row-cols-xxl-1>*{flex:0 0 auto;width:100%}.row-cols-xxl-2>*{flex:0 0 auto;width:50%}.row-cols-xxl-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-xxl-4>*{flex:0 0 auto;width:25%}.row-cols-xxl-5>*{flex:0 0 auto;width:20%}.row-cols-xxl-6>*{flex:0 0 auto;width:16.6666666667%}.col-xxl-auto{flex:0 0 auto;width:auto}.col-xxl-1{flex:0 0 auto;width:8.33333333%}.col-xxl-2{flex:0 0 auto;width:16.66666667%}.col-xxl-3{flex:0 0 auto;width:25%}.col-xxl-4{flex:0 0 auto;width:33.33333333%}.col-xxl-5{flex:0 0 auto;width:41.66666667%}.col-xxl-6{flex:0 0 auto;width:50%}.col-xxl-7{flex:0 0 auto;width:58.33333333%}.col-xxl-8{flex:0 0 auto;width:66.66666667%}.col-xxl-9{flex:0 0 auto;width:75%}.col-xxl-10{flex:0 0 auto;width:83.33333333%}.col-xxl-11{flex:0 0 auto;width:91.66666667%}.col-xxl-12{flex:0 0 auto;width:100%}.offset-xxl-0{margin-left:0}.offset-xxl-1{margin-left:8.33333333%}.offset-xxl-2{margin-left:16.66666667%}.offset-xxl-3{margin-left:25%}.offset-xxl-4{margin-left:33.33333333%}.offset-xxl-5{margin-left:41.66666667%}.offset-xxl-6{margin-left:50%}.offset-xxl-7{margin-left:58.33333333%}.offset-xxl-8{margin-left:66.66666667%}.offset-xxl-9{margin-left:75%}.offset-xxl-10{margin-left:83.33333333%}.offset-xxl-11{margin-left:91.66666667%}.g-xxl-0,.gx-xxl-0{--bs-gutter-x:0}.g-xxl-0,.gy-xxl-0{--bs-gutter-y:0}.g-xxl-1,.gx-xxl-1{--bs-gutter-x:0.25rem}.g-xxl-1,.gy-xxl-1{--bs-gutter-y:0.25rem}.g-xxl-2,.gx-xxl-2{--bs-gutter-x:0.5rem}.g-xxl-2,.gy-xxl-2{--bs-gutter-y:0.5rem}.g-xxl-3,.gx-xxl-3{--bs-gutter-x:1rem}.g-xxl-3,.gy-xxl-3{--bs-gutter-y:1rem}.g-xxl-4,.gx-xxl-4{--bs-gutter-x:1.5rem}.g-xxl-4,.gy-xxl-4{--bs-gutter-y:1.5rem}.g-xxl-5,.gx-xxl-5{--bs-gutter-x:3rem}.g-xxl-5,.gy-xxl-5{--bs-gutter-y:3rem}}.table{--bs-table-bg:transparent;--bs-table-accent-bg:transparent;--bs-table-striped-color:#212529;--bs-table-striped-bg:rgba(0,0,0,0.05);--bs-table-active-color:#212529;--bs-table-active-bg:rgba(0,0,0,0.1);--bs-table-hover-color:#212529;--bs-table-hover-bg:rgba(0,0,0,0.075);width:100%;margin-bottom:1rem;color:#212529;vertical-align:top;border-color:#dee2e6}.table>:not(caption)>*>*{padding:.5rem .5rem;background-color:var(--bs-table-bg);border-bottom-width:1px;box-shadow:inset 0 0 0 9999px var(--bs-table-accent-bg)}.table>tbody{vertical-align:inherit}.table>thead{vertical-align:bottom}.table>:not(:first-child){border-top:2px solid currentColor}.caption-top{caption-side:top}.table-sm>:not(caption)>*>*{padding:.25rem .25rem}.table-bordered>:not(caption)>*{border-width:1px 0}.table-bordered>:not(caption)>*>*{border-width:0 
1px}.table-borderless>:not(caption)>*>*{border-bottom-width:0}.table-borderless>:not(:first-child){border-top-width:0}.table-striped>tbody>tr:nth-of-type(odd)>*{--bs-table-accent-bg:var(--bs-table-striped-bg);color:var(--bs-table-striped-color)}.table-active{--bs-table-accent-bg:var(--bs-table-active-bg);color:var(--bs-table-active-color)}.table-hover>tbody>tr:hover>*{--bs-table-accent-bg:var(--bs-table-hover-bg);color:var(--bs-table-hover-color)}.table-primary{--bs-table-bg:#cfe2ff;--bs-table-striped-bg:#c5d7f2;--bs-table-striped-color:#000;--bs-table-active-bg:#bacbe6;--bs-table-active-color:#000;--bs-table-hover-bg:#bfd1ec;--bs-table-hover-color:#000;color:#000;border-color:#bacbe6}.table-secondary{--bs-table-bg:#e2e3e5;--bs-table-striped-bg:#d7d8da;--bs-table-striped-color:#000;--bs-table-active-bg:#cbccce;--bs-table-active-color:#000;--bs-table-hover-bg:#d1d2d4;--bs-table-hover-color:#000;color:#000;border-color:#cbccce}.table-success{--bs-table-bg:#d1e7dd;--bs-table-striped-bg:#c7dbd2;--bs-table-striped-color:#000;--bs-table-active-bg:#bcd0c7;--bs-table-active-color:#000;--bs-table-hover-bg:#c1d6cc;--bs-table-hover-color:#000;color:#000;border-color:#bcd0c7}.table-info{--bs-table-bg:#cff4fc;--bs-table-striped-bg:#c5e8ef;--bs-table-striped-color:#000;--bs-table-active-bg:#badce3;--bs-table-active-color:#000;--bs-table-hover-bg:#bfe2e9;--bs-table-hover-color:#000;color:#000;border-color:#badce3}.table-warning{--bs-table-bg:#fff3cd;--bs-table-striped-bg:#f2e7c3;--bs-table-striped-color:#000;--bs-table-active-bg:#e6dbb9;--bs-table-active-color:#000;--bs-table-hover-bg:#ece1be;--bs-table-hover-color:#000;color:#000;border-color:#e6dbb9}.table-danger{--bs-table-bg:#f8d7da;--bs-table-striped-bg:#eccccf;--bs-table-striped-color:#000;--bs-table-active-bg:#dfc2c4;--bs-table-active-color:#000;--bs-table-hover-bg:#e5c7ca;--bs-table-hover-color:#000;color:#000;border-color:#dfc2c4}.table-light{--bs-table-bg:#f8f9fa;--bs-table-striped-bg:#ecedee;--bs-table-striped-color:#000;--bs-table-active-bg:#dfe0e1;--bs-table-active-color:#000;--bs-table-hover-bg:#e5e6e7;--bs-table-hover-color:#000;color:#000;border-color:#dfe0e1}.table-dark{--bs-table-bg:#212529;--bs-table-striped-bg:#2c3034;--bs-table-striped-color:#fff;--bs-table-active-bg:#373b3e;--bs-table-active-color:#fff;--bs-table-hover-bg:#323539;--bs-table-hover-color:#fff;color:#fff;border-color:#373b3e}.table-responsive{overflow-x:auto;-webkit-overflow-scrolling:touch}@media (max-width:575.98px){.table-responsive-sm{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:767.98px){.table-responsive-md{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:991.98px){.table-responsive-lg{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:1199.98px){.table-responsive-xl{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:1399.98px){.table-responsive-xxl{overflow-x:auto;-webkit-overflow-scrolling:touch}}.form-label{margin-bottom:.5rem}.col-form-label{padding-top:calc(.375rem + 1px);padding-bottom:calc(.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + 1px);padding-bottom:calc(.5rem + 1px);font-size:1.25rem}.col-form-label-sm{padding-top:calc(.25rem + 1px);padding-bottom:calc(.25rem + 1px);font-size:.875rem}.form-text{margin-top:.25rem;font-size:.875em;color:#6c757d}.form-control{display:block;width:100%;padding:.375rem 
.75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#212529;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;-webkit-appearance:none;-moz-appearance:none;appearance:none;border-radius:.25rem;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-control{transition:none}}.form-control[type=file]{overflow:hidden}.form-control[type=file]:not(:disabled):not([readonly]){cursor:pointer}.form-control:focus{color:#212529;background-color:#fff;border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-control::-webkit-date-and-time-value{height:1.5em}.form-control::-moz-placeholder{color:#6c757d;opacity:1}.form-control:-ms-input-placeholder{color:#6c757d;opacity:1}.form-control::placeholder{color:#6c757d;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}.form-control::file-selector-button{padding:.375rem .75rem;margin:-.375rem -.75rem;-webkit-margin-end:.75rem;margin-inline-end:.75rem;color:#212529;background-color:#e9ecef;pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:1px;border-radius:0;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-control::-webkit-file-upload-button{-webkit-transition:none;transition:none}.form-control::file-selector-button{transition:none}}.form-control:hover:not(:disabled):not([readonly])::file-selector-button{background-color:#dde0e3}.form-control::-webkit-file-upload-button{padding:.375rem .75rem;margin:-.375rem -.75rem;-webkit-margin-end:.75rem;margin-inline-end:.75rem;color:#212529;background-color:#e9ecef;pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:1px;border-radius:0;-webkit-transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-control::-webkit-file-upload-button{-webkit-transition:none;transition:none}}.form-control:hover:not(:disabled):not([readonly])::-webkit-file-upload-button{background-color:#dde0e3}.form-control-plaintext{display:block;width:100%;padding:.375rem 0;margin-bottom:0;line-height:1.5;color:#212529;background-color:transparent;border:solid transparent;border-width:1px 0}.form-control-plaintext.form-control-lg,.form-control-plaintext.form-control-sm{padding-right:0;padding-left:0}.form-control-sm{min-height:calc(1.5em + .5rem + 2px);padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.form-control-sm::file-selector-button{padding:.25rem .5rem;margin:-.25rem -.5rem;-webkit-margin-end:.5rem;margin-inline-end:.5rem}.form-control-sm::-webkit-file-upload-button{padding:.25rem .5rem;margin:-.25rem -.5rem;-webkit-margin-end:.5rem;margin-inline-end:.5rem}.form-control-lg{min-height:calc(1.5em + 1rem + 2px);padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.form-control-lg::file-selector-button{padding:.5rem 1rem;margin:-.5rem -1rem;-webkit-margin-end:1rem;margin-inline-end:1rem}.form-control-lg::-webkit-file-upload-button{padding:.5rem 1rem;margin:-.5rem -1rem;-webkit-margin-end:1rem;margin-inline-end:1rem}textarea.form-control{min-height:calc(1.5em + .75rem + 2px)}textarea.form-control-sm{min-height:calc(1.5em + .5rem + 
2px)}textarea.form-control-lg{min-height:calc(1.5em + 1rem + 2px)}.form-control-color{width:3rem;height:auto;padding:.375rem}.form-control-color:not(:disabled):not([readonly]){cursor:pointer}.form-control-color::-moz-color-swatch{height:1.5em;border-radius:.25rem}.form-control-color::-webkit-color-swatch{height:1.5em;border-radius:.25rem}.form-select{display:block;width:100%;padding:.375rem 2.25rem .375rem .75rem;-moz-padding-start:calc(.75rem - 3px);font-size:1rem;font-weight:400;line-height:1.5;color:#212529;background-color:#fff;background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3E%3Cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 6-6'/%3E%3C/svg%3E");background-repeat:no-repeat;background-position:right .75rem center;background-size:16px 12px;border:1px solid #ced4da;border-radius:.25rem;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out;-webkit-appearance:none;-moz-appearance:none;appearance:none}@media (prefers-reduced-motion:reduce){.form-select{transition:none}}.form-select:focus{border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-select[multiple],.form-select[size]:not([size="1"]){padding-right:.75rem;background-image:none}.form-select:disabled{background-color:#e9ecef}.form-select:-moz-focusring{color:transparent;text-shadow:0 0 0 #212529}.form-select-sm{padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:.875rem;border-radius:.2rem}.form-select-lg{padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.25rem;border-radius:.3rem}.form-check{display:block;min-height:1.5rem;padding-left:1.5em;margin-bottom:.125rem}.form-check .form-check-input{float:left;margin-left:-1.5em}.form-check-input{width:1em;height:1em;margin-top:.25em;vertical-align:top;background-color:#fff;background-repeat:no-repeat;background-position:50%;background-size:contain;border:1px solid rgba(0,0,0,.25);-webkit-appearance:none;-moz-appearance:none;appearance:none;-webkit-print-color-adjust:exact;color-adjust:exact}.form-check-input[type=checkbox]{border-radius:.25em}.form-check-input[type=radio]{border-radius:50%}.form-check-input:active{-webkit-filter:brightness(90%);filter:brightness(90%)}.form-check-input:focus{border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-check-input:checked{background-color:#0d6efd;border-color:#0d6efd}.form-check-input:checked[type=checkbox]{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3E%3Cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 10l3 3 6-6'/%3E%3C/svg%3E")}.form-check-input:checked[type=radio]{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='2' fill='%23fff'/%3E%3C/svg%3E")}.form-check-input[type=checkbox]:indeterminate{background-color:#0d6efd;border-color:#0d6efd;background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3E%3Cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 
10h8'/%3E%3C/svg%3E")}.form-check-input:disabled{pointer-events:none;-webkit-filter:none;filter:none;opacity:.5}.form-check-input:disabled~.form-check-label,.form-check-input[disabled]~.form-check-label{opacity:.5}.form-switch{padding-left:2.5em}.form-switch .form-check-input{width:2em;margin-left:-2.5em;background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='rgba(0, 0, 0, 0.25)'/%3E%3C/svg%3E");background-position:0;border-radius:2em;transition:background-position .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-switch .form-check-input{transition:none}}.form-switch .form-check-input:focus{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%2386b7fe'/%3E%3C/svg%3E")}.form-switch .form-check-input:checked{background-position:100%;background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E")}.form-check-inline{display:inline-block;margin-right:1rem}.btn-check{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.btn-check:disabled+.btn,.btn-check[disabled]+.btn{pointer-events:none;-webkit-filter:none;filter:none;opacity:.65}.form-range{width:100%;height:1.5rem;padding:0;background-color:transparent;-webkit-appearance:none;-moz-appearance:none;appearance:none}.form-range:focus{outline:0}.form-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(13,110,253,.25)}.form-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(13,110,253,.25)}.form-range::-moz-focus-outer{border:0}.form-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-.25rem;background-color:#0d6efd;border:0;border-radius:1rem;-webkit-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-webkit-appearance:none;appearance:none}@media (prefers-reduced-motion:reduce){.form-range::-webkit-slider-thumb{-webkit-transition:none;transition:none}}.form-range::-webkit-slider-thumb:active{background-color:#b6d4fe}.form-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.form-range::-moz-range-thumb{width:1rem;height:1rem;background-color:#0d6efd;border:0;border-radius:1rem;-moz-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-moz-appearance:none;appearance:none}@media (prefers-reduced-motion:reduce){.form-range::-moz-range-thumb{-moz-transition:none;transition:none}}.form-range::-moz-range-thumb:active{background-color:#b6d4fe}.form-range::-moz-range-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.form-range:disabled{pointer-events:none}.form-range:disabled::-webkit-slider-thumb{background-color:#adb5bd}.form-range:disabled::-moz-range-thumb{background-color:#adb5bd}.form-floating{position:relative}.form-floating>.form-control,.form-floating>.form-select{height:calc(3.5rem + 2px);line-height:1.25}.form-floating>label{position:absolute;top:0;left:0;height:100%;padding:1rem 
.75rem;pointer-events:none;border:1px solid transparent;transform-origin:0 0;transition:opacity .1s ease-in-out,transform .1s ease-in-out}@media (prefers-reduced-motion:reduce){.form-floating>label{transition:none}}.form-floating>.form-control{padding:1rem .75rem}.form-floating>.form-control::-moz-placeholder{color:transparent}.form-floating>.form-control:-ms-input-placeholder{color:transparent}.form-floating>.form-control::placeholder{color:transparent}.form-floating>.form-control:not(:-moz-placeholder-shown){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:not(:-ms-input-placeholder){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:focus,.form-floating>.form-control:not(:placeholder-shown){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:-webkit-autofill{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-select{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:not(:-moz-placeholder-shown)~label{opacity:.65;transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.form-floating>.form-control:not(:-ms-input-placeholder)~label{opacity:.65;transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.form-floating>.form-control:focus~label,.form-floating>.form-control:not(:placeholder-shown)~label,.form-floating>.form-select~label{opacity:.65;transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.form-floating>.form-control:-webkit-autofill~label{opacity:.65;transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.input-group{position:relative;display:flex;flex-wrap:wrap;align-items:stretch;width:100%}.input-group>.form-control,.input-group>.form-select{position:relative;flex:1 1 auto;width:1%;min-width:0}.input-group>.form-control:focus,.input-group>.form-select:focus{z-index:3}.input-group .btn{position:relative;z-index:2}.input-group .btn:focus{z-index:3}.input-group-text{display:flex;align-items:center;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:.25rem}.input-group-lg>.btn,.input-group-lg>.form-control,.input-group-lg>.form-select,.input-group-lg>.input-group-text{padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.input-group-sm>.btn,.input-group-sm>.form-control,.input-group-sm>.form-select,.input-group-sm>.input-group-text{padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.input-group-lg>.form-select,.input-group-sm>.form-select{padding-right:3rem}.input-group:not(.has-validation)>.dropdown-toggle:nth-last-child(n+3),.input-group:not(.has-validation)>:not(:last-child):not(.dropdown-toggle):not(.dropdown-menu){border-top-right-radius:0;border-bottom-right-radius:0}.input-group.has-validation>.dropdown-toggle:nth-last-child(n+4),.input-group.has-validation>:nth-last-child(n+3):not(.dropdown-toggle):not(.dropdown-menu){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>:not(:first-child):not(.dropdown-menu):not(.valid-tooltip):not(.valid-feedback):not(.invalid-tooltip):not(.invalid-feedback){margin-left:-1px;border-top-left-radius:0;border-bottom-left-radius:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:.875em;color:#198754}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem 
.5rem;margin-top:.1rem;font-size:.875rem;color:#fff;background-color:rgba(25,135,84,.9);border-radius:.25rem}.is-valid~.valid-feedback,.is-valid~.valid-tooltip,.was-validated :valid~.valid-feedback,.was-validated :valid~.valid-tooltip{display:block}.form-control.is-valid,.was-validated .form-control:valid{border-color:#198754;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23198754' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3E%3C/svg%3E");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-valid:focus,.was-validated .form-control:valid:focus{border-color:#198754;box-shadow:0 0 0 .25rem rgba(25,135,84,.25)}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.form-select.is-valid,.was-validated .form-select:valid{border-color:#198754}.form-select.is-valid:not([multiple]):not([size]),.form-select.is-valid:not([multiple])[size="1"],.was-validated .form-select:valid:not([multiple]):not([size]),.was-validated .form-select:valid:not([multiple])[size="1"]{padding-right:4.125rem;background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3E%3Cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 6-6'/%3E%3C/svg%3E"),url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23198754' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3E%3C/svg%3E");background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(.75em + .375rem) calc(.75em + .375rem)}.form-select.is-valid:focus,.was-validated .form-select:valid:focus{border-color:#198754;box-shadow:0 0 0 .25rem rgba(25,135,84,.25)}.form-check-input.is-valid,.was-validated .form-check-input:valid{border-color:#198754}.form-check-input.is-valid:checked,.was-validated .form-check-input:valid:checked{background-color:#198754}.form-check-input.is-valid:focus,.was-validated .form-check-input:valid:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,.25)}.form-check-input.is-valid~.form-check-label,.was-validated .form-check-input:valid~.form-check-label{color:#198754}.form-check-inline .form-check-input~.valid-feedback{margin-left:.5em}.input-group .form-control.is-valid,.input-group .form-select.is-valid,.was-validated .input-group .form-control:valid,.was-validated .input-group .form-select:valid{z-index:1}.input-group .form-control.is-valid:focus,.input-group .form-select.is-valid:focus,.was-validated .input-group .form-control:valid:focus,.was-validated .input-group .form-select:valid:focus{z-index:3}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:.875em;color:#dc3545}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;color:#fff;background-color:rgba(220,53,69,.9);border-radius:.25rem}.is-invalid~.invalid-feedback,.is-invalid~.invalid-tooltip,.was-validated :invalid~.invalid-feedback,.was-validated :invalid~.invalid-tooltip{display:block}.form-control.is-invalid,.was-validated 
.form-control:invalid{border-color:#dc3545;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23dc3545'%3E%3Ccircle cx='6' cy='6' r='4.5'/%3E%3Cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3E%3Ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3E%3C/svg%3E");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-invalid:focus,.was-validated .form-control:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .25rem rgba(220,53,69,.25)}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.form-select.is-invalid,.was-validated .form-select:invalid{border-color:#dc3545}.form-select.is-invalid:not([multiple]):not([size]),.form-select.is-invalid:not([multiple])[size="1"],.was-validated .form-select:invalid:not([multiple]):not([size]),.was-validated .form-select:invalid:not([multiple])[size="1"]{padding-right:4.125rem;background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3E%3Cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 6-6'/%3E%3C/svg%3E"),url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23dc3545'%3E%3Ccircle cx='6' cy='6' r='4.5'/%3E%3Cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3E%3Ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3E%3C/svg%3E");background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(.75em + .375rem) calc(.75em + .375rem)}.form-select.is-invalid:focus,.was-validated .form-select:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .25rem rgba(220,53,69,.25)}.form-check-input.is-invalid,.was-validated .form-check-input:invalid{border-color:#dc3545}.form-check-input.is-invalid:checked,.was-validated .form-check-input:invalid:checked{background-color:#dc3545}.form-check-input.is-invalid:focus,.was-validated .form-check-input:invalid:focus{box-shadow:0 0 0 .25rem rgba(220,53,69,.25)}.form-check-input.is-invalid~.form-check-label,.was-validated .form-check-input:invalid~.form-check-label{color:#dc3545}.form-check-inline .form-check-input~.invalid-feedback{margin-left:.5em}.input-group .form-control.is-invalid,.input-group .form-select.is-invalid,.was-validated .input-group .form-control:invalid,.was-validated .input-group .form-select:invalid{z-index:2}.input-group .form-control.is-invalid:focus,.input-group .form-select.is-invalid:focus,.was-validated .input-group .form-control:invalid:focus,.was-validated .input-group .form-select:invalid:focus{z-index:3}.btn{display:inline-block;font-weight:400;line-height:1.5;color:#212529;text-align:center;text-decoration:none;vertical-align:middle;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:transparent;border:1px solid transparent;padding:.375rem .75rem;font-size:1rem;border-radius:.25rem;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media 
(prefers-reduced-motion:reduce){.btn{transition:none}}.btn:hover{color:#212529}.btn-check:focus+.btn,.btn:focus{outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.btn.disabled,.btn:disabled,fieldset:disabled .btn{pointer-events:none;opacity:.65}.btn-primary{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-primary:hover{color:#fff;background-color:#0b5ed7;border-color:#0a58ca}.btn-check:focus+.btn-primary,.btn-primary:focus{color:#fff;background-color:#0b5ed7;border-color:#0a58ca;box-shadow:0 0 0 .25rem rgba(49,132,253,.5)}.btn-check:active+.btn-primary,.btn-check:checked+.btn-primary,.btn-primary.active,.btn-primary:active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#0a58ca;border-color:#0a53be}.btn-check:active+.btn-primary:focus,.btn-check:checked+.btn-primary:focus,.btn-primary.active:focus,.btn-primary:active:focus,.show>.btn-primary.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(49,132,253,.5)}.btn-primary.disabled,.btn-primary:disabled{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-secondary{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:hover{color:#fff;background-color:#5c636a;border-color:#565e64}.btn-check:focus+.btn-secondary,.btn-secondary:focus{color:#fff;background-color:#5c636a;border-color:#565e64;box-shadow:0 0 0 .25rem rgba(130,138,145,.5)}.btn-check:active+.btn-secondary,.btn-check:checked+.btn-secondary,.btn-secondary.active,.btn-secondary:active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#565e64;border-color:#51585e}.btn-check:active+.btn-secondary:focus,.btn-check:checked+.btn-secondary:focus,.btn-secondary.active:focus,.btn-secondary:active:focus,.show>.btn-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(130,138,145,.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-success{color:#fff;background-color:#198754;border-color:#198754}.btn-success:hover{color:#fff;background-color:#157347;border-color:#146c43}.btn-check:focus+.btn-success,.btn-success:focus{color:#fff;background-color:#157347;border-color:#146c43;box-shadow:0 0 0 .25rem rgba(60,153,110,.5)}.btn-check:active+.btn-success,.btn-check:checked+.btn-success,.btn-success.active,.btn-success:active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#146c43;border-color:#13653f}.btn-check:active+.btn-success:focus,.btn-check:checked+.btn-success:focus,.btn-success.active:focus,.btn-success:active:focus,.show>.btn-success.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(60,153,110,.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#198754;border-color:#198754}.btn-info{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-info:hover{color:#000;background-color:#31d2f2;border-color:#25cff2}.btn-check:focus+.btn-info,.btn-info:focus{color:#000;background-color:#31d2f2;border-color:#25cff2;box-shadow:0 0 0 .25rem rgba(11,172,204,.5)}.btn-check:active+.btn-info,.btn-check:checked+.btn-info,.btn-info.active,.btn-info:active,.show>.btn-info.dropdown-toggle{color:#000;background-color:#3dd5f3;border-color:#25cff2}.btn-check:active+.btn-info:focus,.btn-check:checked+.btn-info:focus,.btn-info.active:focus,.btn-info:active:focus,.show>.btn-info.dropdown-toggle:focus{box-shadow:0 0 0 .25rem 
rgba(11,172,204,.5)}.btn-info.disabled,.btn-info:disabled{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-warning{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-warning:hover{color:#000;background-color:#ffca2c;border-color:#ffc720}.btn-check:focus+.btn-warning,.btn-warning:focus{color:#000;background-color:#ffca2c;border-color:#ffc720;box-shadow:0 0 0 .25rem rgba(217,164,6,.5)}.btn-check:active+.btn-warning,.btn-check:checked+.btn-warning,.btn-warning.active,.btn-warning:active,.show>.btn-warning.dropdown-toggle{color:#000;background-color:#ffcd39;border-color:#ffc720}.btn-check:active+.btn-warning:focus,.btn-check:checked+.btn-warning:focus,.btn-warning.active:focus,.btn-warning:active:focus,.show>.btn-warning.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(217,164,6,.5)}.btn-warning.disabled,.btn-warning:disabled{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:hover{color:#fff;background-color:#bb2d3b;border-color:#b02a37}.btn-check:focus+.btn-danger,.btn-danger:focus{color:#fff;background-color:#bb2d3b;border-color:#b02a37;box-shadow:0 0 0 .25rem rgba(225,83,97,.5)}.btn-check:active+.btn-danger,.btn-check:checked+.btn-danger,.btn-danger.active,.btn-danger:active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#b02a37;border-color:#a52834}.btn-check:active+.btn-danger:focus,.btn-check:checked+.btn-danger:focus,.btn-danger.active:focus,.btn-danger:active:focus,.show>.btn-danger.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(225,83,97,.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-light{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:hover{color:#000;background-color:#f9fafb;border-color:#f9fafb}.btn-check:focus+.btn-light,.btn-light:focus{color:#000;background-color:#f9fafb;border-color:#f9fafb;box-shadow:0 0 0 .25rem rgba(211,212,213,.5)}.btn-check:active+.btn-light,.btn-check:checked+.btn-light,.btn-light.active,.btn-light:active,.show>.btn-light.dropdown-toggle{color:#000;background-color:#f9fafb;border-color:#f9fafb}.btn-check:active+.btn-light:focus,.btn-check:checked+.btn-light:focus,.btn-light.active:focus,.btn-light:active:focus,.show>.btn-light.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(211,212,213,.5)}.btn-light.disabled,.btn-light:disabled{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-dark{color:#fff;background-color:#212529;border-color:#212529}.btn-dark:hover{color:#fff;background-color:#1c1f23;border-color:#1a1e21}.btn-check:focus+.btn-dark,.btn-dark:focus{color:#fff;background-color:#1c1f23;border-color:#1a1e21;box-shadow:0 0 0 .25rem rgba(66,70,73,.5)}.btn-check:active+.btn-dark,.btn-check:checked+.btn-dark,.btn-dark.active,.btn-dark:active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#1a1e21;border-color:#191c1f}.btn-check:active+.btn-dark:focus,.btn-check:checked+.btn-dark:focus,.btn-dark.active:focus,.btn-dark:active:focus,.show>.btn-dark.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(66,70,73,.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-primary{color:#0d6efd;border-color:#0d6efd}.btn-outline-primary:hover{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-check:focus+.btn-outline-primary,.btn-outline-primary:focus{box-shadow:0 0 0 .25rem 
rgba(13,110,253,.5)}.btn-check:active+.btn-outline-primary,.btn-check:checked+.btn-outline-primary,.btn-outline-primary.active,.btn-outline-primary.dropdown-toggle.show,.btn-outline-primary:active{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-check:active+.btn-outline-primary:focus,.btn-check:checked+.btn-outline-primary:focus,.btn-outline-primary.active:focus,.btn-outline-primary.dropdown-toggle.show:focus,.btn-outline-primary:active:focus{box-shadow:0 0 0 .25rem rgba(13,110,253,.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#0d6efd;background-color:transparent}.btn-outline-secondary{color:#6c757d;border-color:#6c757d}.btn-outline-secondary:hover{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-check:focus+.btn-outline-secondary,.btn-outline-secondary:focus{box-shadow:0 0 0 .25rem rgba(108,117,125,.5)}.btn-check:active+.btn-outline-secondary,.btn-check:checked+.btn-outline-secondary,.btn-outline-secondary.active,.btn-outline-secondary.dropdown-toggle.show,.btn-outline-secondary:active{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-check:active+.btn-outline-secondary:focus,.btn-check:checked+.btn-outline-secondary:focus,.btn-outline-secondary.active:focus,.btn-outline-secondary.dropdown-toggle.show:focus,.btn-outline-secondary:active:focus{box-shadow:0 0 0 .25rem rgba(108,117,125,.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#6c757d;background-color:transparent}.btn-outline-success{color:#198754;border-color:#198754}.btn-outline-success:hover{color:#fff;background-color:#198754;border-color:#198754}.btn-check:focus+.btn-outline-success,.btn-outline-success:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,.5)}.btn-check:active+.btn-outline-success,.btn-check:checked+.btn-outline-success,.btn-outline-success.active,.btn-outline-success.dropdown-toggle.show,.btn-outline-success:active{color:#fff;background-color:#198754;border-color:#198754}.btn-check:active+.btn-outline-success:focus,.btn-check:checked+.btn-outline-success:focus,.btn-outline-success.active:focus,.btn-outline-success.dropdown-toggle.show:focus,.btn-outline-success:active:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#198754;background-color:transparent}.btn-outline-info{color:#0dcaf0;border-color:#0dcaf0}.btn-outline-info:hover{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-check:focus+.btn-outline-info,.btn-outline-info:focus{box-shadow:0 0 0 .25rem rgba(13,202,240,.5)}.btn-check:active+.btn-outline-info,.btn-check:checked+.btn-outline-info,.btn-outline-info.active,.btn-outline-info.dropdown-toggle.show,.btn-outline-info:active{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-check:active+.btn-outline-info:focus,.btn-check:checked+.btn-outline-info:focus,.btn-outline-info.active:focus,.btn-outline-info.dropdown-toggle.show:focus,.btn-outline-info:active:focus{box-shadow:0 0 0 .25rem rgba(13,202,240,.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#0dcaf0;background-color:transparent}.btn-outline-warning{color:#ffc107;border-color:#ffc107}.btn-outline-warning:hover{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-check:focus+.btn-outline-warning,.btn-outline-warning:focus{box-shadow:0 0 0 .25rem 
rgba(255,193,7,.5)}.btn-check:active+.btn-outline-warning,.btn-check:checked+.btn-outline-warning,.btn-outline-warning.active,.btn-outline-warning.dropdown-toggle.show,.btn-outline-warning:active{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-check:active+.btn-outline-warning:focus,.btn-check:checked+.btn-outline-warning:focus,.btn-outline-warning.active:focus,.btn-outline-warning.dropdown-toggle.show:focus,.btn-outline-warning:active:focus{box-shadow:0 0 0 .25rem rgba(255,193,7,.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#ffc107;background-color:transparent}.btn-outline-danger{color:#dc3545;border-color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-check:focus+.btn-outline-danger,.btn-outline-danger:focus{box-shadow:0 0 0 .25rem rgba(220,53,69,.5)}.btn-check:active+.btn-outline-danger,.btn-check:checked+.btn-outline-danger,.btn-outline-danger.active,.btn-outline-danger.dropdown-toggle.show,.btn-outline-danger:active{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-check:active+.btn-outline-danger:focus,.btn-check:checked+.btn-outline-danger:focus,.btn-outline-danger.active:focus,.btn-outline-danger.dropdown-toggle.show:focus,.btn-outline-danger:active:focus{box-shadow:0 0 0 .25rem rgba(220,53,69,.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#dc3545;background-color:transparent}.btn-outline-light{color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:hover{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-check:focus+.btn-outline-light,.btn-outline-light:focus{box-shadow:0 0 0 .25rem rgba(248,249,250,.5)}.btn-check:active+.btn-outline-light,.btn-check:checked+.btn-outline-light,.btn-outline-light.active,.btn-outline-light.dropdown-toggle.show,.btn-outline-light:active{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-check:active+.btn-outline-light:focus,.btn-check:checked+.btn-outline-light:focus,.btn-outline-light.active:focus,.btn-outline-light.dropdown-toggle.show:focus,.btn-outline-light:active:focus{box-shadow:0 0 0 .25rem rgba(248,249,250,.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f8f9fa;background-color:transparent}.btn-outline-dark{color:#212529;border-color:#212529}.btn-outline-dark:hover{color:#fff;background-color:#212529;border-color:#212529}.btn-check:focus+.btn-outline-dark,.btn-outline-dark:focus{box-shadow:0 0 0 .25rem rgba(33,37,41,.5)}.btn-check:active+.btn-outline-dark,.btn-check:checked+.btn-outline-dark,.btn-outline-dark.active,.btn-outline-dark.dropdown-toggle.show,.btn-outline-dark:active{color:#fff;background-color:#212529;border-color:#212529}.btn-check:active+.btn-outline-dark:focus,.btn-check:checked+.btn-outline-dark:focus,.btn-outline-dark.active:focus,.btn-outline-dark.dropdown-toggle.show:focus,.btn-outline-dark:active:focus{box-shadow:0 0 0 .25rem rgba(33,37,41,.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#212529;background-color:transparent}.btn-link{font-weight:400;color:#0d6efd;text-decoration:underline}.btn-link:hover{color:#0a58ca}.btn-link.disabled,.btn-link:disabled{color:#6c757d}.btn-group-lg>.btn,.btn-lg{padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.btn-group-sm>.btn,.btn-sm{padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.fade{transition:opacity .15s linear}@media (prefers-reduced-motion:reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{height:0;overflow:hidden;transition:height .35s 
ease}@media (prefers-reduced-motion:reduce){.collapsing{transition:none}}.collapsing.collapse-horizontal{width:0;height:auto;transition:width .35s ease}@media (prefers-reduced-motion:reduce){.collapsing.collapse-horizontal{transition:none}}.dropdown,.dropend,.dropstart,.dropup{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-bottom:0;border-left:.3em solid transparent}.dropdown-toggle:empty:after{margin-left:0}.dropdown-menu{position:absolute;z-index:1000;display:none;min-width:10rem;padding:.5rem 0;margin:0;font-size:1rem;color:#212529;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.15);border-radius:.25rem}.dropdown-menu[data-bs-popper]{top:100%;left:0;margin-top:.125rem}.dropdown-menu-start{--bs-position:start}.dropdown-menu-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-end{--bs-position:end}.dropdown-menu-end[data-bs-popper]{right:0;left:auto}@media (min-width:576px){.dropdown-menu-sm-start{--bs-position:start}.dropdown-menu-sm-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-sm-end{--bs-position:end}.dropdown-menu-sm-end[data-bs-popper]{right:0;left:auto}}@media (min-width:768px){.dropdown-menu-md-start{--bs-position:start}.dropdown-menu-md-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-md-end{--bs-position:end}.dropdown-menu-md-end[data-bs-popper]{right:0;left:auto}}@media (min-width:992px){.dropdown-menu-lg-start{--bs-position:start}.dropdown-menu-lg-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-lg-end{--bs-position:end}.dropdown-menu-lg-end[data-bs-popper]{right:0;left:auto}}@media (min-width:1200px){.dropdown-menu-xl-start{--bs-position:start}.dropdown-menu-xl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xl-end{--bs-position:end}.dropdown-menu-xl-end[data-bs-popper]{right:0;left:auto}}@media (min-width:1400px){.dropdown-menu-xxl-start{--bs-position:start}.dropdown-menu-xxl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xxl-end{--bs-position:end}.dropdown-menu-xxl-end[data-bs-popper]{right:0;left:auto}}.dropup .dropdown-menu[data-bs-popper]{top:auto;bottom:100%;margin-top:0;margin-bottom:.125rem}.dropup .dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid transparent;border-bottom:.3em solid;border-left:.3em solid transparent}.dropup .dropdown-toggle:empty:after{margin-left:0}.dropend .dropdown-menu[data-bs-popper]{top:0;right:auto;left:100%;margin-top:0;margin-left:.125rem}.dropend .dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:0;border-bottom:.3em solid transparent;border-left:.3em solid}.dropend .dropdown-toggle:empty:after{margin-left:0}.dropend .dropdown-toggle:after{vertical-align:0}.dropstart .dropdown-menu[data-bs-popper]{top:0;right:100%;left:auto;margin-top:0;margin-right:.125rem}.dropstart .dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:""}.dropstart .dropdown-toggle:after{display:none}.dropstart .dropdown-toggle:before{display:inline-block;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:.3em solid;border-bottom:.3em solid transparent}.dropstart .dropdown-toggle:empty:after{margin-left:0}.dropstart 
.dropdown-toggle:before{vertical-align:0}.dropdown-divider{height:0;margin:.5rem 0;overflow:hidden;border-top:1px solid rgba(0,0,0,.15)}.dropdown-item{display:block;width:100%;padding:.25rem 1rem;clear:both;font-weight:400;color:#212529;text-align:inherit;text-decoration:none;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:focus,.dropdown-item:hover{color:#1e2125;background-color:#e9ecef}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#0d6efd}.dropdown-item.disabled,.dropdown-item:disabled{color:#adb5bd;pointer-events:none;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:.5rem 1rem;margin-bottom:0;font-size:.875rem;color:#6c757d;white-space:nowrap}.dropdown-item-text{display:block;padding:.25rem 1rem;color:#212529}.dropdown-menu-dark{color:#dee2e6;background-color:#343a40;border-color:rgba(0,0,0,.15)}.dropdown-menu-dark .dropdown-item{color:#dee2e6}.dropdown-menu-dark .dropdown-item:focus,.dropdown-menu-dark .dropdown-item:hover{color:#fff;background-color:hsla(0,0%,100%,.15)}.dropdown-menu-dark .dropdown-item.active,.dropdown-menu-dark .dropdown-item:active{color:#fff;background-color:#0d6efd}.dropdown-menu-dark .dropdown-item.disabled,.dropdown-menu-dark .dropdown-item:disabled{color:#adb5bd}.dropdown-menu-dark .dropdown-divider{border-color:rgba(0,0,0,.15)}.dropdown-menu-dark .dropdown-item-text{color:#dee2e6}.dropdown-menu-dark .dropdown-header{color:#adb5bd}.btn-group,.btn-group-vertical{position:relative;display:inline-flex;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;flex:1 1 auto}.btn-group-vertical>.btn-check:checked+.btn,.btn-group-vertical>.btn-check:focus+.btn,.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn-check:checked+.btn,.btn-group>.btn-check:focus+.btn,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:1}.btn-toolbar{display:flex;flex-wrap:wrap;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>.btn-group:not(:first-child),.btn-group>.btn:not(:first-child){margin-left:-1px}.btn-group>.btn-group:not(:last-child)>.btn,.btn-group>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:not(:first-child)>.btn,.btn-group>.btn:nth-child(n+3),.btn-group>:not(.btn-check)+.btn{border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:.5625rem;padding-left:.5625rem}.dropdown-toggle-split:after,.dropend .dropdown-toggle-split:after,.dropup .dropdown-toggle-split:after{margin-left:0}.dropstart 
.dropdown-toggle-split:before{margin-right:0}.btn-group-sm>.btn+.dropdown-toggle-split,.btn-sm+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-group-lg>.btn+.dropdown-toggle-split,.btn-lg+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn-group-vertical{flex-direction:column;align-items:flex-start;justify-content:center}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn-group:not(:first-child),.btn-group-vertical>.btn:not(:first-child){margin-top:-1px}.btn-group-vertical>.btn-group:not(:last-child)>.btn,.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child)>.btn,.btn-group-vertical>.btn~.btn{border-top-left-radius:0;border-top-right-radius:0}.nav{display:flex;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:.5rem 1rem;color:#0d6efd;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out}@media (prefers-reduced-motion:reduce){.nav-link{transition:none}}.nav-link:focus,.nav-link:hover{color:#0a58ca}.nav-link.disabled{color:#6c757d;pointer-events:none;cursor:default}.nav-tabs{border-bottom:1px solid #dee2e6}.nav-tabs .nav-link{margin-bottom:-1px;background:0 0;border:1px solid transparent;border-top-left-radius:.25rem;border-top-right-radius:.25rem}.nav-tabs .nav-link:focus,.nav-tabs .nav-link:hover{border-color:#e9ecef #e9ecef #dee2e6;isolation:isolate}.nav-tabs .nav-link.disabled{color:#6c757d;background-color:transparent;border-color:transparent}.nav-tabs .nav-item.show .nav-link,.nav-tabs .nav-link.active{color:#495057;background-color:#fff;border-color:#dee2e6 #dee2e6 #fff}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{background:0 0;border:0;border-radius:.25rem}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#0d6efd}.nav-fill .nav-item,.nav-fill>.nav-link{flex:1 1 auto;text-align:center}.nav-justified .nav-item,.nav-justified>.nav-link{flex-basis:0;flex-grow:1;text-align:center}.nav-fill .nav-item .nav-link,.nav-justified .nav-item .nav-link{width:100%}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;display:flex;flex-wrap:wrap;align-items:center;justify-content:space-between;padding-top:.5rem;padding-bottom:.5rem}.navbar>.container,.navbar>.container-fluid,.navbar>.container-lg,.navbar>.container-md,.navbar>.container-sm,.navbar>.container-xl,.navbar>.container-xxl{display:flex;flex-wrap:inherit;align-items:center;justify-content:space-between}.navbar-brand{padding-top:.3125rem;padding-bottom:.3125rem;margin-right:1rem;font-size:1.25rem;text-decoration:none;white-space:nowrap}.navbar-nav{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-nav .dropdown-menu{position:static}.navbar-text{padding-top:.5rem;padding-bottom:.5rem}.navbar-collapse{flex-basis:100%;flex-grow:1;align-items:center}.navbar-toggler{padding:.25rem .75rem;font-size:1.25rem;line-height:1;background-color:transparent;border:1px solid transparent;border-radius:.25rem;transition:box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.navbar-toggler{transition:none}}.navbar-toggler:hover{text-decoration:none}.navbar-toggler:focus{text-decoration:none;outline:0;box-shadow:0 0 
0 .25rem}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;background-repeat:no-repeat;background-position:50%;background-size:100%}.navbar-nav-scroll{max-height:var(--bs-scroll-height,75vh);overflow-y:auto}@media (min-width:576px){.navbar-expand-sm{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-sm .navbar-nav-scroll{overflow:visible}.navbar-expand-sm .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-sm .navbar-toggler,.navbar-expand-sm .offcanvas-header{display:none}.navbar-expand-sm .offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand-sm .offcanvas-bottom,.navbar-expand-sm .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand-sm .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:768px){.navbar-expand-md{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-md .navbar-nav-scroll{overflow:visible}.navbar-expand-md .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-md .navbar-toggler,.navbar-expand-md .offcanvas-header{display:none}.navbar-expand-md .offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand-md .offcanvas-bottom,.navbar-expand-md .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand-md .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:992px){.navbar-expand-lg{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-lg .navbar-nav{flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-lg .navbar-nav-scroll{overflow:visible}.navbar-expand-lg .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-lg .navbar-toggler,.navbar-expand-lg .offcanvas-header{display:none}.navbar-expand-lg .offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand-lg .offcanvas-bottom,.navbar-expand-lg .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand-lg .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:1200px){.navbar-expand-xl{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xl .navbar-nav-scroll{overflow:visible}.navbar-expand-xl .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xl .navbar-toggler,.navbar-expand-xl .offcanvas-header{display:none}.navbar-expand-xl 
.offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand-xl .offcanvas-bottom,.navbar-expand-xl .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand-xl .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:1400px){.navbar-expand-xxl{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-xxl .navbar-nav{flex-direction:row}.navbar-expand-xxl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xxl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xxl .navbar-nav-scroll{overflow:visible}.navbar-expand-xxl .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xxl .navbar-toggler,.navbar-expand-xxl .offcanvas-header{display:none}.navbar-expand-xxl .offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand-xxl .offcanvas-bottom,.navbar-expand-xxl .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand-xxl .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}.navbar-expand{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand .navbar-nav{flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand .navbar-nav-scroll{overflow:visible}.navbar-expand .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand .navbar-toggler,.navbar-expand .offcanvas-header{display:none}.navbar-expand .offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand .offcanvas-bottom,.navbar-expand .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}.navbar-light .navbar-brand{color:rgba(0,0,0,.9)}.navbar-light .navbar-brand:focus,.navbar-light .navbar-brand:hover{color:rgba(0,0,0,.9)}.navbar-light .navbar-nav .nav-link{color:rgba(0,0,0,.55)}.navbar-light .navbar-nav .nav-link:focus,.navbar-light .navbar-nav .nav-link:hover{color:rgba(0,0,0,.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(0,0,0,.3)}.navbar-light .navbar-nav .nav-link.active,.navbar-light .navbar-nav .show>.nav-link{color:rgba(0,0,0,.9)}.navbar-light .navbar-toggler{color:rgba(0,0,0,.55);border-color:rgba(0,0,0,.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3E%3Cpath stroke='rgba(0, 0, 0, 0.55)' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-light .navbar-text{color:rgba(0,0,0,.55)}.navbar-light .navbar-text a,.navbar-light .navbar-text a:focus,.navbar-light .navbar-text a:hover{color:rgba(0,0,0,.9)}.navbar-dark .navbar-brand{color:#fff}.navbar-dark .navbar-brand:focus,.navbar-dark .navbar-brand:hover{color:#fff}.navbar-dark .navbar-nav .nav-link{color:hsla(0,0%,100%,.55)}.navbar-dark .navbar-nav .nav-link:focus,.navbar-dark .navbar-nav .nav-link:hover{color:hsla(0,0%,100%,.75)}.navbar-dark .navbar-nav .nav-link.disabled{color:hsla(0,0%,100%,.25)}.navbar-dark .navbar-nav .nav-link.active,.navbar-dark .navbar-nav 
.show>.nav-link{color:#fff}.navbar-dark .navbar-toggler{color:hsla(0,0%,100%,.55);border-color:hsla(0,0%,100%,.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3E%3Cpath stroke='rgba(255, 255, 255, 0.55)' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-dark .navbar-text{color:hsla(0,0%,100%,.55)}.navbar-dark .navbar-text a,.navbar-dark .navbar-text a:focus,.navbar-dark .navbar-text a:hover{color:#fff}.card{position:relative;display:flex;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid rgba(0,0,0,.125);border-radius:.25rem}.card>hr{margin-right:0;margin-left:0}.card>.list-group{border-top:inherit;border-bottom:inherit}.card>.list-group:first-child{border-top-width:0;border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card>.list-group:last-child{border-bottom-width:0;border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card>.card-header+.list-group,.card>.list-group+.card-footer{border-top:0}.card-body{flex:1 1 auto;padding:1rem 1rem}.card-title{margin-bottom:.5rem}.card-subtitle{margin-top:-.25rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link+.card-link{margin-left:1rem}.card-header{padding:.5rem 1rem;margin-bottom:0;background-color:rgba(0,0,0,.03);border-bottom:1px solid rgba(0,0,0,.125)}.card-header:first-child{border-radius:calc(.25rem - 1px) calc(.25rem - 1px) 0 0}.card-footer{padding:.5rem 1rem;background-color:rgba(0,0,0,.03);border-top:1px solid rgba(0,0,0,.125)}.card-footer:last-child{border-radius:0 0 calc(.25rem - 1px) calc(.25rem - 1px)}.card-header-tabs{margin-bottom:-.5rem;border-bottom:0}.card-header-pills,.card-header-tabs{margin-right:-.5rem;margin-left:-.5rem}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(.25rem - 1px)}.card-img,.card-img-bottom,.card-img-top{width:100%}.card-img,.card-img-top{border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card-img,.card-img-bottom{border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card-group>.card{margin-bottom:.75rem}@media (min-width:576px){.card-group{display:flex;flex-flow:row wrap}.card-group>.card{flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:not(:last-child) .card-header,.card-group>.card:not(:last-child) .card-img-top{border-top-right-radius:0}.card-group>.card:not(:last-child) .card-footer,.card-group>.card:not(:last-child) .card-img-bottom{border-bottom-right-radius:0}.card-group>.card:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:not(:first-child) .card-header,.card-group>.card:not(:first-child) .card-img-top{border-top-left-radius:0}.card-group>.card:not(:first-child) .card-footer,.card-group>.card:not(:first-child) .card-img-bottom{border-bottom-left-radius:0}}.accordion-button{position:relative;display:flex;align-items:center;width:100%;padding:1rem 1.25rem;font-size:1rem;color:#212529;text-align:left;background-color:#fff;border:0;border-radius:0;overflow-anchor:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s 
ease-in-out,border-radius .15s ease}@media (prefers-reduced-motion:reduce){.accordion-button{transition:none}}.accordion-button:not(.collapsed){color:#0c63e4;background-color:#e7f1ff;box-shadow:inset 0 -1px 0 rgba(0,0,0,.125)}.accordion-button:not(.collapsed):after{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%230c63e4'%3E%3Cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 01.708 0L8 10.293l5.646-5.647a.5.5 0 01.708.708l-6 6a.5.5 0 01-.708 0l-6-6a.5.5 0 010-.708z'/%3E%3C/svg%3E");transform:rotate(-180deg)}.accordion-button:after{flex-shrink:0;width:1.25rem;height:1.25rem;margin-left:auto;content:"";background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23212529'%3E%3Cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 01.708 0L8 10.293l5.646-5.647a.5.5 0 01.708.708l-6 6a.5.5 0 01-.708 0l-6-6a.5.5 0 010-.708z'/%3E%3C/svg%3E");background-repeat:no-repeat;background-size:1.25rem;transition:transform .2s ease-in-out}@media (prefers-reduced-motion:reduce){.accordion-button:after{transition:none}}.accordion-button:hover{z-index:2}.accordion-button:focus{z-index:3;border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.accordion-header{margin-bottom:0}.accordion-item{background-color:#fff;border:1px solid rgba(0,0,0,.125)}.accordion-item:first-of-type{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.accordion-item:first-of-type .accordion-button{border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.accordion-item:not(:first-of-type){border-top:0}.accordion-item:last-of-type{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.accordion-item:last-of-type .accordion-button.collapsed{border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.accordion-item:last-of-type .accordion-collapse{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.accordion-body{padding:1rem 1.25rem}.accordion-flush .accordion-collapse{border-width:0}.accordion-flush .accordion-item{border-right:0;border-left:0;border-radius:0}.accordion-flush .accordion-item:first-child{border-top:0}.accordion-flush .accordion-item:last-child{border-bottom:0}.accordion-flush .accordion-item .accordion-button{border-radius:0}.breadcrumb{display:flex;flex-wrap:wrap;padding:0 0;margin-bottom:1rem;list-style:none}.breadcrumb-item+.breadcrumb-item{padding-left:.5rem}.breadcrumb-item+.breadcrumb-item:before{float:left;padding-right:.5rem;color:#6c757d;content:var(--bs-breadcrumb-divider,"/")}.breadcrumb-item.active{color:#6c757d}.pagination{display:flex;padding-left:0;list-style:none}.page-link{position:relative;display:block;color:#0d6efd;text-decoration:none;background-color:#fff;border:1px solid #dee2e6;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.page-link{transition:none}}.page-link:hover{z-index:2;color:#0a58ca;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:3;color:#0a58ca;background-color:#e9ecef;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.page-item:not(:first-child) .page-link{margin-left:-1px}.page-item.active .page-link{z-index:3;color:#fff;background-color:#0d6efd;border-color:#0d6efd}.page-item.disabled 
.page-link{color:#6c757d;pointer-events:none;background-color:#fff;border-color:#dee2e6}.page-link{padding:.375rem .75rem}.page-item:first-child .page-link{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem}.page-item:last-child .page-link{border-top-right-radius:.25rem;border-bottom-right-radius:.25rem}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.25rem}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:.3rem;border-bottom-left-radius:.3rem}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:.3rem;border-bottom-right-radius:.3rem}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:.2rem;border-bottom-left-radius:.2rem}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:.2rem;border-bottom-right-radius:.2rem}.badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.alert{position:relative;padding:1rem 1rem;margin-bottom:1rem;border:1px solid transparent;border-radius:.25rem}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:3rem}.alert-dismissible .btn-close{position:absolute;top:0;right:0;z-index:2;padding:1.25rem 1rem}.alert-primary{color:#084298;background-color:#cfe2ff;border-color:#b6d4fe}.alert-primary .alert-link{color:#06357a}.alert-secondary{color:#41464b;background-color:#e2e3e5;border-color:#d3d6d8}.alert-secondary .alert-link{color:#34383c}.alert-success{color:#0f5132;background-color:#d1e7dd;border-color:#badbcc}.alert-success .alert-link{color:#0c4128}.alert-info{color:#055160;background-color:#cff4fc;border-color:#b6effb}.alert-info .alert-link{color:#04414d}.alert-warning{color:#664d03;background-color:#fff3cd;border-color:#ffecb5}.alert-warning .alert-link{color:#523e02}.alert-danger{color:#842029;background-color:#f8d7da;border-color:#f5c2c7}.alert-danger .alert-link{color:#6a1a21}.alert-light{color:#636464;background-color:#fefefe;border-color:#fdfdfe}.alert-light .alert-link{color:#4f5050}.alert-dark{color:#141619;background-color:#d3d3d4;border-color:#bcbebf}.alert-dark .alert-link{color:#101214}@-webkit-keyframes progress-bar-stripes{0%{background-position-x:1rem}}@keyframes progress-bar-stripes{0%{background-position-x:1rem}}.progress{height:1rem;font-size:.75rem;background-color:#e9ecef;border-radius:.25rem}.progress,.progress-bar{display:flex;overflow:hidden}.progress-bar{flex-direction:column;justify-content:center;color:#fff;text-align:center;white-space:nowrap;background-color:#0d6efd;transition:width .6s ease}@media (prefers-reduced-motion:reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg,hsla(0,0%,100%,.15) 25%,transparent 0,transparent 50%,hsla(0,0%,100%,.15) 0,hsla(0,0%,100%,.15) 75%,transparent 0,transparent);background-size:1rem 1rem}.progress-bar-animated{-webkit-animation:progress-bar-stripes 1s linear infinite;animation:progress-bar-stripes 1s linear infinite}@media (prefers-reduced-motion:reduce){.progress-bar-animated{-webkit-animation:none;animation:none}}.list-group{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:.25rem}.list-group-numbered{list-style-type:none;counter-reset:section}.list-group-numbered>li:before{content:counters(section,".") ". 
";counter-increment:section}.list-group-item-action{width:100%;color:#495057;text-align:inherit}.list-group-item-action:focus,.list-group-item-action:hover{z-index:1;color:#495057;text-decoration:none;background-color:#f8f9fa}.list-group-item-action:active{color:#212529;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:.5rem 1rem;color:#212529;text-decoration:none;background-color:#fff;border:1px solid rgba(0,0,0,.125)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item.disabled,.list-group-item:disabled{color:#6c757d;pointer-events:none;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#0d6efd;border-color:#0d6efd}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{margin-top:-1px;border-top-width:1px}.list-group-horizontal{flex-direction:row}.list-group-horizontal>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal>.list-group-item.active{margin-top:0}.list-group-horizontal>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}@media (min-width:576px){.list-group-horizontal-sm{flex-direction:row}.list-group-horizontal-sm>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-sm>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-sm>.list-group-item.active{margin-top:0}.list-group-horizontal-sm>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-sm>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width:768px){.list-group-horizontal-md{flex-direction:row}.list-group-horizontal-md>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-md>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-md>.list-group-item.active{margin-top:0}.list-group-horizontal-md>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-md>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width:992px){.list-group-horizontal-lg{flex-direction:row}.list-group-horizontal-lg>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-lg>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-lg>.list-group-item.active{margin-top:0}.list-group-horizontal-lg>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-lg>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media 
(min-width:1200px){.list-group-horizontal-xl{flex-direction:row}.list-group-horizontal-xl>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-xl>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-xl>.list-group-item.active{margin-top:0}.list-group-horizontal-xl>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xl>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width:1400px){.list-group-horizontal-xxl{flex-direction:row}.list-group-horizontal-xxl>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-xxl>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-xxl>.list-group-item.active{margin-top:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}.list-group-flush{border-radius:0}.list-group-flush>.list-group-item{border-width:0 0 1px}.list-group-flush>.list-group-item:last-child{border-bottom-width:0}.list-group-item-primary{color:#084298;background-color:#cfe2ff}.list-group-item-primary.list-group-item-action:focus,.list-group-item-primary.list-group-item-action:hover{color:#084298;background-color:#bacbe6}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#084298;border-color:#084298}.list-group-item-secondary{color:#41464b;background-color:#e2e3e5}.list-group-item-secondary.list-group-item-action:focus,.list-group-item-secondary.list-group-item-action:hover{color:#41464b;background-color:#cbccce}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#41464b;border-color:#41464b}.list-group-item-success{color:#0f5132;background-color:#d1e7dd}.list-group-item-success.list-group-item-action:focus,.list-group-item-success.list-group-item-action:hover{color:#0f5132;background-color:#bcd0c7}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#0f5132;border-color:#0f5132}.list-group-item-info{color:#055160;background-color:#cff4fc}.list-group-item-info.list-group-item-action:focus,.list-group-item-info.list-group-item-action:hover{color:#055160;background-color:#badce3}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#055160;border-color:#055160}.list-group-item-warning{color:#664d03;background-color:#fff3cd}.list-group-item-warning.list-group-item-action:focus,.list-group-item-warning.list-group-item-action:hover{color:#664d03;background-color:#e6dbb9}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#664d03;border-color:#664d03}.list-group-item-danger{color:#842029;background-color:#f8d7da}.list-group-item-danger.list-group-item-action:focus,.list-group-item-danger.list-group-item-action:hover{color:#842029;background-color:#dfc2c4}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#842029;border-color:#842029}.list-group-item-light{color:#636464;background-color:#fefefe}.list-group-item-light.list-group-item-action:focus,.list-group-item-light.list-group-item-action:hover{color:#636464;background-color:#e5e5e5}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#636464;border-color:#636464}.list-group-ite
m-dark{color:#141619;background-color:#d3d3d4}.list-group-item-dark.list-group-item-action:focus,.list-group-item-dark.list-group-item-action:hover{color:#141619;background-color:#bebebf}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#141619;border-color:#141619}.btn-close{box-sizing:content-box;width:1em;height:1em;padding:.25em .25em;color:#000;background:transparent url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3E%3Cpath d='M.293.293a1 1 0 011.414 0L8 6.586 14.293.293a1 1 0 111.414 1.414L9.414 8l6.293 6.293a1 1 0 01-1.414 1.414L8 9.414l-6.293 6.293a1 1 0 01-1.414-1.414L6.586 8 .293 1.707a1 1 0 010-1.414z'/%3E%3C/svg%3E") 50%/1em auto no-repeat;border:0;border-radius:.25rem;opacity:.5}.btn-close:hover{color:#000;text-decoration:none;opacity:.75}.btn-close:focus{outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25);opacity:1}.btn-close.disabled,.btn-close:disabled{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;opacity:.25}.btn-close-white{-webkit-filter:invert(1) grayscale(100%) brightness(200%);filter:invert(1) grayscale(100%) brightness(200%)}.toast{width:350px;max-width:100%;font-size:.875rem;pointer-events:auto;background-color:hsla(0,0%,100%,.85);background-clip:padding-box;border:1px solid rgba(0,0,0,.1);box-shadow:0 .5rem 1rem rgba(0,0,0,.15);border-radius:.25rem}.toast.showing{opacity:0}.toast:not(.show){display:none}.toast-container{width:-webkit-max-content;width:-moz-max-content;width:max-content;max-width:100%;pointer-events:none}.toast-container>:not(:last-child){margin-bottom:.75rem}.toast-header{display:flex;align-items:center;padding:.5rem .75rem;color:#6c757d;background-color:hsla(0,0%,100%,.85);background-clip:padding-box;border-bottom:1px solid rgba(0,0,0,.05);border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.toast-header .btn-close{margin-right:-.375rem;margin-left:.75rem}.toast-body{padding:.75rem;word-wrap:break-word}.modal{position:fixed;top:0;left:0;z-index:1055;display:none;width:100%;height:100%;overflow-x:hidden;overflow-y:auto;outline:0}.modal-dialog{position:relative;width:auto;margin:.5rem;pointer-events:none}.modal.fade .modal-dialog{transition:transform .3s ease-out;transform:translateY(-50px)}@media (prefers-reduced-motion:reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal.modal-static .modal-dialog{transform:scale(1.02)}.modal-dialog-scrollable{height:calc(100% - 1rem)}.modal-dialog-scrollable .modal-content{max-height:100%;overflow:hidden}.modal-dialog-scrollable .modal-body{overflow-y:auto}.modal-dialog-centered{display:flex;align-items:center;min-height:calc(100% - 1rem)}.modal-content{position:relative;display:flex;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.2);border-radius:.3rem;outline:0}.modal-backdrop{position:fixed;top:0;left:0;z-index:1050;width:100vw;height:100vh;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:.5}.modal-header{display:flex;flex-shrink:0;align-items:center;justify-content:space-between;padding:1rem 1rem;border-bottom:1px solid #dee2e6;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.modal-header .btn-close{padding:.5rem .5rem;margin:-.5rem -.5rem -.5rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;flex:1 1 
auto;padding:1rem}.modal-footer{display:flex;flex-wrap:wrap;flex-shrink:0;align-items:center;justify-content:flex-end;padding:.75rem;border-top:1px solid #dee2e6;border-bottom-right-radius:calc(.3rem - 1px);border-bottom-left-radius:calc(.3rem - 1px)}.modal-footer>*{margin:.25rem}@media (min-width:576px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-scrollable{height:calc(100% - 3.5rem)}.modal-dialog-centered{min-height:calc(100% - 3.5rem)}.modal-sm{max-width:300px}}@media (min-width:992px){.modal-lg,.modal-xl{max-width:800px}}@media (min-width:1200px){.modal-xl{max-width:1140px}}.modal-fullscreen{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen .modal-header{border-radius:0}.modal-fullscreen .modal-body{overflow-y:auto}.modal-fullscreen .modal-footer{border-radius:0}@media (max-width:575.98px){.modal-fullscreen-sm-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-sm-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-sm-down .modal-header{border-radius:0}.modal-fullscreen-sm-down .modal-body{overflow-y:auto}.modal-fullscreen-sm-down .modal-footer{border-radius:0}}@media (max-width:767.98px){.modal-fullscreen-md-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-md-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-md-down .modal-header{border-radius:0}.modal-fullscreen-md-down .modal-body{overflow-y:auto}.modal-fullscreen-md-down .modal-footer{border-radius:0}}@media (max-width:991.98px){.modal-fullscreen-lg-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-lg-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-lg-down .modal-header{border-radius:0}.modal-fullscreen-lg-down .modal-body{overflow-y:auto}.modal-fullscreen-lg-down .modal-footer{border-radius:0}}@media (max-width:1199.98px){.modal-fullscreen-xl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xl-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-xl-down .modal-header{border-radius:0}.modal-fullscreen-xl-down .modal-body{overflow-y:auto}.modal-fullscreen-xl-down .modal-footer{border-radius:0}}@media (max-width:1399.98px){.modal-fullscreen-xxl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xxl-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-xxl-down .modal-header{border-radius:0}.modal-fullscreen-xxl-down .modal-body{overflow-y:auto}.modal-fullscreen-xxl-down .modal-footer{border-radius:0}}.tooltip{position:absolute;z-index:1080;display:block;margin:0;font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:.9}.tooltip .tooltip-arrow{position:absolute;display:block;width:.8rem;height:.4rem}.tooltip .tooltip-arrow:before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-auto[data-popper-placement^=top],.bs-tooltip-top{padding:.4rem 0}.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow,.bs-tooltip-top .tooltip-arrow{bottom:0}.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow:before,.bs-tooltip-top .tooltip-arrow:before{top:-1px;border-width:.4rem .4rem 
0;border-top-color:#000}.bs-tooltip-auto[data-popper-placement^=right],.bs-tooltip-end{padding:0 .4rem}.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow,.bs-tooltip-end .tooltip-arrow{left:0;width:.4rem;height:.8rem}.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow:before,.bs-tooltip-end .tooltip-arrow:before{right:-1px;border-width:.4rem .4rem .4rem 0;border-right-color:#000}.bs-tooltip-auto[data-popper-placement^=bottom],.bs-tooltip-bottom{padding:.4rem 0}.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow,.bs-tooltip-bottom .tooltip-arrow{top:0}.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow:before,.bs-tooltip-bottom .tooltip-arrow:before{bottom:-1px;border-width:0 .4rem .4rem;border-bottom-color:#000}.bs-tooltip-auto[data-popper-placement^=left],.bs-tooltip-start{padding:0 .4rem}.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow,.bs-tooltip-start .tooltip-arrow{right:0;width:.4rem;height:.8rem}.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow:before,.bs-tooltip-start .tooltip-arrow:before{left:-1px;border-width:.4rem 0 .4rem .4rem;border-left-color:#000}.tooltip-inner{max-width:200px;padding:.25rem .5rem;color:#fff;text-align:center;background-color:#000;border-radius:.25rem}.popover{position:absolute;top:0;left:0;z-index:1070;display:block;max-width:276px;font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.2);border-radius:.3rem}.popover .popover-arrow{position:absolute;display:block;width:1rem;height:.5rem}.popover .popover-arrow:after,.popover .popover-arrow:before{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow,.bs-popover-top>.popover-arrow{bottom:calc(-.5rem - 1px)}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow:before,.bs-popover-top>.popover-arrow:before{bottom:0;border-width:.5rem .5rem 0;border-top-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow:after,.bs-popover-top>.popover-arrow:after{bottom:1px;border-width:.5rem .5rem 0;border-top-color:#fff}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow,.bs-popover-end>.popover-arrow{left:calc(-.5rem - 1px);width:.5rem;height:1rem}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow:before,.bs-popover-end>.popover-arrow:before{left:0;border-width:.5rem .5rem .5rem 0;border-right-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow:after,.bs-popover-end>.popover-arrow:after{left:1px;border-width:.5rem .5rem .5rem 0;border-right-color:#fff}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow,.bs-popover-bottom>.popover-arrow{top:calc(-.5rem - 1px)}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow:before,.bs-popover-bottom>.popover-arrow:before{top:0;border-width:0 .5rem .5rem .5rem;border-bottom-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow:after,.bs-popover-bottom>.popover-arrow:after{top:1px;border-width:0 .5rem .5rem .5rem;border-bottom-color:#fff}.bs-popover-auto[data-popper-placement^=bottom] .popover-header:before,.bs-popover-bottom 
.popover-header:before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-.5rem;content:"";border-bottom:1px solid #f0f0f0}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow,.bs-popover-start>.popover-arrow{right:calc(-.5rem - 1px);width:.5rem;height:1rem}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow:before,.bs-popover-start>.popover-arrow:before{right:0;border-width:.5rem 0 .5rem .5rem;border-left-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow:after,.bs-popover-start>.popover-arrow:after{right:1px;border-width:.5rem 0 .5rem .5rem;border-left-color:#fff}.popover-header{padding:.5rem 1rem;margin-bottom:0;font-size:1rem;background-color:#f0f0f0;border-bottom:1px solid rgba(0,0,0,.2);border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.popover-header:empty{display:none}.popover-body{padding:1rem 1rem;color:#212529}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner:after{display:block;clear:both;content:""}.carousel-item{position:relative;display:none;float:left;width:100%;margin-right:-100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;transition:transform .6s ease-in-out}@media (prefers-reduced-motion:reduce){.carousel-item{transition:none}}.carousel-item-next,.carousel-item-prev,.carousel-item.active{display:block}.active.carousel-item-end,.carousel-item-next:not(.carousel-item-start){transform:translateX(100%)}.active.carousel-item-start,.carousel-item-prev:not(.carousel-item-end){transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;transform:none}.carousel-fade .carousel-item-next.carousel-item-start,.carousel-fade .carousel-item-prev.carousel-item-end,.carousel-fade .carousel-item.active{z-index:1;opacity:1}.carousel-fade .active.carousel-item-end,.carousel-fade .active.carousel-item-start{z-index:0;opacity:0;transition:opacity 0s .6s}@media (prefers-reduced-motion:reduce){.carousel-fade .active.carousel-item-end,.carousel-fade .active.carousel-item-start{transition:none}}.carousel-control-next,.carousel-control-prev{position:absolute;top:0;bottom:0;z-index:1;display:flex;align-items:center;justify-content:center;width:15%;padding:0;color:#fff;text-align:center;background:0 0;border:0;opacity:.5;transition:opacity .15s ease}@media (prefers-reduced-motion:reduce){.carousel-control-next,.carousel-control-prev{transition:none}}.carousel-control-next:focus,.carousel-control-next:hover,.carousel-control-prev:focus,.carousel-control-prev:hover{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-next-icon,.carousel-control-prev-icon{display:inline-block;width:2rem;height:2rem;background-repeat:no-repeat;background-position:50%;background-size:100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3E%3Cpath d='M11.354 1.646a.5.5 0 010 .708L5.707 8l5.647 5.646a.5.5 0 01-.708.708l-6-6a.5.5 0 010-.708l6-6a.5.5 0 01.708 0z'/%3E%3C/svg%3E")}.carousel-control-next-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3E%3Cpath d='M4.646 1.646a.5.5 0 01.708 0l6 6a.5.5 0 010 .708l-6 6a.5.5 0 01-.708-.708L10.293 8 4.646 2.354a.5.5 0 
010-.708z'/%3E%3C/svg%3E")}.carousel-indicators{position:absolute;right:0;bottom:0;left:0;z-index:2;display:flex;justify-content:center;padding:0;margin-right:15%;margin-bottom:1rem;margin-left:15%;list-style:none}.carousel-indicators [data-bs-target]{box-sizing:content-box;flex:0 1 auto;width:30px;height:3px;padding:0;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:#fff;background-clip:padding-box;border:0;border-top:10px solid transparent;border-bottom:10px solid transparent;opacity:.5;transition:opacity .6s ease}@media (prefers-reduced-motion:reduce){.carousel-indicators [data-bs-target]{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{position:absolute;right:15%;bottom:1.25rem;left:15%;padding-top:1.25rem;padding-bottom:1.25rem;color:#fff;text-align:center}.carousel-dark .carousel-control-next-icon,.carousel-dark .carousel-control-prev-icon{-webkit-filter:invert(1) grayscale(100);filter:invert(1) grayscale(100)}.carousel-dark .carousel-indicators [data-bs-target]{background-color:#000}.carousel-dark .carousel-caption{color:#000}@-webkit-keyframes spinner-border{to{transform:rotate(1turn)}}@keyframes spinner-border{to{transform:rotate(1turn)}}.spinner-border{display:inline-block;width:2rem;height:2rem;vertical-align:-.125em;border:.25em solid currentColor;border-right-color:transparent;border-radius:50%;-webkit-animation:spinner-border .75s linear infinite;animation:spinner-border .75s linear infinite}.spinner-border-sm{width:1rem;height:1rem;border-width:.2em}@-webkit-keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}@keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}.spinner-grow{display:inline-block;width:2rem;height:2rem;vertical-align:-.125em;background-color:currentColor;border-radius:50%;opacity:0;-webkit-animation:spinner-grow .75s linear infinite;animation:spinner-grow .75s linear infinite}.spinner-grow-sm{width:1rem;height:1rem}@media (prefers-reduced-motion:reduce){.spinner-border,.spinner-grow{-webkit-animation-duration:1.5s;animation-duration:1.5s}}.offcanvas{position:fixed;bottom:0;z-index:1045;display:flex;flex-direction:column;max-width:100%;visibility:hidden;background-color:#fff;background-clip:padding-box;outline:0;transition:transform .3s ease-in-out}@media (prefers-reduced-motion:reduce){.offcanvas{transition:none}}.offcanvas-backdrop{position:fixed;top:0;left:0;z-index:1040;width:100vw;height:100vh;background-color:#000}.offcanvas-backdrop.fade{opacity:0}.offcanvas-backdrop.show{opacity:.5}.offcanvas-header{display:flex;align-items:center;justify-content:space-between;padding:1rem 1rem}.offcanvas-header .btn-close{padding:.5rem .5rem;margin-top:-.5rem;margin-right:-.5rem;margin-bottom:-.5rem}.offcanvas-title{margin-bottom:0;line-height:1.5}.offcanvas-body{flex-grow:1;padding:1rem 1rem;overflow-y:auto}.offcanvas-start{top:0;left:0;width:400px;border-right:1px solid rgba(0,0,0,.2);transform:translateX(-100%)}.offcanvas-end{top:0;right:0;width:400px;border-left:1px solid rgba(0,0,0,.2);transform:translateX(100%)}.offcanvas-top{top:0;border-bottom:1px solid rgba(0,0,0,.2);transform:translateY(-100%)}.offcanvas-bottom,.offcanvas-top{right:0;left:0;height:30vh;max-height:100%}.offcanvas-bottom{border-top:1px solid 
rgba(0,0,0,.2);transform:translateY(100%)}.offcanvas.show{transform:none}.placeholder{display:inline-block;min-height:1em;vertical-align:middle;cursor:wait;background-color:currentColor;opacity:.5}.placeholder.btn:before{display:inline-block;content:""}.placeholder-xs{min-height:.6em}.placeholder-sm{min-height:.8em}.placeholder-lg{min-height:1.2em}.placeholder-glow .placeholder{-webkit-animation:placeholder-glow 2s ease-in-out infinite;animation:placeholder-glow 2s ease-in-out infinite}@-webkit-keyframes placeholder-glow{50%{opacity:.2}}@keyframes placeholder-glow{50%{opacity:.2}}.placeholder-wave{-webkit-mask-image:linear-gradient(130deg,#000 55%,rgba(0,0,0,.8) 75%,#000 95%);mask-image:linear-gradient(130deg,#000 55%,rgba(0,0,0,.8) 75%,#000 95%);-webkit-mask-size:200% 100%;mask-size:200% 100%;-webkit-animation:placeholder-wave 2s linear infinite;animation:placeholder-wave 2s linear infinite}@-webkit-keyframes placeholder-wave{to{-webkit-mask-position:-200% 0;mask-position:-200% 0}}@keyframes placeholder-wave{to{-webkit-mask-position:-200% 0;mask-position:-200% 0}}.clearfix:after{display:block;clear:both;content:""}.link-primary{color:#0d6efd}.link-primary:focus,.link-primary:hover{color:#0a58ca}.link-secondary{color:#6c757d}.link-secondary:focus,.link-secondary:hover{color:#565e64}.link-success{color:#198754}.link-success:focus,.link-success:hover{color:#146c43}.link-info{color:#0dcaf0}.link-info:focus,.link-info:hover{color:#3dd5f3}.link-warning{color:#ffc107}.link-warning:focus,.link-warning:hover{color:#ffcd39}.link-danger{color:#dc3545}.link-danger:focus,.link-danger:hover{color:#b02a37}.link-light{color:#f8f9fa}.link-light:focus,.link-light:hover{color:#f9fafb}.link-dark{color:#212529}.link-dark:focus,.link-dark:hover{color:#1a1e21}.ratio{position:relative;width:100%}.ratio:before{display:block;padding-top:var(--bs-aspect-ratio);content:""}.ratio>*{position:absolute;top:0;left:0;width:100%;height:100%}.ratio-1x1{--bs-aspect-ratio:100%}.ratio-4x3{--bs-aspect-ratio:75%}.ratio-16x9{--bs-aspect-ratio:56.25%}.ratio-21x9{--bs-aspect-ratio:42.8571428571%}.fixed-top{top:0}.fixed-bottom,.fixed-top{position:fixed;right:0;left:0;z-index:1030}.fixed-bottom{bottom:0}.sticky-top{position:sticky;top:0;z-index:1020}@media (min-width:576px){.sticky-sm-top{position:sticky;top:0;z-index:1020}}@media (min-width:768px){.sticky-md-top{position:sticky;top:0;z-index:1020}}@media (min-width:992px){.sticky-lg-top{position:sticky;top:0;z-index:1020}}@media (min-width:1200px){.sticky-xl-top{position:sticky;top:0;z-index:1020}}@media (min-width:1400px){.sticky-xxl-top{position:sticky;top:0;z-index:1020}}.hstack{flex-direction:row;align-items:center}.hstack,.vstack{display:flex;align-self:stretch}.vstack{flex:1 1 
auto;flex-direction:column}.visually-hidden,.visually-hidden-focusable:not(:focus):not(:focus-within){position:absolute!important;width:1px!important;height:1px!important;padding:0!important;margin:-1px!important;overflow:hidden!important;clip:rect(0,0,0,0)!important;white-space:nowrap!important;border:0!important}.stretched-link:after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.vr{display:inline-block;align-self:stretch;width:1px;min-height:1em;background-color:currentColor;opacity:.25}.align-baseline{vertical-align:baseline!important}.align-top{vertical-align:top!important}.align-middle{vertical-align:middle!important}.align-bottom{vertical-align:bottom!important}.align-text-bottom{vertical-align:text-bottom!important}.align-text-top{vertical-align:text-top!important}.float-start{float:left!important}.float-end{float:right!important}.float-none{float:none!important}.opacity-0{opacity:0!important}.opacity-25{opacity:.25!important}.opacity-50{opacity:.5!important}.opacity-75{opacity:.75!important}.opacity-100{opacity:1!important}.overflow-auto{overflow:auto!important}.overflow-hidden{overflow:hidden!important}.overflow-visible{overflow:visible!important}.overflow-scroll{overflow:scroll!important}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-grid{display:grid!important}.d-table{display:table!important}.d-table-row{display:table-row!important}.d-table-cell{display:table-cell!important}.d-flex{display:flex!important}.d-inline-flex{display:inline-flex!important}.d-none{display:none!important}.shadow{box-shadow:0 .5rem 1rem rgba(0,0,0,.15)!important}.shadow-sm{box-shadow:0 .125rem .25rem rgba(0,0,0,.075)!important}.shadow-lg{box-shadow:0 1rem 3rem rgba(0,0,0,.175)!important}.shadow-none{box-shadow:none!important}.position-static{position:static!important}.position-relative{position:relative!important}.position-absolute{position:absolute!important}.position-fixed{position:fixed!important}.position-sticky{position:sticky!important}.top-0{top:0!important}.top-50{top:50%!important}.top-100{top:100%!important}.bottom-0{bottom:0!important}.bottom-50{bottom:50%!important}.bottom-100{bottom:100%!important}.start-0{left:0!important}.start-50{left:50%!important}.start-100{left:100%!important}.end-0{right:0!important}.end-50{right:50%!important}.end-100{right:100%!important}.translate-middle{transform:translate(-50%,-50%)!important}.translate-middle-x{transform:translateX(-50%)!important}.translate-middle-y{transform:translateY(-50%)!important}.border{border:1px solid #dee2e6!important}.border-0{border:0!important}.border-top{border-top:1px solid #dee2e6!important}.border-top-0{border-top:0!important}.border-end{border-right:1px solid #dee2e6!important}.border-end-0{border-right:0!important}.border-bottom{border-bottom:1px solid #dee2e6!important}.border-bottom-0{border-bottom:0!important}.border-start{border-left:1px solid 
#dee2e6!important}.border-start-0{border-left:0!important}.border-primary{border-color:#0d6efd!important}.border-secondary{border-color:#6c757d!important}.border-success{border-color:#198754!important}.border-info{border-color:#0dcaf0!important}.border-warning{border-color:#ffc107!important}.border-danger{border-color:#dc3545!important}.border-light{border-color:#f8f9fa!important}.border-dark{border-color:#212529!important}.border-white{border-color:#fff!important}.border-1{border-width:1px!important}.border-2{border-width:2px!important}.border-3{border-width:3px!important}.border-4{border-width:4px!important}.border-5{border-width:5px!important}.w-25{width:25%!important}.w-50{width:50%!important}.w-75{width:75%!important}.w-100{width:100%!important}.w-auto{width:auto!important}.mw-100{max-width:100%!important}.vw-100{width:100vw!important}.min-vw-100{min-width:100vw!important}.h-25{height:25%!important}.h-50{height:50%!important}.h-75{height:75%!important}.h-100{height:100%!important}.h-auto{height:auto!important}.mh-100{max-height:100%!important}.vh-100{height:100vh!important}.min-vh-100{min-height:100vh!important}.flex-fill{flex:1 1 auto!important}.flex-row{flex-direction:row!important}.flex-column{flex-direction:column!important}.flex-row-reverse{flex-direction:row-reverse!important}.flex-column-reverse{flex-direction:column-reverse!important}.flex-grow-0{flex-grow:0!important}.flex-grow-1{flex-grow:1!important}.flex-shrink-0{flex-shrink:0!important}.flex-shrink-1{flex-shrink:1!important}.flex-wrap{flex-wrap:wrap!important}.flex-nowrap{flex-wrap:nowrap!important}.flex-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-0{gap:0!important}.gap-1{gap:.25rem!important}.gap-2{gap:.5rem!important}.gap-3{gap:1rem!important}.gap-4{gap:1.5rem!important}.gap-5{gap:3rem!important}.justify-content-start{justify-content:flex-start!important}.justify-content-end{justify-content:flex-end!important}.justify-content-center{justify-content:center!important}.justify-content-between{justify-content:space-between!important}.justify-content-around{justify-content:space-around!important}.justify-content-evenly{justify-content:space-evenly!important}.align-items-start{align-items:flex-start!important}.align-items-end{align-items:flex-end!important}.align-items-center{align-items:center!important}.align-items-baseline{align-items:baseline!important}.align-items-stretch{align-items:stretch!important}.align-content-start{align-content:flex-start!important}.align-content-end{align-content:flex-end!important}.align-content-center{align-content:center!important}.align-content-between{align-content:space-between!important}.align-content-around{align-content:space-around!important}.align-content-stretch{align-content:stretch!important}.align-self-auto{align-self:auto!important}.align-self-start{align-self:flex-start!important}.align-self-end{align-self:flex-end!important}.align-self-center{align-self:center!important}.align-self-baseline{align-self:baseline!important}.align-self-stretch{align-self:stretch!important}.order-first{order:-1!important}.order-0{order:0!important}.order-1{order:1!important}.order-2{order:2!important}.order-3{order:3!important}.order-4{order:4!important}.order-5{order:5!important}.order-last{order:6!important}.m-0{margin:0!important}.m-1{margin:.25rem!important}.m-2{margin:.5rem!important}.m-3{margin:1rem!important}.m-4{margin:1.5rem!important}.m-5{margin:3rem!important}.m-auto{margin:auto!important}.mx-0{margin-right:0!important;margin-left:0!important}.mx-1{margin-right:.25rem!important;marg
in-left:.25rem!important}.mx-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-3{margin-right:1rem!important;margin-left:1rem!important}.mx-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-5{margin-right:3rem!important;margin-left:3rem!important}.mx-auto{margin-right:auto!important;margin-left:auto!important}.my-0{margin-top:0!important;margin-bottom:0!important}.my-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-0{margin-top:0!important}.mt-1{margin-top:.25rem!important}.mt-2{margin-top:.5rem!important}.mt-3{margin-top:1rem!important}.mt-4{margin-top:1.5rem!important}.mt-5{margin-top:3rem!important}.mt-auto{margin-top:auto!important}.me-0{margin-right:0!important}.me-1{margin-right:.25rem!important}.me-2{margin-right:.5rem!important}.me-3{margin-right:1rem!important}.me-4{margin-right:1.5rem!important}.me-5{margin-right:3rem!important}.me-auto{margin-right:auto!important}.mb-0{margin-bottom:0!important}.mb-1{margin-bottom:.25rem!important}.mb-2{margin-bottom:.5rem!important}.mb-3{margin-bottom:1rem!important}.mb-4{margin-bottom:1.5rem!important}.mb-5{margin-bottom:3rem!important}.mb-auto{margin-bottom:auto!important}.ms-0{margin-left:0!important}.ms-1{margin-left:.25rem!important}.ms-2{margin-left:.5rem!important}.ms-3{margin-left:1rem!important}.ms-4{margin-left:1.5rem!important}.ms-5{margin-left:3rem!important}.ms-auto{margin-left:auto!important}.p-0{padding:0!important}.p-1{padding:.25rem!important}.p-2{padding:.5rem!important}.p-3{padding:1rem!important}.p-4{padding:1.5rem!important}.p-5{padding:3rem!important}.px-0{padding-right:0!important;padding-left:0!important}.px-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-3{padding-right:1rem!important;padding-left:1rem!important}.px-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-5{padding-right:3rem!important;padding-left:3rem!important}.py-0{padding-top:0!important;padding-bottom:0!important}.py-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-0{padding-top:0!important}.pt-1{padding-top:.25rem!important}.pt-2{padding-top:.5rem!important}.pt-3{padding-top:1rem!important}.pt-4{padding-top:1.5rem!important}.pt-5{padding-top:3rem!important}.pe-0{padding-right:0!important}.pe-1{padding-right:.25rem!important}.pe-2{padding-right:.5rem!important}.pe-3{padding-right:1rem!important}.pe-4{padding-right:1.5rem!important}.pe-5{padding-right:3rem!important}.pb-0{padding-bottom:0!important}.pb-1{padding-bottom:.25rem!important}.pb-2{padding-bottom:.5rem!important}.pb-3{padding-bottom:1rem!important}.pb-4{padding-bottom:1.5rem!important}.pb-5{padding-bottom:3rem!important}.ps-0{padding-left:0!important}.ps-1{padding-left:.25rem!important}.ps-2{padding-left:.5rem!important}.ps-3{padding-left:1rem!important}.ps-4{padding-left:1.5rem!important}.ps-5{padding-left:3rem!important}.font-monospace{font-family:var(--bs-font-monospace)!i
mportant}.fs-1{font-size:calc(1.375rem + 1.5vw)!important}.fs-2{font-size:calc(1.325rem + .9vw)!important}.fs-3{font-size:calc(1.3rem + .6vw)!important}.fs-4{font-size:calc(1.275rem + .3vw)!important}.fs-5{font-size:1.25rem!important}.fs-6{font-size:1rem!important}.fst-italic{font-style:italic!important}.fst-normal{font-style:normal!important}.fw-light{font-weight:300!important}.fw-lighter{font-weight:lighter!important}.fw-normal{font-weight:400!important}.fw-bold{font-weight:700!important}.fw-bolder{font-weight:bolder!important}.lh-1{line-height:1!important}.lh-sm{line-height:1.25!important}.lh-base{line-height:1.5!important}.lh-lg{line-height:2!important}.text-start{text-align:left!important}.text-end{text-align:right!important}.text-center{text-align:center!important}.text-decoration-none{text-decoration:none!important}.text-decoration-underline{text-decoration:underline!important}.text-decoration-line-through{text-decoration:line-through!important}.text-lowercase{text-transform:lowercase!important}.text-uppercase{text-transform:uppercase!important}.text-capitalize{text-transform:capitalize!important}.text-wrap{white-space:normal!important}.text-nowrap{white-space:nowrap!important}.text-break{word-wrap:break-word!important;word-break:break-word!important}.text-primary{--bs-text-opacity:1;color:rgba(var(--bs-primary-rgb),var(--bs-text-opacity))!important}.text-secondary{--bs-text-opacity:1;color:rgba(var(--bs-secondary-rgb),var(--bs-text-opacity))!important}.text-success{--bs-text-opacity:1;color:rgba(var(--bs-success-rgb),var(--bs-text-opacity))!important}.text-info{--bs-text-opacity:1;color:rgba(var(--bs-info-rgb),var(--bs-text-opacity))!important}.text-warning{--bs-text-opacity:1;color:rgba(var(--bs-warning-rgb),var(--bs-text-opacity))!important}.text-danger{--bs-text-opacity:1;color:rgba(var(--bs-danger-rgb),var(--bs-text-opacity))!important}.text-light{--bs-text-opacity:1;color:rgba(var(--bs-light-rgb),var(--bs-text-opacity))!important}.text-dark{--bs-text-opacity:1;color:rgba(var(--bs-dark-rgb),var(--bs-text-opacity))!important}.text-black{--bs-text-opacity:1;color:rgba(var(--bs-black-rgb),var(--bs-text-opacity))!important}.text-white{--bs-text-opacity:1;color:rgba(var(--bs-white-rgb),var(--bs-text-opacity))!important}.text-body{--bs-text-opacity:1;color:rgba(var(--bs-body-color-rgb),var(--bs-text-opacity))!important}.text-muted{--bs-text-opacity:1;color:#6c757d!important}.text-black-50{--bs-text-opacity:1;color:rgba(0,0,0,.5)!important}.text-white-50{--bs-text-opacity:1;color:hsla(0,0%,100%,.5)!important}.text-reset{--bs-text-opacity:1;color:inherit!important}.text-opacity-25{--bs-text-opacity:0.25}.text-opacity-50{--bs-text-opacity:0.5}.text-opacity-75{--bs-text-opacity:0.75}.text-opacity-100{--bs-text-opacity:1}.bg-primary{--bs-bg-opacity:1;background-color:rgba(var(--bs-primary-rgb),var(--bs-bg-opacity))!important}.bg-secondary{--bs-bg-opacity:1;background-color:rgba(var(--bs-secondary-rgb),var(--bs-bg-opacity))!important}.bg-success{--bs-bg-opacity:1;background-color:rgba(var(--bs-success-rgb),var(--bs-bg-opacity))!important}.bg-info{--bs-bg-opacity:1;background-color:rgba(var(--bs-info-rgb),var(--bs-bg-opacity))!important}.bg-warning{--bs-bg-opacity:1;background-color:rgba(var(--bs-warning-rgb),var(--bs-bg-opacity))!important}.bg-danger{--bs-bg-opacity:1;background-color:rgba(var(--bs-danger-rgb),var(--bs-bg-opacity))!important}.bg-light{--bs-bg-opacity:1;background-color:rgba(var(--bs-light-rgb),var(--bs-bg-opacity))!important}.bg-dark{--bs-bg-opacity:1;background-color:rgba(
var(--bs-dark-rgb),var(--bs-bg-opacity))!important}.bg-black{--bs-bg-opacity:1;background-color:rgba(var(--bs-black-rgb),var(--bs-bg-opacity))!important}.bg-white{--bs-bg-opacity:1;background-color:rgba(var(--bs-white-rgb),var(--bs-bg-opacity))!important}.bg-body{--bs-bg-opacity:1;background-color:rgba(var(--bs-body-bg-rgb),var(--bs-bg-opacity))!important}.bg-transparent{--bs-bg-opacity:1;background-color:transparent!important}.bg-opacity-10{--bs-bg-opacity:0.1}.bg-opacity-25{--bs-bg-opacity:0.25}.bg-opacity-50{--bs-bg-opacity:0.5}.bg-opacity-75{--bs-bg-opacity:0.75}.bg-opacity-100{--bs-bg-opacity:1}.bg-gradient{background-image:var(--bs-gradient)!important}.user-select-all{-webkit-user-select:all!important;-moz-user-select:all!important;-ms-user-select:all!important;user-select:all!important}.user-select-auto{-webkit-user-select:auto!important;-moz-user-select:auto!important;-ms-user-select:auto!important;user-select:auto!important}.user-select-none{-webkit-user-select:none!important;-moz-user-select:none!important;-ms-user-select:none!important;user-select:none!important}.pe-none{pointer-events:none!important}.pe-auto{pointer-events:auto!important}.rounded{border-radius:.25rem!important}.rounded-0{border-radius:0!important}.rounded-1{border-radius:.2rem!important}.rounded-2{border-radius:.25rem!important}.rounded-3{border-radius:.3rem!important}.rounded-circle{border-radius:50%!important}.rounded-pill{border-radius:50rem!important}.rounded-top{border-top-left-radius:.25rem!important}.rounded-end,.rounded-top{border-top-right-radius:.25rem!important}.rounded-bottom,.rounded-end{border-bottom-right-radius:.25rem!important}.rounded-bottom,.rounded-start{border-bottom-left-radius:.25rem!important}.rounded-start{border-top-left-radius:.25rem!important}.visible{visibility:visible!important}.invisible{visibility:hidden!important}@media (min-width:576px){.float-sm-start{float:left!important}.float-sm-end{float:right!important}.float-sm-none{float:none!important}.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-grid{display:grid!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:flex!important}.d-sm-inline-flex{display:inline-flex!important}.d-sm-none{display:none!important}.flex-sm-fill{flex:1 1 
auto!important}.flex-sm-row{flex-direction:row!important}.flex-sm-column{flex-direction:column!important}.flex-sm-row-reverse{flex-direction:row-reverse!important}.flex-sm-column-reverse{flex-direction:column-reverse!important}.flex-sm-grow-0{flex-grow:0!important}.flex-sm-grow-1{flex-grow:1!important}.flex-sm-shrink-0{flex-shrink:0!important}.flex-sm-shrink-1{flex-shrink:1!important}.flex-sm-wrap{flex-wrap:wrap!important}.flex-sm-nowrap{flex-wrap:nowrap!important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-sm-0{gap:0!important}.gap-sm-1{gap:.25rem!important}.gap-sm-2{gap:.5rem!important}.gap-sm-3{gap:1rem!important}.gap-sm-4{gap:1.5rem!important}.gap-sm-5{gap:3rem!important}.justify-content-sm-start{justify-content:flex-start!important}.justify-content-sm-end{justify-content:flex-end!important}.justify-content-sm-center{justify-content:center!important}.justify-content-sm-between{justify-content:space-between!important}.justify-content-sm-around{justify-content:space-around!important}.justify-content-sm-evenly{justify-content:space-evenly!important}.align-items-sm-start{align-items:flex-start!important}.align-items-sm-end{align-items:flex-end!important}.align-items-sm-center{align-items:center!important}.align-items-sm-baseline{align-items:baseline!important}.align-items-sm-stretch{align-items:stretch!important}.align-content-sm-start{align-content:flex-start!important}.align-content-sm-end{align-content:flex-end!important}.align-content-sm-center{align-content:center!important}.align-content-sm-between{align-content:space-between!important}.align-content-sm-around{align-content:space-around!important}.align-content-sm-stretch{align-content:stretch!important}.align-self-sm-auto{align-self:auto!important}.align-self-sm-start{align-self:flex-start!important}.align-self-sm-end{align-self:flex-end!important}.align-self-sm-center{align-self:center!important}.align-self-sm-baseline{align-self:baseline!important}.align-self-sm-stretch{align-self:stretch!important}.order-sm-first{order:-1!important}.order-sm-0{order:0!important}.order-sm-1{order:1!important}.order-sm-2{order:2!important}.order-sm-3{order:3!important}.order-sm-4{order:4!important}.order-sm-5{order:5!important}.order-sm-last{order:6!important}.m-sm-0{margin:0!important}.m-sm-1{margin:.25rem!important}.m-sm-2{margin:.5rem!important}.m-sm-3{margin:1rem!important}.m-sm-4{margin:1.5rem!important}.m-sm-5{margin:3rem!important}.m-sm-auto{margin:auto!important}.mx-sm-0{margin-right:0!important;margin-left:0!important}.mx-sm-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-sm-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-sm-3{margin-right:1rem!important;margin-left:1rem!important}.mx-sm-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-sm-5{margin-right:3rem!important;margin-left:3rem!important}.mx-sm-auto{margin-right:auto!important;margin-left:auto!important}.my-sm-0{margin-top:0!important;margin-bottom:0!important}.my-sm-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-sm-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-sm-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-sm-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-sm-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-sm-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-sm-0{margin-top:0!important}.mt-sm-1{margin-top:.25rem!important}.mt-sm-2{margin-top:.5rem!important}.mt-sm-3{margin-top:1rem!important}.mt-sm-4{margin-top:1.5rem!importa
nt}.mt-sm-5{margin-top:3rem!important}.mt-sm-auto{margin-top:auto!important}.me-sm-0{margin-right:0!important}.me-sm-1{margin-right:.25rem!important}.me-sm-2{margin-right:.5rem!important}.me-sm-3{margin-right:1rem!important}.me-sm-4{margin-right:1.5rem!important}.me-sm-5{margin-right:3rem!important}.me-sm-auto{margin-right:auto!important}.mb-sm-0{margin-bottom:0!important}.mb-sm-1{margin-bottom:.25rem!important}.mb-sm-2{margin-bottom:.5rem!important}.mb-sm-3{margin-bottom:1rem!important}.mb-sm-4{margin-bottom:1.5rem!important}.mb-sm-5{margin-bottom:3rem!important}.mb-sm-auto{margin-bottom:auto!important}.ms-sm-0{margin-left:0!important}.ms-sm-1{margin-left:.25rem!important}.ms-sm-2{margin-left:.5rem!important}.ms-sm-3{margin-left:1rem!important}.ms-sm-4{margin-left:1.5rem!important}.ms-sm-5{margin-left:3rem!important}.ms-sm-auto{margin-left:auto!important}.p-sm-0{padding:0!important}.p-sm-1{padding:.25rem!important}.p-sm-2{padding:.5rem!important}.p-sm-3{padding:1rem!important}.p-sm-4{padding:1.5rem!important}.p-sm-5{padding:3rem!important}.px-sm-0{padding-right:0!important;padding-left:0!important}.px-sm-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-sm-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-sm-3{padding-right:1rem!important;padding-left:1rem!important}.px-sm-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-sm-5{padding-right:3rem!important;padding-left:3rem!important}.py-sm-0{padding-top:0!important;padding-bottom:0!important}.py-sm-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-sm-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-sm-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-sm-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-sm-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-sm-0{padding-top:0!important}.pt-sm-1{padding-top:.25rem!important}.pt-sm-2{padding-top:.5rem!important}.pt-sm-3{padding-top:1rem!important}.pt-sm-4{padding-top:1.5rem!important}.pt-sm-5{padding-top:3rem!important}.pe-sm-0{padding-right:0!important}.pe-sm-1{padding-right:.25rem!important}.pe-sm-2{padding-right:.5rem!important}.pe-sm-3{padding-right:1rem!important}.pe-sm-4{padding-right:1.5rem!important}.pe-sm-5{padding-right:3rem!important}.pb-sm-0{padding-bottom:0!important}.pb-sm-1{padding-bottom:.25rem!important}.pb-sm-2{padding-bottom:.5rem!important}.pb-sm-3{padding-bottom:1rem!important}.pb-sm-4{padding-bottom:1.5rem!important}.pb-sm-5{padding-bottom:3rem!important}.ps-sm-0{padding-left:0!important}.ps-sm-1{padding-left:.25rem!important}.ps-sm-2{padding-left:.5rem!important}.ps-sm-3{padding-left:1rem!important}.ps-sm-4{padding-left:1.5rem!important}.ps-sm-5{padding-left:3rem!important}.text-sm-start{text-align:left!important}.text-sm-end{text-align:right!important}.text-sm-center{text-align:center!important}}@media (min-width:768px){.float-md-start{float:left!important}.float-md-end{float:right!important}.float-md-none{float:none!important}.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-grid{display:grid!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:flex!important}.d-md-inline-flex{display:inline-flex!important}.d-md-none{display:none!important}.flex-md-fill{flex:1 1 
auto!important}.flex-md-row{flex-direction:row!important}.flex-md-column{flex-direction:column!important}.flex-md-row-reverse{flex-direction:row-reverse!important}.flex-md-column-reverse{flex-direction:column-reverse!important}.flex-md-grow-0{flex-grow:0!important}.flex-md-grow-1{flex-grow:1!important}.flex-md-shrink-0{flex-shrink:0!important}.flex-md-shrink-1{flex-shrink:1!important}.flex-md-wrap{flex-wrap:wrap!important}.flex-md-nowrap{flex-wrap:nowrap!important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-md-0{gap:0!important}.gap-md-1{gap:.25rem!important}.gap-md-2{gap:.5rem!important}.gap-md-3{gap:1rem!important}.gap-md-4{gap:1.5rem!important}.gap-md-5{gap:3rem!important}.justify-content-md-start{justify-content:flex-start!important}.justify-content-md-end{justify-content:flex-end!important}.justify-content-md-center{justify-content:center!important}.justify-content-md-between{justify-content:space-between!important}.justify-content-md-around{justify-content:space-around!important}.justify-content-md-evenly{justify-content:space-evenly!important}.align-items-md-start{align-items:flex-start!important}.align-items-md-end{align-items:flex-end!important}.align-items-md-center{align-items:center!important}.align-items-md-baseline{align-items:baseline!important}.align-items-md-stretch{align-items:stretch!important}.align-content-md-start{align-content:flex-start!important}.align-content-md-end{align-content:flex-end!important}.align-content-md-center{align-content:center!important}.align-content-md-between{align-content:space-between!important}.align-content-md-around{align-content:space-around!important}.align-content-md-stretch{align-content:stretch!important}.align-self-md-auto{align-self:auto!important}.align-self-md-start{align-self:flex-start!important}.align-self-md-end{align-self:flex-end!important}.align-self-md-center{align-self:center!important}.align-self-md-baseline{align-self:baseline!important}.align-self-md-stretch{align-self:stretch!important}.order-md-first{order:-1!important}.order-md-0{order:0!important}.order-md-1{order:1!important}.order-md-2{order:2!important}.order-md-3{order:3!important}.order-md-4{order:4!important}.order-md-5{order:5!important}.order-md-last{order:6!important}.m-md-0{margin:0!important}.m-md-1{margin:.25rem!important}.m-md-2{margin:.5rem!important}.m-md-3{margin:1rem!important}.m-md-4{margin:1.5rem!important}.m-md-5{margin:3rem!important}.m-md-auto{margin:auto!important}.mx-md-0{margin-right:0!important;margin-left:0!important}.mx-md-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-md-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-md-3{margin-right:1rem!important;margin-left:1rem!important}.mx-md-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-md-5{margin-right:3rem!important;margin-left:3rem!important}.mx-md-auto{margin-right:auto!important;margin-left:auto!important}.my-md-0{margin-top:0!important;margin-bottom:0!important}.my-md-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-md-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-md-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-md-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-md-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-md-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-md-0{margin-top:0!important}.mt-md-1{margin-top:.25rem!important}.mt-md-2{margin-top:.5rem!important}.mt-md-3{margin-top:1rem!important}.mt-md-4{margin-top:1.5rem!importa
nt}.mt-md-5{margin-top:3rem!important}.mt-md-auto{margin-top:auto!important}.me-md-0{margin-right:0!important}.me-md-1{margin-right:.25rem!important}.me-md-2{margin-right:.5rem!important}.me-md-3{margin-right:1rem!important}.me-md-4{margin-right:1.5rem!important}.me-md-5{margin-right:3rem!important}.me-md-auto{margin-right:auto!important}.mb-md-0{margin-bottom:0!important}.mb-md-1{margin-bottom:.25rem!important}.mb-md-2{margin-bottom:.5rem!important}.mb-md-3{margin-bottom:1rem!important}.mb-md-4{margin-bottom:1.5rem!important}.mb-md-5{margin-bottom:3rem!important}.mb-md-auto{margin-bottom:auto!important}.ms-md-0{margin-left:0!important}.ms-md-1{margin-left:.25rem!important}.ms-md-2{margin-left:.5rem!important}.ms-md-3{margin-left:1rem!important}.ms-md-4{margin-left:1.5rem!important}.ms-md-5{margin-left:3rem!important}.ms-md-auto{margin-left:auto!important}.p-md-0{padding:0!important}.p-md-1{padding:.25rem!important}.p-md-2{padding:.5rem!important}.p-md-3{padding:1rem!important}.p-md-4{padding:1.5rem!important}.p-md-5{padding:3rem!important}.px-md-0{padding-right:0!important;padding-left:0!important}.px-md-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-md-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-md-3{padding-right:1rem!important;padding-left:1rem!important}.px-md-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-md-5{padding-right:3rem!important;padding-left:3rem!important}.py-md-0{padding-top:0!important;padding-bottom:0!important}.py-md-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-md-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-md-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-md-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-md-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-md-0{padding-top:0!important}.pt-md-1{padding-top:.25rem!important}.pt-md-2{padding-top:.5rem!important}.pt-md-3{padding-top:1rem!important}.pt-md-4{padding-top:1.5rem!important}.pt-md-5{padding-top:3rem!important}.pe-md-0{padding-right:0!important}.pe-md-1{padding-right:.25rem!important}.pe-md-2{padding-right:.5rem!important}.pe-md-3{padding-right:1rem!important}.pe-md-4{padding-right:1.5rem!important}.pe-md-5{padding-right:3rem!important}.pb-md-0{padding-bottom:0!important}.pb-md-1{padding-bottom:.25rem!important}.pb-md-2{padding-bottom:.5rem!important}.pb-md-3{padding-bottom:1rem!important}.pb-md-4{padding-bottom:1.5rem!important}.pb-md-5{padding-bottom:3rem!important}.ps-md-0{padding-left:0!important}.ps-md-1{padding-left:.25rem!important}.ps-md-2{padding-left:.5rem!important}.ps-md-3{padding-left:1rem!important}.ps-md-4{padding-left:1.5rem!important}.ps-md-5{padding-left:3rem!important}.text-md-start{text-align:left!important}.text-md-end{text-align:right!important}.text-md-center{text-align:center!important}}@media (min-width:992px){.float-lg-start{float:left!important}.float-lg-end{float:right!important}.float-lg-none{float:none!important}.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-grid{display:grid!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:flex!important}.d-lg-inline-flex{display:inline-flex!important}.d-lg-none{display:none!important}.flex-lg-fill{flex:1 1 
auto!important}.flex-lg-row{flex-direction:row!important}.flex-lg-column{flex-direction:column!important}.flex-lg-row-reverse{flex-direction:row-reverse!important}.flex-lg-column-reverse{flex-direction:column-reverse!important}.flex-lg-grow-0{flex-grow:0!important}.flex-lg-grow-1{flex-grow:1!important}.flex-lg-shrink-0{flex-shrink:0!important}.flex-lg-shrink-1{flex-shrink:1!important}.flex-lg-wrap{flex-wrap:wrap!important}.flex-lg-nowrap{flex-wrap:nowrap!important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-lg-0{gap:0!important}.gap-lg-1{gap:.25rem!important}.gap-lg-2{gap:.5rem!important}.gap-lg-3{gap:1rem!important}.gap-lg-4{gap:1.5rem!important}.gap-lg-5{gap:3rem!important}.justify-content-lg-start{justify-content:flex-start!important}.justify-content-lg-end{justify-content:flex-end!important}.justify-content-lg-center{justify-content:center!important}.justify-content-lg-between{justify-content:space-between!important}.justify-content-lg-around{justify-content:space-around!important}.justify-content-lg-evenly{justify-content:space-evenly!important}.align-items-lg-start{align-items:flex-start!important}.align-items-lg-end{align-items:flex-end!important}.align-items-lg-center{align-items:center!important}.align-items-lg-baseline{align-items:baseline!important}.align-items-lg-stretch{align-items:stretch!important}.align-content-lg-start{align-content:flex-start!important}.align-content-lg-end{align-content:flex-end!important}.align-content-lg-center{align-content:center!important}.align-content-lg-between{align-content:space-between!important}.align-content-lg-around{align-content:space-around!important}.align-content-lg-stretch{align-content:stretch!important}.align-self-lg-auto{align-self:auto!important}.align-self-lg-start{align-self:flex-start!important}.align-self-lg-end{align-self:flex-end!important}.align-self-lg-center{align-self:center!important}.align-self-lg-baseline{align-self:baseline!important}.align-self-lg-stretch{align-self:stretch!important}.order-lg-first{order:-1!important}.order-lg-0{order:0!important}.order-lg-1{order:1!important}.order-lg-2{order:2!important}.order-lg-3{order:3!important}.order-lg-4{order:4!important}.order-lg-5{order:5!important}.order-lg-last{order:6!important}.m-lg-0{margin:0!important}.m-lg-1{margin:.25rem!important}.m-lg-2{margin:.5rem!important}.m-lg-3{margin:1rem!important}.m-lg-4{margin:1.5rem!important}.m-lg-5{margin:3rem!important}.m-lg-auto{margin:auto!important}.mx-lg-0{margin-right:0!important;margin-left:0!important}.mx-lg-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-lg-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-lg-3{margin-right:1rem!important;margin-left:1rem!important}.mx-lg-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-lg-5{margin-right:3rem!important;margin-left:3rem!important}.mx-lg-auto{margin-right:auto!important;margin-left:auto!important}.my-lg-0{margin-top:0!important;margin-bottom:0!important}.my-lg-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-lg-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-lg-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-lg-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-lg-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-lg-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-lg-0{margin-top:0!important}.mt-lg-1{margin-top:.25rem!important}.mt-lg-2{margin-top:.5rem!important}.mt-lg-3{margin-top:1rem!important}.mt-lg-4{margin-top:1.5rem!importa
nt}.mt-lg-5{margin-top:3rem!important}.mt-lg-auto{margin-top:auto!important}.me-lg-0{margin-right:0!important}.me-lg-1{margin-right:.25rem!important}.me-lg-2{margin-right:.5rem!important}.me-lg-3{margin-right:1rem!important}.me-lg-4{margin-right:1.5rem!important}.me-lg-5{margin-right:3rem!important}.me-lg-auto{margin-right:auto!important}.mb-lg-0{margin-bottom:0!important}.mb-lg-1{margin-bottom:.25rem!important}.mb-lg-2{margin-bottom:.5rem!important}.mb-lg-3{margin-bottom:1rem!important}.mb-lg-4{margin-bottom:1.5rem!important}.mb-lg-5{margin-bottom:3rem!important}.mb-lg-auto{margin-bottom:auto!important}.ms-lg-0{margin-left:0!important}.ms-lg-1{margin-left:.25rem!important}.ms-lg-2{margin-left:.5rem!important}.ms-lg-3{margin-left:1rem!important}.ms-lg-4{margin-left:1.5rem!important}.ms-lg-5{margin-left:3rem!important}.ms-lg-auto{margin-left:auto!important}.p-lg-0{padding:0!important}.p-lg-1{padding:.25rem!important}.p-lg-2{padding:.5rem!important}.p-lg-3{padding:1rem!important}.p-lg-4{padding:1.5rem!important}.p-lg-5{padding:3rem!important}.px-lg-0{padding-right:0!important;padding-left:0!important}.px-lg-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-lg-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-lg-3{padding-right:1rem!important;padding-left:1rem!important}.px-lg-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-lg-5{padding-right:3rem!important;padding-left:3rem!important}.py-lg-0{padding-top:0!important;padding-bottom:0!important}.py-lg-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-lg-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-lg-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-lg-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-lg-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-lg-0{padding-top:0!important}.pt-lg-1{padding-top:.25rem!important}.pt-lg-2{padding-top:.5rem!important}.pt-lg-3{padding-top:1rem!important}.pt-lg-4{padding-top:1.5rem!important}.pt-lg-5{padding-top:3rem!important}.pe-lg-0{padding-right:0!important}.pe-lg-1{padding-right:.25rem!important}.pe-lg-2{padding-right:.5rem!important}.pe-lg-3{padding-right:1rem!important}.pe-lg-4{padding-right:1.5rem!important}.pe-lg-5{padding-right:3rem!important}.pb-lg-0{padding-bottom:0!important}.pb-lg-1{padding-bottom:.25rem!important}.pb-lg-2{padding-bottom:.5rem!important}.pb-lg-3{padding-bottom:1rem!important}.pb-lg-4{padding-bottom:1.5rem!important}.pb-lg-5{padding-bottom:3rem!important}.ps-lg-0{padding-left:0!important}.ps-lg-1{padding-left:.25rem!important}.ps-lg-2{padding-left:.5rem!important}.ps-lg-3{padding-left:1rem!important}.ps-lg-4{padding-left:1.5rem!important}.ps-lg-5{padding-left:3rem!important}.text-lg-start{text-align:left!important}.text-lg-end{text-align:right!important}.text-lg-center{text-align:center!important}}@media (min-width:1200px){.float-xl-start{float:left!important}.float-xl-end{float:right!important}.float-xl-none{float:none!important}.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-grid{display:grid!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:flex!important}.d-xl-inline-flex{display:inline-flex!important}.d-xl-none{display:none!important}.flex-xl-fill{flex:1 1 
auto!important}.flex-xl-row{flex-direction:row!important}.flex-xl-column{flex-direction:column!important}.flex-xl-row-reverse{flex-direction:row-reverse!important}.flex-xl-column-reverse{flex-direction:column-reverse!important}.flex-xl-grow-0{flex-grow:0!important}.flex-xl-grow-1{flex-grow:1!important}.flex-xl-shrink-0{flex-shrink:0!important}.flex-xl-shrink-1{flex-shrink:1!important}.flex-xl-wrap{flex-wrap:wrap!important}.flex-xl-nowrap{flex-wrap:nowrap!important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-xl-0{gap:0!important}.gap-xl-1{gap:.25rem!important}.gap-xl-2{gap:.5rem!important}.gap-xl-3{gap:1rem!important}.gap-xl-4{gap:1.5rem!important}.gap-xl-5{gap:3rem!important}.justify-content-xl-start{justify-content:flex-start!important}.justify-content-xl-end{justify-content:flex-end!important}.justify-content-xl-center{justify-content:center!important}.justify-content-xl-between{justify-content:space-between!important}.justify-content-xl-around{justify-content:space-around!important}.justify-content-xl-evenly{justify-content:space-evenly!important}.align-items-xl-start{align-items:flex-start!important}.align-items-xl-end{align-items:flex-end!important}.align-items-xl-center{align-items:center!important}.align-items-xl-baseline{align-items:baseline!important}.align-items-xl-stretch{align-items:stretch!important}.align-content-xl-start{align-content:flex-start!important}.align-content-xl-end{align-content:flex-end!important}.align-content-xl-center{align-content:center!important}.align-content-xl-between{align-content:space-between!important}.align-content-xl-around{align-content:space-around!important}.align-content-xl-stretch{align-content:stretch!important}.align-self-xl-auto{align-self:auto!important}.align-self-xl-start{align-self:flex-start!important}.align-self-xl-end{align-self:flex-end!important}.align-self-xl-center{align-self:center!important}.align-self-xl-baseline{align-self:baseline!important}.align-self-xl-stretch{align-self:stretch!important}.order-xl-first{order:-1!important}.order-xl-0{order:0!important}.order-xl-1{order:1!important}.order-xl-2{order:2!important}.order-xl-3{order:3!important}.order-xl-4{order:4!important}.order-xl-5{order:5!important}.order-xl-last{order:6!important}.m-xl-0{margin:0!important}.m-xl-1{margin:.25rem!important}.m-xl-2{margin:.5rem!important}.m-xl-3{margin:1rem!important}.m-xl-4{margin:1.5rem!important}.m-xl-5{margin:3rem!important}.m-xl-auto{margin:auto!important}.mx-xl-0{margin-right:0!important;margin-left:0!important}.mx-xl-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-xl-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-xl-3{margin-right:1rem!important;margin-left:1rem!important}.mx-xl-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-xl-5{margin-right:3rem!important;margin-left:3rem!important}.mx-xl-auto{margin-right:auto!important;margin-left:auto!important}.my-xl-0{margin-top:0!important;margin-bottom:0!important}.my-xl-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-xl-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-xl-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-xl-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-xl-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-xl-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-xl-0{margin-top:0!important}.mt-xl-1{margin-top:.25rem!important}.mt-xl-2{margin-top:.5rem!important}.mt-xl-3{margin-top:1rem!important}.mt-xl-4{margin-top:1.5rem!importa
nt}.mt-xl-5{margin-top:3rem!important}.mt-xl-auto{margin-top:auto!important}.me-xl-0{margin-right:0!important}.me-xl-1{margin-right:.25rem!important}.me-xl-2{margin-right:.5rem!important}.me-xl-3{margin-right:1rem!important}.me-xl-4{margin-right:1.5rem!important}.me-xl-5{margin-right:3rem!important}.me-xl-auto{margin-right:auto!important}.mb-xl-0{margin-bottom:0!important}.mb-xl-1{margin-bottom:.25rem!important}.mb-xl-2{margin-bottom:.5rem!important}.mb-xl-3{margin-bottom:1rem!important}.mb-xl-4{margin-bottom:1.5rem!important}.mb-xl-5{margin-bottom:3rem!important}.mb-xl-auto{margin-bottom:auto!important}.ms-xl-0{margin-left:0!important}.ms-xl-1{margin-left:.25rem!important}.ms-xl-2{margin-left:.5rem!important}.ms-xl-3{margin-left:1rem!important}.ms-xl-4{margin-left:1.5rem!important}.ms-xl-5{margin-left:3rem!important}.ms-xl-auto{margin-left:auto!important}.p-xl-0{padding:0!important}.p-xl-1{padding:.25rem!important}.p-xl-2{padding:.5rem!important}.p-xl-3{padding:1rem!important}.p-xl-4{padding:1.5rem!important}.p-xl-5{padding:3rem!important}.px-xl-0{padding-right:0!important;padding-left:0!important}.px-xl-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-xl-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-xl-3{padding-right:1rem!important;padding-left:1rem!important}.px-xl-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-xl-5{padding-right:3rem!important;padding-left:3rem!important}.py-xl-0{padding-top:0!important;padding-bottom:0!important}.py-xl-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-xl-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-xl-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-xl-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-xl-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-xl-0{padding-top:0!important}.pt-xl-1{padding-top:.25rem!important}.pt-xl-2{padding-top:.5rem!important}.pt-xl-3{padding-top:1rem!important}.pt-xl-4{padding-top:1.5rem!important}.pt-xl-5{padding-top:3rem!important}.pe-xl-0{padding-right:0!important}.pe-xl-1{padding-right:.25rem!important}.pe-xl-2{padding-right:.5rem!important}.pe-xl-3{padding-right:1rem!important}.pe-xl-4{padding-right:1.5rem!important}.pe-xl-5{padding-right:3rem!important}.pb-xl-0{padding-bottom:0!important}.pb-xl-1{padding-bottom:.25rem!important}.pb-xl-2{padding-bottom:.5rem!important}.pb-xl-3{padding-bottom:1rem!important}.pb-xl-4{padding-bottom:1.5rem!important}.pb-xl-5{padding-bottom:3rem!important}.ps-xl-0{padding-left:0!important}.ps-xl-1{padding-left:.25rem!important}.ps-xl-2{padding-left:.5rem!important}.ps-xl-3{padding-left:1rem!important}.ps-xl-4{padding-left:1.5rem!important}.ps-xl-5{padding-left:3rem!important}.text-xl-start{text-align:left!important}.text-xl-end{text-align:right!important}.text-xl-center{text-align:center!important}}@media (min-width:1400px){.float-xxl-start{float:left!important}.float-xxl-end{float:right!important}.float-xxl-none{float:none!important}.d-xxl-inline{display:inline!important}.d-xxl-inline-block{display:inline-block!important}.d-xxl-block{display:block!important}.d-xxl-grid{display:grid!important}.d-xxl-table{display:table!important}.d-xxl-table-row{display:table-row!important}.d-xxl-table-cell{display:table-cell!important}.d-xxl-flex{display:flex!important}.d-xxl-inline-flex{display:inline-flex!important}.d-xxl-none{display:none!important}.flex-xxl-fill{flex:1 1 
auto!important}.flex-xxl-row{flex-direction:row!important}.flex-xxl-column{flex-direction:column!important}.flex-xxl-row-reverse{flex-direction:row-reverse!important}.flex-xxl-column-reverse{flex-direction:column-reverse!important}.flex-xxl-grow-0{flex-grow:0!important}.flex-xxl-grow-1{flex-grow:1!important}.flex-xxl-shrink-0{flex-shrink:0!important}.flex-xxl-shrink-1{flex-shrink:1!important}.flex-xxl-wrap{flex-wrap:wrap!important}.flex-xxl-nowrap{flex-wrap:nowrap!important}.flex-xxl-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-xxl-0{gap:0!important}.gap-xxl-1{gap:.25rem!important}.gap-xxl-2{gap:.5rem!important}.gap-xxl-3{gap:1rem!important}.gap-xxl-4{gap:1.5rem!important}.gap-xxl-5{gap:3rem!important}.justify-content-xxl-start{justify-content:flex-start!important}.justify-content-xxl-end{justify-content:flex-end!important}.justify-content-xxl-center{justify-content:center!important}.justify-content-xxl-between{justify-content:space-between!important}.justify-content-xxl-around{justify-content:space-around!important}.justify-content-xxl-evenly{justify-content:space-evenly!important}.align-items-xxl-start{align-items:flex-start!important}.align-items-xxl-end{align-items:flex-end!important}.align-items-xxl-center{align-items:center!important}.align-items-xxl-baseline{align-items:baseline!important}.align-items-xxl-stretch{align-items:stretch!important}.align-content-xxl-start{align-content:flex-start!important}.align-content-xxl-end{align-content:flex-end!important}.align-content-xxl-center{align-content:center!important}.align-content-xxl-between{align-content:space-between!important}.align-content-xxl-around{align-content:space-around!important}.align-content-xxl-stretch{align-content:stretch!important}.align-self-xxl-auto{align-self:auto!important}.align-self-xxl-start{align-self:flex-start!important}.align-self-xxl-end{align-self:flex-end!important}.align-self-xxl-center{align-self:center!important}.align-self-xxl-baseline{align-self:baseline!important}.align-self-xxl-stretch{align-self:stretch!important}.order-xxl-first{order:-1!important}.order-xxl-0{order:0!important}.order-xxl-1{order:1!important}.order-xxl-2{order:2!important}.order-xxl-3{order:3!important}.order-xxl-4{order:4!important}.order-xxl-5{order:5!important}.order-xxl-last{order:6!important}.m-xxl-0{margin:0!important}.m-xxl-1{margin:.25rem!important}.m-xxl-2{margin:.5rem!important}.m-xxl-3{margin:1rem!important}.m-xxl-4{margin:1.5rem!important}.m-xxl-5{margin:3rem!important}.m-xxl-auto{margin:auto!important}.mx-xxl-0{margin-right:0!important;margin-left:0!important}.mx-xxl-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-xxl-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-xxl-3{margin-right:1rem!important;margin-left:1rem!important}.mx-xxl-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-xxl-5{margin-right:3rem!important;margin-left:3rem!important}.mx-xxl-auto{margin-right:auto!important;margin-left:auto!important}.my-xxl-0{margin-top:0!important;margin-bottom:0!important}.my-xxl-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-xxl-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-xxl-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-xxl-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-xxl-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-xxl-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-xxl-0{margin-top:0!important}.mt-xxl-1{margin-top:.25rem!important}.mt-xxl-2{margin-top:.5rem!importa
nt}.mt-xxl-3{margin-top:1rem!important}.mt-xxl-4{margin-top:1.5rem!important}.mt-xxl-5{margin-top:3rem!important}.mt-xxl-auto{margin-top:auto!important}.me-xxl-0{margin-right:0!important}.me-xxl-1{margin-right:.25rem!important}.me-xxl-2{margin-right:.5rem!important}.me-xxl-3{margin-right:1rem!important}.me-xxl-4{margin-right:1.5rem!important}.me-xxl-5{margin-right:3rem!important}.me-xxl-auto{margin-right:auto!important}.mb-xxl-0{margin-bottom:0!important}.mb-xxl-1{margin-bottom:.25rem!important}.mb-xxl-2{margin-bottom:.5rem!important}.mb-xxl-3{margin-bottom:1rem!important}.mb-xxl-4{margin-bottom:1.5rem!important}.mb-xxl-5{margin-bottom:3rem!important}.mb-xxl-auto{margin-bottom:auto!important}.ms-xxl-0{margin-left:0!important}.ms-xxl-1{margin-left:.25rem!important}.ms-xxl-2{margin-left:.5rem!important}.ms-xxl-3{margin-left:1rem!important}.ms-xxl-4{margin-left:1.5rem!important}.ms-xxl-5{margin-left:3rem!important}.ms-xxl-auto{margin-left:auto!important}.p-xxl-0{padding:0!important}.p-xxl-1{padding:.25rem!important}.p-xxl-2{padding:.5rem!important}.p-xxl-3{padding:1rem!important}.p-xxl-4{padding:1.5rem!important}.p-xxl-5{padding:3rem!important}.px-xxl-0{padding-right:0!important;padding-left:0!important}.px-xxl-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-xxl-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-xxl-3{padding-right:1rem!important;padding-left:1rem!important}.px-xxl-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-xxl-5{padding-right:3rem!important;padding-left:3rem!important}.py-xxl-0{padding-top:0!important;padding-bottom:0!important}.py-xxl-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-xxl-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-xxl-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-xxl-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-xxl-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-xxl-0{padding-top:0!important}.pt-xxl-1{padding-top:.25rem!important}.pt-xxl-2{padding-top:.5rem!important}.pt-xxl-3{padding-top:1rem!important}.pt-xxl-4{padding-top:1.5rem!important}.pt-xxl-5{padding-top:3rem!important}.pe-xxl-0{padding-right:0!important}.pe-xxl-1{padding-right:.25rem!important}.pe-xxl-2{padding-right:.5rem!important}.pe-xxl-3{padding-right:1rem!important}.pe-xxl-4{padding-right:1.5rem!important}.pe-xxl-5{padding-right:3rem!important}.pb-xxl-0{padding-bottom:0!important}.pb-xxl-1{padding-bottom:.25rem!important}.pb-xxl-2{padding-bottom:.5rem!important}.pb-xxl-3{padding-bottom:1rem!important}.pb-xxl-4{padding-bottom:1.5rem!important}.pb-xxl-5{padding-bottom:3rem!important}.ps-xxl-0{padding-left:0!important}.ps-xxl-1{padding-left:.25rem!important}.ps-xxl-2{padding-left:.5rem!important}.ps-xxl-3{padding-left:1rem!important}.ps-xxl-4{padding-left:1.5rem!important}.ps-xxl-5{padding-left:3rem!important}.text-xxl-start{text-align:left!important}.text-xxl-end{text-align:right!important}.text-xxl-center{text-align:center!important}}@media (min-width:1200px){.fs-1{font-size:2.5rem!important}.fs-2{font-size:2rem!important}.fs-3{font-size:1.75rem!important}.fs-4{font-size:1.5rem!important}}@media 
print{.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-grid{display:grid!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:flex!important}.d-print-inline-flex{display:inline-flex!important}.d-print-none{display:none!important}}@font-face{font-family:bootstrap-icons;src:url(../fonts/bootstrap-icons.c5787b4a.woff2) format("woff2"),url(../fonts/bootstrap-icons.b0dc2193.woff) format("woff")}.bi:before,[class*=" bi-"]:before,[class^=bi-]:before{display:inline-block;font-family:bootstrap-icons!important;font-style:normal;font-weight:400!important;font-variant:normal;text-transform:none;line-height:1;vertical-align:-.125em;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.bi-123:before{content:"\f67f"}.bi-alarm-fill:before{content:"\f101"}.bi-alarm:before{content:"\f102"}.bi-align-bottom:before{content:"\f103"}.bi-align-center:before{content:"\f104"}.bi-align-end:before{content:"\f105"}.bi-align-middle:before{content:"\f106"}.bi-align-start:before{content:"\f107"}.bi-align-top:before{content:"\f108"}.bi-alt:before{content:"\f109"}.bi-app-indicator:before{content:"\f10a"}.bi-app:before{content:"\f10b"}.bi-archive-fill:before{content:"\f10c"}.bi-archive:before{content:"\f10d"}.bi-arrow-90deg-down:before{content:"\f10e"}.bi-arrow-90deg-left:before{content:"\f10f"}.bi-arrow-90deg-right:before{content:"\f110"}.bi-arrow-90deg-up:before{content:"\f111"}.bi-arrow-bar-down:before{content:"\f112"}.bi-arrow-bar-left:before{content:"\f113"}.bi-arrow-bar-right:before{content:"\f114"}.bi-arrow-bar-up:before{content:"\f115"}.bi-arrow-clockwise:before{content:"\f116"}.bi-arrow-counterclockwise:before{content:"\f117"}.bi-arrow-down-circle-fill:before{content:"\f118"}.bi-arrow-down-circle:before{content:"\f119"}.bi-arrow-down-left-circle-fill:before{content:"\f11a"}.bi-arrow-down-left-circle:before{content:"\f11b"}.bi-arrow-down-left-square-fill:before{content:"\f11c"}.bi-arrow-down-left-square:before{content:"\f11d"}.bi-arrow-down-left:before{content:"\f11e"}.bi-arrow-down-right-circle-fill:before{content:"\f11f"}.bi-arrow-down-right-circle:before{content:"\f120"}.bi-arrow-down-right-square-fill:before{content:"\f121"}.bi-arrow-down-right-square:before{content:"\f122"}.bi-arrow-down-right:before{content:"\f123"}.bi-arrow-down-short:before{content:"\f124"}.bi-arrow-down-square-fill:before{content:"\f125"}.bi-arrow-down-square:before{content:"\f126"}.bi-arrow-down-up:before{content:"\f127"}.bi-arrow-down:before{content:"\f128"}.bi-arrow-left-circle-fill:before{content:"\f129"}.bi-arrow-left-circle:before{content:"\f12a"}.bi-arrow-left-right:before{content:"\f12b"}.bi-arrow-left-short:before{content:"\f12c"}.bi-arrow-left-square-fill:before{content:"\f12d"}.bi-arrow-left-square:before{content:"\f12e"}.bi-arrow-left:before{content:"\f12f"}.bi-arrow-repeat:before{content:"\f130"}.bi-arrow-return-left:before{content:"\f131"}.bi-arrow-return-right:before{content:"\f132"}.bi-arrow-right-circle-fill:before{content:"\f133"}.bi-arrow-right-circle:before{content:"\f134"}.bi-arrow-right-short:before{content:"\f135"}.bi-arrow-right-square-fill:before{content:"\f136"}.bi-arrow-right-square:before{content:"\f137"}.bi-arrow-right:before{content:"\f138"}.bi-arrow-up-circle-fill:before{content:"\f139"}.bi-arrow-up-circle:before{content:"\f13a"}.bi-arrow-up-left-circle-fill:before{content:"\f13b"}.bi-arrow-u
p-left-circle:before{content:"\f13c"}.bi-arrow-up-left-square-fill:before{content:"\f13d"}.bi-arrow-up-left-square:before{content:"\f13e"}.bi-arrow-up-left:before{content:"\f13f"}.bi-arrow-up-right-circle-fill:before{content:"\f140"}.bi-arrow-up-right-circle:before{content:"\f141"}.bi-arrow-up-right-square-fill:before{content:"\f142"}.bi-arrow-up-right-square:before{content:"\f143"}.bi-arrow-up-right:before{content:"\f144"}.bi-arrow-up-short:before{content:"\f145"}.bi-arrow-up-square-fill:before{content:"\f146"}.bi-arrow-up-square:before{content:"\f147"}.bi-arrow-up:before{content:"\f148"}.bi-arrows-angle-contract:before{content:"\f149"}.bi-arrows-angle-expand:before{content:"\f14a"}.bi-arrows-collapse:before{content:"\f14b"}.bi-arrows-expand:before{content:"\f14c"}.bi-arrows-fullscreen:before{content:"\f14d"}.bi-arrows-move:before{content:"\f14e"}.bi-aspect-ratio-fill:before{content:"\f14f"}.bi-aspect-ratio:before{content:"\f150"}.bi-asterisk:before{content:"\f151"}.bi-at:before{content:"\f152"}.bi-award-fill:before{content:"\f153"}.bi-award:before{content:"\f154"}.bi-back:before{content:"\f155"}.bi-backspace-fill:before{content:"\f156"}.bi-backspace-reverse-fill:before{content:"\f157"}.bi-backspace-reverse:before{content:"\f158"}.bi-backspace:before{content:"\f159"}.bi-badge-3d-fill:before{content:"\f15a"}.bi-badge-3d:before{content:"\f15b"}.bi-badge-4k-fill:before{content:"\f15c"}.bi-badge-4k:before{content:"\f15d"}.bi-badge-8k-fill:before{content:"\f15e"}.bi-badge-8k:before{content:"\f15f"}.bi-badge-ad-fill:before{content:"\f160"}.bi-badge-ad:before{content:"\f161"}.bi-badge-ar-fill:before{content:"\f162"}.bi-badge-ar:before{content:"\f163"}.bi-badge-cc-fill:before{content:"\f164"}.bi-badge-cc:before{content:"\f165"}.bi-badge-hd-fill:before{content:"\f166"}.bi-badge-hd:before{content:"\f167"}.bi-badge-tm-fill:before{content:"\f168"}.bi-badge-tm:before{content:"\f169"}.bi-badge-vo-fill:before{content:"\f16a"}.bi-badge-vo:before{content:"\f16b"}.bi-badge-vr-fill:before{content:"\f16c"}.bi-badge-vr:before{content:"\f16d"}.bi-badge-wc-fill:before{content:"\f16e"}.bi-badge-wc:before{content:"\f16f"}.bi-bag-check-fill:before{content:"\f170"}.bi-bag-check:before{content:"\f171"}.bi-bag-dash-fill:before{content:"\f172"}.bi-bag-dash:before{content:"\f173"}.bi-bag-fill:before{content:"\f174"}.bi-bag-plus-fill:before{content:"\f175"}.bi-bag-plus:before{content:"\f176"}.bi-bag-x-fill:before{content:"\f177"}.bi-bag-x:before{content:"\f178"}.bi-bag:before{content:"\f179"}.bi-bar-chart-fill:before{content:"\f17a"}.bi-bar-chart-line-fill:before{content:"\f17b"}.bi-bar-chart-line:before{content:"\f17c"}.bi-bar-chart-steps:before{content:"\f17d"}.bi-bar-chart:before{content:"\f17e"}.bi-basket-fill:before{content:"\f17f"}.bi-basket:before{content:"\f180"}.bi-basket2-fill:before{content:"\f181"}.bi-basket2:before{content:"\f182"}.bi-basket3-fill:before{content:"\f183"}.bi-basket3:before{content:"\f184"}.bi-battery-charging:before{content:"\f185"}.bi-battery-full:before{content:"\f186"}.bi-battery-half:before{content:"\f187"}.bi-battery:before{content:"\f188"}.bi-bell-fill:before{content:"\f189"}.bi-bell:before{content:"\f18a"}.bi-bezier:before{content:"\f18b"}.bi-bezier2:before{content:"\f18c"}.bi-bicycle:before{content:"\f18d"}.bi-binoculars-fill:before{content:"\f18e"}.bi-binoculars:before{content:"\f18f"}.bi-blockquote-left:before{content:"\f190"}.bi-blockquote-right:before{content:"\f191"}.bi-book-fill:before{content:"\f192"}.bi-book-half:before{content:"\f193"}.bi-book:before{content:"\f194"}.bi-bookm
ark-check-fill:before{content:"\f195"}.bi-bookmark-check:before{content:"\f196"}.bi-bookmark-dash-fill:before{content:"\f197"}.bi-bookmark-dash:before{content:"\f198"}.bi-bookmark-fill:before{content:"\f199"}.bi-bookmark-heart-fill:before{content:"\f19a"}.bi-bookmark-heart:before{content:"\f19b"}.bi-bookmark-plus-fill:before{content:"\f19c"}.bi-bookmark-plus:before{content:"\f19d"}.bi-bookmark-star-fill:before{content:"\f19e"}.bi-bookmark-star:before{content:"\f19f"}.bi-bookmark-x-fill:before{content:"\f1a0"}.bi-bookmark-x:before{content:"\f1a1"}.bi-bookmark:before{content:"\f1a2"}.bi-bookmarks-fill:before{content:"\f1a3"}.bi-bookmarks:before{content:"\f1a4"}.bi-bookshelf:before{content:"\f1a5"}.bi-bootstrap-fill:before{content:"\f1a6"}.bi-bootstrap-reboot:before{content:"\f1a7"}.bi-bootstrap:before{content:"\f1a8"}.bi-border-all:before{content:"\f1a9"}.bi-border-bottom:before{content:"\f1aa"}.bi-border-center:before{content:"\f1ab"}.bi-border-inner:before{content:"\f1ac"}.bi-border-left:before{content:"\f1ad"}.bi-border-middle:before{content:"\f1ae"}.bi-border-outer:before{content:"\f1af"}.bi-border-right:before{content:"\f1b0"}.bi-border-style:before{content:"\f1b1"}.bi-border-top:before{content:"\f1b2"}.bi-border-width:before{content:"\f1b3"}.bi-border:before{content:"\f1b4"}.bi-bounding-box-circles:before{content:"\f1b5"}.bi-bounding-box:before{content:"\f1b6"}.bi-box-arrow-down-left:before{content:"\f1b7"}.bi-box-arrow-down-right:before{content:"\f1b8"}.bi-box-arrow-down:before{content:"\f1b9"}.bi-box-arrow-in-down-left:before{content:"\f1ba"}.bi-box-arrow-in-down-right:before{content:"\f1bb"}.bi-box-arrow-in-down:before{content:"\f1bc"}.bi-box-arrow-in-left:before{content:"\f1bd"}.bi-box-arrow-in-right:before{content:"\f1be"}.bi-box-arrow-in-up-left:before{content:"\f1bf"}.bi-box-arrow-in-up-right:before{content:"\f1c0"}.bi-box-arrow-in-up:before{content:"\f1c1"}.bi-box-arrow-left:before{content:"\f1c2"}.bi-box-arrow-right:before{content:"\f1c3"}.bi-box-arrow-up-left:before{content:"\f1c4"}.bi-box-arrow-up-right:before{content:"\f1c5"}.bi-box-arrow-up:before{content:"\f1c6"}.bi-box-seam:before{content:"\f1c7"}.bi-box:before{content:"\f1c8"}.bi-braces:before{content:"\f1c9"}.bi-bricks:before{content:"\f1ca"}.bi-briefcase-fill:before{content:"\f1cb"}.bi-briefcase:before{content:"\f1cc"}.bi-brightness-alt-high-fill:before{content:"\f1cd"}.bi-brightness-alt-high:before{content:"\f1ce"}.bi-brightness-alt-low-fill:before{content:"\f1cf"}.bi-brightness-alt-low:before{content:"\f1d0"}.bi-brightness-high-fill:before{content:"\f1d1"}.bi-brightness-high:before{content:"\f1d2"}.bi-brightness-low-fill:before{content:"\f1d3"}.bi-brightness-low:before{content:"\f1d4"}.bi-broadcast-pin:before{content:"\f1d5"}.bi-broadcast:before{content:"\f1d6"}.bi-brush-fill:before{content:"\f1d7"}.bi-brush:before{content:"\f1d8"}.bi-bucket-fill:before{content:"\f1d9"}.bi-bucket:before{content:"\f1da"}.bi-bug-fill:before{content:"\f1db"}.bi-bug:before{content:"\f1dc"}.bi-building:before{content:"\f1dd"}.bi-bullseye:before{content:"\f1de"}.bi-calculator-fill:before{content:"\f1df"}.bi-calculator:before{content:"\f1e0"}.bi-calendar-check-fill:before{content:"\f1e1"}.bi-calendar-check:before{content:"\f1e2"}.bi-calendar-date-fill:before{content:"\f1e3"}.bi-calendar-date:before{content:"\f1e4"}.bi-calendar-day-fill:before{content:"\f1e5"}.bi-calendar-day:before{content:"\f1e6"}.bi-calendar-event-fill:before{content:"\f1e7"}.bi-calendar-event:before{content:"\f1e8"}.bi-calendar-fill:before{content:"\f1e9"}.bi-calendar-mi
nus-fill:before{content:"\f1ea"}.bi-calendar-minus:before{content:"\f1eb"}.bi-calendar-month-fill:before{content:"\f1ec"}.bi-calendar-month:before{content:"\f1ed"}.bi-calendar-plus-fill:before{content:"\f1ee"}.bi-calendar-plus:before{content:"\f1ef"}.bi-calendar-range-fill:before{content:"\f1f0"}.bi-calendar-range:before{content:"\f1f1"}.bi-calendar-week-fill:before{content:"\f1f2"}.bi-calendar-week:before{content:"\f1f3"}.bi-calendar-x-fill:before{content:"\f1f4"}.bi-calendar-x:before{content:"\f1f5"}.bi-calendar:before{content:"\f1f6"}.bi-calendar2-check-fill:before{content:"\f1f7"}.bi-calendar2-check:before{content:"\f1f8"}.bi-calendar2-date-fill:before{content:"\f1f9"}.bi-calendar2-date:before{content:"\f1fa"}.bi-calendar2-day-fill:before{content:"\f1fb"}.bi-calendar2-day:before{content:"\f1fc"}.bi-calendar2-event-fill:before{content:"\f1fd"}.bi-calendar2-event:before{content:"\f1fe"}.bi-calendar2-fill:before{content:"\f1ff"}.bi-calendar2-minus-fill:before{content:"\f200"}.bi-calendar2-minus:before{content:"\f201"}.bi-calendar2-month-fill:before{content:"\f202"}.bi-calendar2-month:before{content:"\f203"}.bi-calendar2-plus-fill:before{content:"\f204"}.bi-calendar2-plus:before{content:"\f205"}.bi-calendar2-range-fill:before{content:"\f206"}.bi-calendar2-range:before{content:"\f207"}.bi-calendar2-week-fill:before{content:"\f208"}.bi-calendar2-week:before{content:"\f209"}.bi-calendar2-x-fill:before{content:"\f20a"}.bi-calendar2-x:before{content:"\f20b"}.bi-calendar2:before{content:"\f20c"}.bi-calendar3-event-fill:before{content:"\f20d"}.bi-calendar3-event:before{content:"\f20e"}.bi-calendar3-fill:before{content:"\f20f"}.bi-calendar3-range-fill:before{content:"\f210"}.bi-calendar3-range:before{content:"\f211"}.bi-calendar3-week-fill:before{content:"\f212"}.bi-calendar3-week:before{content:"\f213"}.bi-calendar3:before{content:"\f214"}.bi-calendar4-event:before{content:"\f215"}.bi-calendar4-range:before{content:"\f216"}.bi-calendar4-week:before{content:"\f217"}.bi-calendar4:before{content:"\f218"}.bi-camera-fill:before{content:"\f219"}.bi-camera-reels-fill:before{content:"\f21a"}.bi-camera-reels:before{content:"\f21b"}.bi-camera-video-fill:before{content:"\f21c"}.bi-camera-video-off-fill:before{content:"\f21d"}.bi-camera-video-off:before{content:"\f21e"}.bi-camera-video:before{content:"\f21f"}.bi-camera:before{content:"\f220"}.bi-camera2:before{content:"\f221"}.bi-capslock-fill:before{content:"\f222"}.bi-capslock:before{content:"\f223"}.bi-card-checklist:before{content:"\f224"}.bi-card-heading:before{content:"\f225"}.bi-card-image:before{content:"\f226"}.bi-card-list:before{content:"\f227"}.bi-card-text:before{content:"\f228"}.bi-caret-down-fill:before{content:"\f229"}.bi-caret-down-square-fill:before{content:"\f22a"}.bi-caret-down-square:before{content:"\f22b"}.bi-caret-down:before{content:"\f22c"}.bi-caret-left-fill:before{content:"\f22d"}.bi-caret-left-square-fill:before{content:"\f22e"}.bi-caret-left-square:before{content:"\f22f"}.bi-caret-left:before{content:"\f230"}.bi-caret-right-fill:before{content:"\f231"}.bi-caret-right-square-fill:before{content:"\f232"}.bi-caret-right-square:before{content:"\f233"}.bi-caret-right:before{content:"\f234"}.bi-caret-up-fill:before{content:"\f235"}.bi-caret-up-square-fill:before{content:"\f236"}.bi-caret-up-square:before{content:"\f237"}.bi-caret-up:before{content:"\f238"}.bi-cart-check-fill:before{content:"\f239"}.bi-cart-check:before{content:"\f23a"}.bi-cart-dash-fill:before{content:"\f23b"}.bi-cart-dash:before{content:"\f23c"}.bi-cart-fill:before{con
tent:"\f23d"}.bi-cart-plus-fill:before{content:"\f23e"}.bi-cart-plus:before{content:"\f23f"}.bi-cart-x-fill:before{content:"\f240"}.bi-cart-x:before{content:"\f241"}.bi-cart:before{content:"\f242"}.bi-cart2:before{content:"\f243"}.bi-cart3:before{content:"\f244"}.bi-cart4:before{content:"\f245"}.bi-cash-stack:before{content:"\f246"}.bi-cash:before{content:"\f247"}.bi-cast:before{content:"\f248"}.bi-chat-dots-fill:before{content:"\f249"}.bi-chat-dots:before{content:"\f24a"}.bi-chat-fill:before{content:"\f24b"}.bi-chat-left-dots-fill:before{content:"\f24c"}.bi-chat-left-dots:before{content:"\f24d"}.bi-chat-left-fill:before{content:"\f24e"}.bi-chat-left-quote-fill:before{content:"\f24f"}.bi-chat-left-quote:before{content:"\f250"}.bi-chat-left-text-fill:before{content:"\f251"}.bi-chat-left-text:before{content:"\f252"}.bi-chat-left:before{content:"\f253"}.bi-chat-quote-fill:before{content:"\f254"}.bi-chat-quote:before{content:"\f255"}.bi-chat-right-dots-fill:before{content:"\f256"}.bi-chat-right-dots:before{content:"\f257"}.bi-chat-right-fill:before{content:"\f258"}.bi-chat-right-quote-fill:before{content:"\f259"}.bi-chat-right-quote:before{content:"\f25a"}.bi-chat-right-text-fill:before{content:"\f25b"}.bi-chat-right-text:before{content:"\f25c"}.bi-chat-right:before{content:"\f25d"}.bi-chat-square-dots-fill:before{content:"\f25e"}.bi-chat-square-dots:before{content:"\f25f"}.bi-chat-square-fill:before{content:"\f260"}.bi-chat-square-quote-fill:before{content:"\f261"}.bi-chat-square-quote:before{content:"\f262"}.bi-chat-square-text-fill:before{content:"\f263"}.bi-chat-square-text:before{content:"\f264"}.bi-chat-square:before{content:"\f265"}.bi-chat-text-fill:before{content:"\f266"}.bi-chat-text:before{content:"\f267"}.bi-chat:before{content:"\f268"}.bi-check-all:before{content:"\f269"}.bi-check-circle-fill:before{content:"\f26a"}.bi-check-circle:before{content:"\f26b"}.bi-check-square-fill:before{content:"\f26c"}.bi-check-square:before{content:"\f26d"}.bi-check:before{content:"\f26e"}.bi-check2-all:before{content:"\f26f"}.bi-check2-circle:before{content:"\f270"}.bi-check2-square:before{content:"\f271"}.bi-check2:before{content:"\f272"}.bi-chevron-bar-contract:before{content:"\f273"}.bi-chevron-bar-down:before{content:"\f274"}.bi-chevron-bar-expand:before{content:"\f275"}.bi-chevron-bar-left:before{content:"\f276"}.bi-chevron-bar-right:before{content:"\f277"}.bi-chevron-bar-up:before{content:"\f278"}.bi-chevron-compact-down:before{content:"\f279"}.bi-chevron-compact-left:before{content:"\f27a"}.bi-chevron-compact-right:before{content:"\f27b"}.bi-chevron-compact-up:before{content:"\f27c"}.bi-chevron-contract:before{content:"\f27d"}.bi-chevron-double-down:before{content:"\f27e"}.bi-chevron-double-left:before{content:"\f27f"}.bi-chevron-double-right:before{content:"\f280"}.bi-chevron-double-up:before{content:"\f281"}.bi-chevron-down:before{content:"\f282"}.bi-chevron-expand:before{content:"\f283"}.bi-chevron-left:before{content:"\f284"}.bi-chevron-right:before{content:"\f285"}.bi-chevron-up:before{content:"\f286"}.bi-circle-fill:before{content:"\f287"}.bi-circle-half:before{content:"\f288"}.bi-circle-square:before{content:"\f289"}.bi-circle:before{content:"\f28a"}.bi-clipboard-check:before{content:"\f28b"}.bi-clipboard-data:before{content:"\f28c"}.bi-clipboard-minus:before{content:"\f28d"}.bi-clipboard-plus:before{content:"\f28e"}.bi-clipboard-x:before{content:"\f28f"}.bi-clipboard:before{content:"\f290"}.bi-clock-fill:before{content:"\f291"}.bi-clock-history:before{content:"\f292"}.bi-clock:before{
content:"\f293"}.bi-cloud-arrow-down-fill:before{content:"\f294"}.bi-cloud-arrow-down:before{content:"\f295"}.bi-cloud-arrow-up-fill:before{content:"\f296"}.bi-cloud-arrow-up:before{content:"\f297"}.bi-cloud-check-fill:before{content:"\f298"}.bi-cloud-check:before{content:"\f299"}.bi-cloud-download-fill:before{content:"\f29a"}.bi-cloud-download:before{content:"\f29b"}.bi-cloud-drizzle-fill:before{content:"\f29c"}.bi-cloud-drizzle:before{content:"\f29d"}.bi-cloud-fill:before{content:"\f29e"}.bi-cloud-fog-fill:before{content:"\f29f"}.bi-cloud-fog:before{content:"\f2a0"}.bi-cloud-fog2-fill:before{content:"\f2a1"}.bi-cloud-fog2:before{content:"\f2a2"}.bi-cloud-hail-fill:before{content:"\f2a3"}.bi-cloud-hail:before{content:"\f2a4"}.bi-cloud-haze-1:before{content:"\f2a5"}.bi-cloud-haze-fill:before{content:"\f2a6"}.bi-cloud-haze:before{content:"\f2a7"}.bi-cloud-haze2-fill:before{content:"\f2a8"}.bi-cloud-lightning-fill:before{content:"\f2a9"}.bi-cloud-lightning-rain-fill:before{content:"\f2aa"}.bi-cloud-lightning-rain:before{content:"\f2ab"}.bi-cloud-lightning:before{content:"\f2ac"}.bi-cloud-minus-fill:before{content:"\f2ad"}.bi-cloud-minus:before{content:"\f2ae"}.bi-cloud-moon-fill:before{content:"\f2af"}.bi-cloud-moon:before{content:"\f2b0"}.bi-cloud-plus-fill:before{content:"\f2b1"}.bi-cloud-plus:before{content:"\f2b2"}.bi-cloud-rain-fill:before{content:"\f2b3"}.bi-cloud-rain-heavy-fill:before{content:"\f2b4"}.bi-cloud-rain-heavy:before{content:"\f2b5"}.bi-cloud-rain:before{content:"\f2b6"}.bi-cloud-slash-fill:before{content:"\f2b7"}.bi-cloud-slash:before{content:"\f2b8"}.bi-cloud-sleet-fill:before{content:"\f2b9"}.bi-cloud-sleet:before{content:"\f2ba"}.bi-cloud-snow-fill:before{content:"\f2bb"}.bi-cloud-snow:before{content:"\f2bc"}.bi-cloud-sun-fill:before{content:"\f2bd"}.bi-cloud-sun:before{content:"\f2be"}.bi-cloud-upload-fill:before{content:"\f2bf"}.bi-cloud-upload:before{content:"\f2c0"}.bi-cloud:before{content:"\f2c1"}.bi-clouds-fill:before{content:"\f2c2"}.bi-clouds:before{content:"\f2c3"}.bi-cloudy-fill:before{content:"\f2c4"}.bi-cloudy:before{content:"\f2c5"}.bi-code-slash:before{content:"\f2c6"}.bi-code-square:before{content:"\f2c7"}.bi-code:before{content:"\f2c8"}.bi-collection-fill:before{content:"\f2c9"}.bi-collection-play-fill:before{content:"\f2ca"}.bi-collection-play:before{content:"\f2cb"}.bi-collection:before{content:"\f2cc"}.bi-columns-gap:before{content:"\f2cd"}.bi-columns:before{content:"\f2ce"}.bi-command:before{content:"\f2cf"}.bi-compass-fill:before{content:"\f2d0"}.bi-compass:before{content:"\f2d1"}.bi-cone-striped:before{content:"\f2d2"}.bi-cone:before{content:"\f2d3"}.bi-controller:before{content:"\f2d4"}.bi-cpu-fill:before{content:"\f2d5"}.bi-cpu:before{content:"\f2d6"}.bi-credit-card-2-back-fill:before{content:"\f2d7"}.bi-credit-card-2-back:before{content:"\f2d8"}.bi-credit-card-2-front-fill:before{content:"\f2d9"}.bi-credit-card-2-front:before{content:"\f2da"}.bi-credit-card-fill:before{content:"\f2db"}.bi-credit-card:before{content:"\f2dc"}.bi-crop:before{content:"\f2dd"}.bi-cup-fill:before{content:"\f2de"}.bi-cup-straw:before{content:"\f2df"}.bi-cup:before{content:"\f2e0"}.bi-cursor-fill:before{content:"\f2e1"}.bi-cursor-text:before{content:"\f2e2"}.bi-cursor:before{content:"\f2e3"}.bi-dash-circle-dotted:before{content:"\f2e4"}.bi-dash-circle-fill:before{content:"\f2e5"}.bi-dash-circle:before{content:"\f2e6"}.bi-dash-square-dotted:before{content:"\f2e7"}.bi-dash-square-fill:before{content:"\f2e8"}.bi-dash-square:before{content:"\f2e9"}.bi-dash:before{content:"\
f2ea"}.bi-diagram-2-fill:before{content:"\f2eb"}.bi-diagram-2:before{content:"\f2ec"}.bi-diagram-3-fill:before{content:"\f2ed"}.bi-diagram-3:before{content:"\f2ee"}.bi-diamond-fill:before{content:"\f2ef"}.bi-diamond-half:before{content:"\f2f0"}.bi-diamond:before{content:"\f2f1"}.bi-dice-1-fill:before{content:"\f2f2"}.bi-dice-1:before{content:"\f2f3"}.bi-dice-2-fill:before{content:"\f2f4"}.bi-dice-2:before{content:"\f2f5"}.bi-dice-3-fill:before{content:"\f2f6"}.bi-dice-3:before{content:"\f2f7"}.bi-dice-4-fill:before{content:"\f2f8"}.bi-dice-4:before{content:"\f2f9"}.bi-dice-5-fill:before{content:"\f2fa"}.bi-dice-5:before{content:"\f2fb"}.bi-dice-6-fill:before{content:"\f2fc"}.bi-dice-6:before{content:"\f2fd"}.bi-disc-fill:before{content:"\f2fe"}.bi-disc:before{content:"\f2ff"}.bi-discord:before{content:"\f300"}.bi-display-fill:before{content:"\f301"}.bi-display:before{content:"\f302"}.bi-distribute-horizontal:before{content:"\f303"}.bi-distribute-vertical:before{content:"\f304"}.bi-door-closed-fill:before{content:"\f305"}.bi-door-closed:before{content:"\f306"}.bi-door-open-fill:before{content:"\f307"}.bi-door-open:before{content:"\f308"}.bi-dot:before{content:"\f309"}.bi-download:before{content:"\f30a"}.bi-droplet-fill:before{content:"\f30b"}.bi-droplet-half:before{content:"\f30c"}.bi-droplet:before{content:"\f30d"}.bi-earbuds:before{content:"\f30e"}.bi-easel-fill:before{content:"\f30f"}.bi-easel:before{content:"\f310"}.bi-egg-fill:before{content:"\f311"}.bi-egg-fried:before{content:"\f312"}.bi-egg:before{content:"\f313"}.bi-eject-fill:before{content:"\f314"}.bi-eject:before{content:"\f315"}.bi-emoji-angry-fill:before{content:"\f316"}.bi-emoji-angry:before{content:"\f317"}.bi-emoji-dizzy-fill:before{content:"\f318"}.bi-emoji-dizzy:before{content:"\f319"}.bi-emoji-expressionless-fill:before{content:"\f31a"}.bi-emoji-expressionless:before{content:"\f31b"}.bi-emoji-frown-fill:before{content:"\f31c"}.bi-emoji-frown:before{content:"\f31d"}.bi-emoji-heart-eyes-fill:before{content:"\f31e"}.bi-emoji-heart-eyes:before{content:"\f31f"}.bi-emoji-laughing-fill:before{content:"\f320"}.bi-emoji-laughing:before{content:"\f321"}.bi-emoji-neutral-fill:before{content:"\f322"}.bi-emoji-neutral:before{content:"\f323"}.bi-emoji-smile-fill:before{content:"\f324"}.bi-emoji-smile-upside-down-fill:before{content:"\f325"}.bi-emoji-smile-upside-down:before{content:"\f326"}.bi-emoji-smile:before{content:"\f327"}.bi-emoji-sunglasses-fill:before{content:"\f328"}.bi-emoji-sunglasses:before{content:"\f329"}.bi-emoji-wink-fill:before{content:"\f32a"}.bi-emoji-wink:before{content:"\f32b"}.bi-envelope-fill:before{content:"\f32c"}.bi-envelope-open-fill:before{content:"\f32d"}.bi-envelope-open:before{content:"\f32e"}.bi-envelope:before{content:"\f32f"}.bi-eraser-fill:before{content:"\f330"}.bi-eraser:before{content:"\f331"}.bi-exclamation-circle-fill:before{content:"\f332"}.bi-exclamation-circle:before{content:"\f333"}.bi-exclamation-diamond-fill:before{content:"\f334"}.bi-exclamation-diamond:before{content:"\f335"}.bi-exclamation-octagon-fill:before{content:"\f336"}.bi-exclamation-octagon:before{content:"\f337"}.bi-exclamation-square-fill:before{content:"\f338"}.bi-exclamation-square:before{content:"\f339"}.bi-exclamation-triangle-fill:before{content:"\f33a"}.bi-exclamation-triangle:before{content:"\f33b"}.bi-exclamation:before{content:"\f33c"}.bi-exclude:before{content:"\f33d"}.bi-eye-fill:before{content:"\f33e"}.bi-eye-slash-fill:before{content:"\f33f"}.bi-eye-slash:before{content:"\f340"}.bi-eye:before{content:"\f341"}.bi-e
yedropper:before{content:"\f342"}.bi-eyeglasses:before{content:"\f343"}.bi-facebook:before{content:"\f344"}.bi-file-arrow-down-fill:before{content:"\f345"}.bi-file-arrow-down:before{content:"\f346"}.bi-file-arrow-up-fill:before{content:"\f347"}.bi-file-arrow-up:before{content:"\f348"}.bi-file-bar-graph-fill:before{content:"\f349"}.bi-file-bar-graph:before{content:"\f34a"}.bi-file-binary-fill:before{content:"\f34b"}.bi-file-binary:before{content:"\f34c"}.bi-file-break-fill:before{content:"\f34d"}.bi-file-break:before{content:"\f34e"}.bi-file-check-fill:before{content:"\f34f"}.bi-file-check:before{content:"\f350"}.bi-file-code-fill:before{content:"\f351"}.bi-file-code:before{content:"\f352"}.bi-file-diff-fill:before{content:"\f353"}.bi-file-diff:before{content:"\f354"}.bi-file-earmark-arrow-down-fill:before{content:"\f355"}.bi-file-earmark-arrow-down:before{content:"\f356"}.bi-file-earmark-arrow-up-fill:before{content:"\f357"}.bi-file-earmark-arrow-up:before{content:"\f358"}.bi-file-earmark-bar-graph-fill:before{content:"\f359"}.bi-file-earmark-bar-graph:before{content:"\f35a"}.bi-file-earmark-binary-fill:before{content:"\f35b"}.bi-file-earmark-binary:before{content:"\f35c"}.bi-file-earmark-break-fill:before{content:"\f35d"}.bi-file-earmark-break:before{content:"\f35e"}.bi-file-earmark-check-fill:before{content:"\f35f"}.bi-file-earmark-check:before{content:"\f360"}.bi-file-earmark-code-fill:before{content:"\f361"}.bi-file-earmark-code:before{content:"\f362"}.bi-file-earmark-diff-fill:before{content:"\f363"}.bi-file-earmark-diff:before{content:"\f364"}.bi-file-earmark-easel-fill:before{content:"\f365"}.bi-file-earmark-easel:before{content:"\f366"}.bi-file-earmark-excel-fill:before{content:"\f367"}.bi-file-earmark-excel:before{content:"\f368"}.bi-file-earmark-fill:before{content:"\f369"}.bi-file-earmark-font-fill:before{content:"\f36a"}.bi-file-earmark-font:before{content:"\f36b"}.bi-file-earmark-image-fill:before{content:"\f36c"}.bi-file-earmark-image:before{content:"\f36d"}.bi-file-earmark-lock-fill:before{content:"\f36e"}.bi-file-earmark-lock:before{content:"\f36f"}.bi-file-earmark-lock2-fill:before{content:"\f370"}.bi-file-earmark-lock2:before{content:"\f371"}.bi-file-earmark-medical-fill:before{content:"\f372"}.bi-file-earmark-medical:before{content:"\f373"}.bi-file-earmark-minus-fill:before{content:"\f374"}.bi-file-earmark-minus:before{content:"\f375"}.bi-file-earmark-music-fill:before{content:"\f376"}.bi-file-earmark-music:before{content:"\f377"}.bi-file-earmark-person-fill:before{content:"\f378"}.bi-file-earmark-person:before{content:"\f379"}.bi-file-earmark-play-fill:before{content:"\f37a"}.bi-file-earmark-play:before{content:"\f37b"}.bi-file-earmark-plus-fill:before{content:"\f37c"}.bi-file-earmark-plus:before{content:"\f37d"}.bi-file-earmark-post-fill:before{content:"\f37e"}.bi-file-earmark-post:before{content:"\f37f"}.bi-file-earmark-ppt-fill:before{content:"\f380"}.bi-file-earmark-ppt:before{content:"\f381"}.bi-file-earmark-richtext-fill:before{content:"\f382"}.bi-file-earmark-richtext:before{content:"\f383"}.bi-file-earmark-ruled-fill:before{content:"\f384"}.bi-file-earmark-ruled:before{content:"\f385"}.bi-file-earmark-slides-fill:before{content:"\f386"}.bi-file-earmark-slides:before{content:"\f387"}.bi-file-earmark-spreadsheet-fill:before{content:"\f388"}.bi-file-earmark-spreadsheet:before{content:"\f389"}.bi-file-earmark-text-fill:before{content:"\f38a"}.bi-file-earmark-text:before{content:"\f38b"}.bi-file-earmark-word-fill:before{content:"\f38c"}.bi-file-earmark-word:before{con
tent:"\f38d"}.bi-file-earmark-x-fill:before{content:"\f38e"}.bi-file-earmark-x:before{content:"\f38f"}.bi-file-earmark-zip-fill:before{content:"\f390"}.bi-file-earmark-zip:before{content:"\f391"}.bi-file-earmark:before{content:"\f392"}.bi-file-easel-fill:before{content:"\f393"}.bi-file-easel:before{content:"\f394"}.bi-file-excel-fill:before{content:"\f395"}.bi-file-excel:before{content:"\f396"}.bi-file-fill:before{content:"\f397"}.bi-file-font-fill:before{content:"\f398"}.bi-file-font:before{content:"\f399"}.bi-file-image-fill:before{content:"\f39a"}.bi-file-image:before{content:"\f39b"}.bi-file-lock-fill:before{content:"\f39c"}.bi-file-lock:before{content:"\f39d"}.bi-file-lock2-fill:before{content:"\f39e"}.bi-file-lock2:before{content:"\f39f"}.bi-file-medical-fill:before{content:"\f3a0"}.bi-file-medical:before{content:"\f3a1"}.bi-file-minus-fill:before{content:"\f3a2"}.bi-file-minus:before{content:"\f3a3"}.bi-file-music-fill:before{content:"\f3a4"}.bi-file-music:before{content:"\f3a5"}.bi-file-person-fill:before{content:"\f3a6"}.bi-file-person:before{content:"\f3a7"}.bi-file-play-fill:before{content:"\f3a8"}.bi-file-play:before{content:"\f3a9"}.bi-file-plus-fill:before{content:"\f3aa"}.bi-file-plus:before{content:"\f3ab"}.bi-file-post-fill:before{content:"\f3ac"}.bi-file-post:before{content:"\f3ad"}.bi-file-ppt-fill:before{content:"\f3ae"}.bi-file-ppt:before{content:"\f3af"}.bi-file-richtext-fill:before{content:"\f3b0"}.bi-file-richtext:before{content:"\f3b1"}.bi-file-ruled-fill:before{content:"\f3b2"}.bi-file-ruled:before{content:"\f3b3"}.bi-file-slides-fill:before{content:"\f3b4"}.bi-file-slides:before{content:"\f3b5"}.bi-file-spreadsheet-fill:before{content:"\f3b6"}.bi-file-spreadsheet:before{content:"\f3b7"}.bi-file-text-fill:before{content:"\f3b8"}.bi-file-text:before{content:"\f3b9"}.bi-file-word-fill:before{content:"\f3ba"}.bi-file-word:before{content:"\f3bb"}.bi-file-x-fill:before{content:"\f3bc"}.bi-file-x:before{content:"\f3bd"}.bi-file-zip-fill:before{content:"\f3be"}.bi-file-zip:before{content:"\f3bf"}.bi-file:before{content:"\f3c0"}.bi-files-alt:before{content:"\f3c1"}.bi-files:before{content:"\f3c2"}.bi-film:before{content:"\f3c3"}.bi-filter-circle-fill:before{content:"\f3c4"}.bi-filter-circle:before{content:"\f3c5"}.bi-filter-left:before{content:"\f3c6"}.bi-filter-right:before{content:"\f3c7"}.bi-filter-square-fill:before{content:"\f3c8"}.bi-filter-square:before{content:"\f3c9"}.bi-filter:before{content:"\f3ca"}.bi-flag-fill:before{content:"\f3cb"}.bi-flag:before{content:"\f3cc"}.bi-flower1:before{content:"\f3cd"}.bi-flower2:before{content:"\f3ce"}.bi-flower3:before{content:"\f3cf"}.bi-folder-check:before{content:"\f3d0"}.bi-folder-fill:before{content:"\f3d1"}.bi-folder-minus:before{content:"\f3d2"}.bi-folder-plus:before{content:"\f3d3"}.bi-folder-symlink-fill:before{content:"\f3d4"}.bi-folder-symlink:before{content:"\f3d5"}.bi-folder-x:before{content:"\f3d6"}.bi-folder:before{content:"\f3d7"}.bi-folder2-open:before{content:"\f3d8"}.bi-folder2:before{content:"\f3d9"}.bi-fonts:before{content:"\f3da"}.bi-forward-fill:before{content:"\f3db"}.bi-forward:before{content:"\f3dc"}.bi-front:before{content:"\f3dd"}.bi-fullscreen-exit:before{content:"\f3de"}.bi-fullscreen:before{content:"\f3df"}.bi-funnel-fill:before{content:"\f3e0"}.bi-funnel:before{content:"\f3e1"}.bi-gear-fill:before{content:"\f3e2"}.bi-gear-wide-connected:before{content:"\f3e3"}.bi-gear-wide:before{content:"\f3e4"}.bi-gear:before{content:"\f3e5"}.bi-gem:before{content:"\f3e6"}.bi-geo-alt-fill:before{content:"\f3e7"
}.bi-geo-alt:before{content:"\f3e8"}.bi-geo-fill:before{content:"\f3e9"}.bi-geo:before{content:"\f3ea"}.bi-gift-fill:before{content:"\f3eb"}.bi-gift:before{content:"\f3ec"}.bi-github:before{content:"\f3ed"}.bi-globe:before{content:"\f3ee"}.bi-globe2:before{content:"\f3ef"}.bi-google:before{content:"\f3f0"}.bi-graph-down:before{content:"\f3f1"}.bi-graph-up:before{content:"\f3f2"}.bi-grid-1x2-fill:before{content:"\f3f3"}.bi-grid-1x2:before{content:"\f3f4"}.bi-grid-3x2-gap-fill:before{content:"\f3f5"}.bi-grid-3x2-gap:before{content:"\f3f6"}.bi-grid-3x2:before{content:"\f3f7"}.bi-grid-3x3-gap-fill:before{content:"\f3f8"}.bi-grid-3x3-gap:before{content:"\f3f9"}.bi-grid-3x3:before{content:"\f3fa"}.bi-grid-fill:before{content:"\f3fb"}.bi-grid:before{content:"\f3fc"}.bi-grip-horizontal:before{content:"\f3fd"}.bi-grip-vertical:before{content:"\f3fe"}.bi-hammer:before{content:"\f3ff"}.bi-hand-index-fill:before{content:"\f400"}.bi-hand-index-thumb-fill:before{content:"\f401"}.bi-hand-index-thumb:before{content:"\f402"}.bi-hand-index:before{content:"\f403"}.bi-hand-thumbs-down-fill:before{content:"\f404"}.bi-hand-thumbs-down:before{content:"\f405"}.bi-hand-thumbs-up-fill:before{content:"\f406"}.bi-hand-thumbs-up:before{content:"\f407"}.bi-handbag-fill:before{content:"\f408"}.bi-handbag:before{content:"\f409"}.bi-hash:before{content:"\f40a"}.bi-hdd-fill:before{content:"\f40b"}.bi-hdd-network-fill:before{content:"\f40c"}.bi-hdd-network:before{content:"\f40d"}.bi-hdd-rack-fill:before{content:"\f40e"}.bi-hdd-rack:before{content:"\f40f"}.bi-hdd-stack-fill:before{content:"\f410"}.bi-hdd-stack:before{content:"\f411"}.bi-hdd:before{content:"\f412"}.bi-headphones:before{content:"\f413"}.bi-headset:before{content:"\f414"}.bi-heart-fill:before{content:"\f415"}.bi-heart-half:before{content:"\f416"}.bi-heart:before{content:"\f417"}.bi-heptagon-fill:before{content:"\f418"}.bi-heptagon-half:before{content:"\f419"}.bi-heptagon:before{content:"\f41a"}.bi-hexagon-fill:before{content:"\f41b"}.bi-hexagon-half:before{content:"\f41c"}.bi-hexagon:before{content:"\f41d"}.bi-hourglass-bottom:before{content:"\f41e"}.bi-hourglass-split:before{content:"\f41f"}.bi-hourglass-top:before{content:"\f420"}.bi-hourglass:before{content:"\f421"}.bi-house-door-fill:before{content:"\f422"}.bi-house-door:before{content:"\f423"}.bi-house-fill:before{content:"\f424"}.bi-house:before{content:"\f425"}.bi-hr:before{content:"\f426"}.bi-hurricane:before{content:"\f427"}.bi-image-alt:before{content:"\f428"}.bi-image-fill:before{content:"\f429"}.bi-image:before{content:"\f42a"}.bi-images:before{content:"\f42b"}.bi-inbox-fill:before{content:"\f42c"}.bi-inbox:before{content:"\f42d"}.bi-inboxes-fill:before{content:"\f42e"}.bi-inboxes:before{content:"\f42f"}.bi-info-circle-fill:before{content:"\f430"}.bi-info-circle:before{content:"\f431"}.bi-info-square-fill:before{content:"\f432"}.bi-info-square:before{content:"\f433"}.bi-info:before{content:"\f434"}.bi-input-cursor-text:before{content:"\f435"}.bi-input-cursor:before{content:"\f436"}.bi-instagram:before{content:"\f437"}.bi-intersect:before{content:"\f438"}.bi-journal-album:before{content:"\f439"}.bi-journal-arrow-down:before{content:"\f43a"}.bi-journal-arrow-up:before{content:"\f43b"}.bi-journal-bookmark-fill:before{content:"\f43c"}.bi-journal-bookmark:before{content:"\f43d"}.bi-journal-check:before{content:"\f43e"}.bi-journal-code:before{content:"\f43f"}.bi-journal-medical:before{content:"\f440"}.bi-journal-minus:before{content:"\f441"}.bi-journal-plus:before{content:"\f442"}.bi-journal-richtext:befor
e{content:"\f443"}.bi-journal-text:before{content:"\f444"}.bi-journal-x:before{content:"\f445"}.bi-journal:before{content:"\f446"}.bi-journals:before{content:"\f447"}.bi-joystick:before{content:"\f448"}.bi-justify-left:before{content:"\f449"}.bi-justify-right:before{content:"\f44a"}.bi-justify:before{content:"\f44b"}.bi-kanban-fill:before{content:"\f44c"}.bi-kanban:before{content:"\f44d"}.bi-key-fill:before{content:"\f44e"}.bi-key:before{content:"\f44f"}.bi-keyboard-fill:before{content:"\f450"}.bi-keyboard:before{content:"\f451"}.bi-ladder:before{content:"\f452"}.bi-lamp-fill:before{content:"\f453"}.bi-lamp:before{content:"\f454"}.bi-laptop-fill:before{content:"\f455"}.bi-laptop:before{content:"\f456"}.bi-layer-backward:before{content:"\f457"}.bi-layer-forward:before{content:"\f458"}.bi-layers-fill:before{content:"\f459"}.bi-layers-half:before{content:"\f45a"}.bi-layers:before{content:"\f45b"}.bi-layout-sidebar-inset-reverse:before{content:"\f45c"}.bi-layout-sidebar-inset:before{content:"\f45d"}.bi-layout-sidebar-reverse:before{content:"\f45e"}.bi-layout-sidebar:before{content:"\f45f"}.bi-layout-split:before{content:"\f460"}.bi-layout-text-sidebar-reverse:before{content:"\f461"}.bi-layout-text-sidebar:before{content:"\f462"}.bi-layout-text-window-reverse:before{content:"\f463"}.bi-layout-text-window:before{content:"\f464"}.bi-layout-three-columns:before{content:"\f465"}.bi-layout-wtf:before{content:"\f466"}.bi-life-preserver:before{content:"\f467"}.bi-lightbulb-fill:before{content:"\f468"}.bi-lightbulb-off-fill:before{content:"\f469"}.bi-lightbulb-off:before{content:"\f46a"}.bi-lightbulb:before{content:"\f46b"}.bi-lightning-charge-fill:before{content:"\f46c"}.bi-lightning-charge:before{content:"\f46d"}.bi-lightning-fill:before{content:"\f46e"}.bi-lightning:before{content:"\f46f"}.bi-link-45deg:before{content:"\f470"}.bi-link:before{content:"\f471"}.bi-linkedin:before{content:"\f472"}.bi-list-check:before{content:"\f473"}.bi-list-nested:before{content:"\f474"}.bi-list-ol:before{content:"\f475"}.bi-list-stars:before{content:"\f476"}.bi-list-task:before{content:"\f477"}.bi-list-ul:before{content:"\f478"}.bi-list:before{content:"\f479"}.bi-lock-fill:before{content:"\f47a"}.bi-lock:before{content:"\f47b"}.bi-mailbox:before{content:"\f47c"}.bi-mailbox2:before{content:"\f47d"}.bi-map-fill:before{content:"\f47e"}.bi-map:before{content:"\f47f"}.bi-markdown-fill:before{content:"\f480"}.bi-markdown:before{content:"\f481"}.bi-mask:before{content:"\f482"}.bi-megaphone-fill:before{content:"\f483"}.bi-megaphone:before{content:"\f484"}.bi-menu-app-fill:before{content:"\f485"}.bi-menu-app:before{content:"\f486"}.bi-menu-button-fill:before{content:"\f487"}.bi-menu-button-wide-fill:before{content:"\f488"}.bi-menu-button-wide:before{content:"\f489"}.bi-menu-button:before{content:"\f48a"}.bi-menu-down:before{content:"\f48b"}.bi-menu-up:before{content:"\f48c"}.bi-mic-fill:before{content:"\f48d"}.bi-mic-mute-fill:before{content:"\f48e"}.bi-mic-mute:before{content:"\f48f"}.bi-mic:before{content:"\f490"}.bi-minecart-loaded:before{content:"\f491"}.bi-minecart:before{content:"\f492"}.bi-moisture:before{content:"\f493"}.bi-moon-fill:before{content:"\f494"}.bi-moon-stars-fill:before{content:"\f495"}.bi-moon-stars:before{content:"\f496"}.bi-moon:before{content:"\f497"}.bi-mouse-fill:before{content:"\f498"}.bi-mouse:before{content:"\f499"}.bi-mouse2-fill:before{content:"\f49a"}.bi-mouse2:before{content:"\f49b"}.bi-mouse3-fill:before{content:"\f49c"}.bi-mouse3:before{content:"\f49d"}.bi-music-note-beamed:before{content:"\
f49e"}.bi-music-note-list:before{content:"\f49f"}.bi-music-note:before{content:"\f4a0"}.bi-music-player-fill:before{content:"\f4a1"}.bi-music-player:before{content:"\f4a2"}.bi-newspaper:before{content:"\f4a3"}.bi-node-minus-fill:before{content:"\f4a4"}.bi-node-minus:before{content:"\f4a5"}.bi-node-plus-fill:before{content:"\f4a6"}.bi-node-plus:before{content:"\f4a7"}.bi-nut-fill:before{content:"\f4a8"}.bi-nut:before{content:"\f4a9"}.bi-octagon-fill:before{content:"\f4aa"}.bi-octagon-half:before{content:"\f4ab"}.bi-octagon:before{content:"\f4ac"}.bi-option:before{content:"\f4ad"}.bi-outlet:before{content:"\f4ae"}.bi-paint-bucket:before{content:"\f4af"}.bi-palette-fill:before{content:"\f4b0"}.bi-palette:before{content:"\f4b1"}.bi-palette2:before{content:"\f4b2"}.bi-paperclip:before{content:"\f4b3"}.bi-paragraph:before{content:"\f4b4"}.bi-patch-check-fill:before{content:"\f4b5"}.bi-patch-check:before{content:"\f4b6"}.bi-patch-exclamation-fill:before{content:"\f4b7"}.bi-patch-exclamation:before{content:"\f4b8"}.bi-patch-minus-fill:before{content:"\f4b9"}.bi-patch-minus:before{content:"\f4ba"}.bi-patch-plus-fill:before{content:"\f4bb"}.bi-patch-plus:before{content:"\f4bc"}.bi-patch-question-fill:before{content:"\f4bd"}.bi-patch-question:before{content:"\f4be"}.bi-pause-btn-fill:before{content:"\f4bf"}.bi-pause-btn:before{content:"\f4c0"}.bi-pause-circle-fill:before{content:"\f4c1"}.bi-pause-circle:before{content:"\f4c2"}.bi-pause-fill:before{content:"\f4c3"}.bi-pause:before{content:"\f4c4"}.bi-peace-fill:before{content:"\f4c5"}.bi-peace:before{content:"\f4c6"}.bi-pen-fill:before{content:"\f4c7"}.bi-pen:before{content:"\f4c8"}.bi-pencil-fill:before{content:"\f4c9"}.bi-pencil-square:before{content:"\f4ca"}.bi-pencil:before{content:"\f4cb"}.bi-pentagon-fill:before{content:"\f4cc"}.bi-pentagon-half:before{content:"\f4cd"}.bi-pentagon:before{content:"\f4ce"}.bi-people-fill:before{content:"\f4cf"}.bi-people:before{content:"\f4d0"}.bi-percent:before{content:"\f4d1"}.bi-person-badge-fill:before{content:"\f4d2"}.bi-person-badge:before{content:"\f4d3"}.bi-person-bounding-box:before{content:"\f4d4"}.bi-person-check-fill:before{content:"\f4d5"}.bi-person-check:before{content:"\f4d6"}.bi-person-circle:before{content:"\f4d7"}.bi-person-dash-fill:before{content:"\f4d8"}.bi-person-dash:before{content:"\f4d9"}.bi-person-fill:before{content:"\f4da"}.bi-person-lines-fill:before{content:"\f4db"}.bi-person-plus-fill:before{content:"\f4dc"}.bi-person-plus:before{content:"\f4dd"}.bi-person-square:before{content:"\f4de"}.bi-person-x-fill:before{content:"\f4df"}.bi-person-x:before{content:"\f4e0"}.bi-person:before{content:"\f4e1"}.bi-phone-fill:before{content:"\f4e2"}.bi-phone-landscape-fill:before{content:"\f4e3"}.bi-phone-landscape:before{content:"\f4e4"}.bi-phone-vibrate-fill:before{content:"\f4e5"}.bi-phone-vibrate:before{content:"\f4e6"}.bi-phone:before{content:"\f4e7"}.bi-pie-chart-fill:before{content:"\f4e8"}.bi-pie-chart:before{content:"\f4e9"}.bi-pin-angle-fill:before{content:"\f4ea"}.bi-pin-angle:before{content:"\f4eb"}.bi-pin-fill:before{content:"\f4ec"}.bi-pin:before{content:"\f4ed"}.bi-pip-fill:before{content:"\f4ee"}.bi-pip:before{content:"\f4ef"}.bi-play-btn-fill:before{content:"\f4f0"}.bi-play-btn:before{content:"\f4f1"}.bi-play-circle-fill:before{content:"\f4f2"}.bi-play-circle:before{content:"\f4f3"}.bi-play-fill:before{content:"\f4f4"}.bi-play:before{content:"\f4f5"}.bi-plug-fill:before{content:"\f4f6"}.bi-plug:before{content:"\f4f7"}.bi-plus-circle-dotted:before{content:"\f4f8"}.bi-plus-circle-fill:b
efore{content:"\f4f9"}.bi-plus-circle:before{content:"\f4fa"}.bi-plus-square-dotted:before{content:"\f4fb"}.bi-plus-square-fill:before{content:"\f4fc"}.bi-plus-square:before{content:"\f4fd"}.bi-plus:before{content:"\f4fe"}.bi-power:before{content:"\f4ff"}.bi-printer-fill:before{content:"\f500"}.bi-printer:before{content:"\f501"}.bi-puzzle-fill:before{content:"\f502"}.bi-puzzle:before{content:"\f503"}.bi-question-circle-fill:before{content:"\f504"}.bi-question-circle:before{content:"\f505"}.bi-question-diamond-fill:before{content:"\f506"}.bi-question-diamond:before{content:"\f507"}.bi-question-octagon-fill:before{content:"\f508"}.bi-question-octagon:before{content:"\f509"}.bi-question-square-fill:before{content:"\f50a"}.bi-question-square:before{content:"\f50b"}.bi-question:before{content:"\f50c"}.bi-rainbow:before{content:"\f50d"}.bi-receipt-cutoff:before{content:"\f50e"}.bi-receipt:before{content:"\f50f"}.bi-reception-0:before{content:"\f510"}.bi-reception-1:before{content:"\f511"}.bi-reception-2:before{content:"\f512"}.bi-reception-3:before{content:"\f513"}.bi-reception-4:before{content:"\f514"}.bi-record-btn-fill:before{content:"\f515"}.bi-record-btn:before{content:"\f516"}.bi-record-circle-fill:before{content:"\f517"}.bi-record-circle:before{content:"\f518"}.bi-record-fill:before{content:"\f519"}.bi-record:before{content:"\f51a"}.bi-record2-fill:before{content:"\f51b"}.bi-record2:before{content:"\f51c"}.bi-reply-all-fill:before{content:"\f51d"}.bi-reply-all:before{content:"\f51e"}.bi-reply-fill:before{content:"\f51f"}.bi-reply:before{content:"\f520"}.bi-rss-fill:before{content:"\f521"}.bi-rss:before{content:"\f522"}.bi-rulers:before{content:"\f523"}.bi-save-fill:before{content:"\f524"}.bi-save:before{content:"\f525"}.bi-save2-fill:before{content:"\f526"}.bi-save2:before{content:"\f527"}.bi-scissors:before{content:"\f528"}.bi-screwdriver:before{content:"\f529"}.bi-search:before{content:"\f52a"}.bi-segmented-nav:before{content:"\f52b"}.bi-server:before{content:"\f52c"}.bi-share-fill:before{content:"\f52d"}.bi-share:before{content:"\f52e"}.bi-shield-check:before{content:"\f52f"}.bi-shield-exclamation:before{content:"\f530"}.bi-shield-fill-check:before{content:"\f531"}.bi-shield-fill-exclamation:before{content:"\f532"}.bi-shield-fill-minus:before{content:"\f533"}.bi-shield-fill-plus:before{content:"\f534"}.bi-shield-fill-x:before{content:"\f535"}.bi-shield-fill:before{content:"\f536"}.bi-shield-lock-fill:before{content:"\f537"}.bi-shield-lock:before{content:"\f538"}.bi-shield-minus:before{content:"\f539"}.bi-shield-plus:before{content:"\f53a"}.bi-shield-shaded:before{content:"\f53b"}.bi-shield-slash-fill:before{content:"\f53c"}.bi-shield-slash:before{content:"\f53d"}.bi-shield-x:before{content:"\f53e"}.bi-shield:before{content:"\f53f"}.bi-shift-fill:before{content:"\f540"}.bi-shift:before{content:"\f541"}.bi-shop-window:before{content:"\f542"}.bi-shop:before{content:"\f543"}.bi-shuffle:before{content:"\f544"}.bi-signpost-2-fill:before{content:"\f545"}.bi-signpost-2:before{content:"\f546"}.bi-signpost-fill:before{content:"\f547"}.bi-signpost-split-fill:before{content:"\f548"}.bi-signpost-split:before{content:"\f549"}.bi-signpost:before{content:"\f54a"}.bi-sim-fill:before{content:"\f54b"}.bi-sim:before{content:"\f54c"}.bi-skip-backward-btn-fill:before{content:"\f54d"}.bi-skip-backward-btn:before{content:"\f54e"}.bi-skip-backward-circle-fill:before{content:"\f54f"}.bi-skip-backward-circle:before{content:"\f550"}.bi-skip-backward-fill:before{content:"\f551"}.bi-skip-backward:before{content:"\f5
52"}.bi-skip-end-btn-fill:before{content:"\f553"}.bi-skip-end-btn:before{content:"\f554"}.bi-skip-end-circle-fill:before{content:"\f555"}.bi-skip-end-circle:before{content:"\f556"}.bi-skip-end-fill:before{content:"\f557"}.bi-skip-end:before{content:"\f558"}.bi-skip-forward-btn-fill:before{content:"\f559"}.bi-skip-forward-btn:before{content:"\f55a"}.bi-skip-forward-circle-fill:before{content:"\f55b"}.bi-skip-forward-circle:before{content:"\f55c"}.bi-skip-forward-fill:before{content:"\f55d"}.bi-skip-forward:before{content:"\f55e"}.bi-skip-start-btn-fill:before{content:"\f55f"}.bi-skip-start-btn:before{content:"\f560"}.bi-skip-start-circle-fill:before{content:"\f561"}.bi-skip-start-circle:before{content:"\f562"}.bi-skip-start-fill:before{content:"\f563"}.bi-skip-start:before{content:"\f564"}.bi-slack:before{content:"\f565"}.bi-slash-circle-fill:before{content:"\f566"}.bi-slash-circle:before{content:"\f567"}.bi-slash-square-fill:before{content:"\f568"}.bi-slash-square:before{content:"\f569"}.bi-slash:before{content:"\f56a"}.bi-sliders:before{content:"\f56b"}.bi-smartwatch:before{content:"\f56c"}.bi-snow:before{content:"\f56d"}.bi-snow2:before{content:"\f56e"}.bi-snow3:before{content:"\f56f"}.bi-sort-alpha-down-alt:before{content:"\f570"}.bi-sort-alpha-down:before{content:"\f571"}.bi-sort-alpha-up-alt:before{content:"\f572"}.bi-sort-alpha-up:before{content:"\f573"}.bi-sort-down-alt:before{content:"\f574"}.bi-sort-down:before{content:"\f575"}.bi-sort-numeric-down-alt:before{content:"\f576"}.bi-sort-numeric-down:before{content:"\f577"}.bi-sort-numeric-up-alt:before{content:"\f578"}.bi-sort-numeric-up:before{content:"\f579"}.bi-sort-up-alt:before{content:"\f57a"}.bi-sort-up:before{content:"\f57b"}.bi-soundwave:before{content:"\f57c"}.bi-speaker-fill:before{content:"\f57d"}.bi-speaker:before{content:"\f57e"}.bi-speedometer:before{content:"\f57f"}.bi-speedometer2:before{content:"\f580"}.bi-spellcheck:before{content:"\f581"}.bi-square-fill:before{content:"\f582"}.bi-square-half:before{content:"\f583"}.bi-square:before{content:"\f584"}.bi-stack:before{content:"\f585"}.bi-star-fill:before{content:"\f586"}.bi-star-half:before{content:"\f587"}.bi-star:before{content:"\f588"}.bi-stars:before{content:"\f589"}.bi-stickies-fill:before{content:"\f58a"}.bi-stickies:before{content:"\f58b"}.bi-sticky-fill:before{content:"\f58c"}.bi-sticky:before{content:"\f58d"}.bi-stop-btn-fill:before{content:"\f58e"}.bi-stop-btn:before{content:"\f58f"}.bi-stop-circle-fill:before{content:"\f590"}.bi-stop-circle:before{content:"\f591"}.bi-stop-fill:before{content:"\f592"}.bi-stop:before{content:"\f593"}.bi-stoplights-fill:before{content:"\f594"}.bi-stoplights:before{content:"\f595"}.bi-stopwatch-fill:before{content:"\f596"}.bi-stopwatch:before{content:"\f597"}.bi-subtract:before{content:"\f598"}.bi-suit-club-fill:before{content:"\f599"}.bi-suit-club:before{content:"\f59a"}.bi-suit-diamond-fill:before{content:"\f59b"}.bi-suit-diamond:before{content:"\f59c"}.bi-suit-heart-fill:before{content:"\f59d"}.bi-suit-heart:before{content:"\f59e"}.bi-suit-spade-fill:before{content:"\f59f"}.bi-suit-spade:before{content:"\f5a0"}.bi-sun-fill:before{content:"\f5a1"}.bi-sun:before{content:"\f5a2"}.bi-sunglasses:before{content:"\f5a3"}.bi-sunrise-fill:before{content:"\f5a4"}.bi-sunrise:before{content:"\f5a5"}.bi-sunset-fill:before{content:"\f5a6"}.bi-sunset:before{content:"\f5a7"}.bi-symmetry-horizontal:before{content:"\f5a8"}.bi-symmetry-vertical:before{content:"\f5a9"}.bi-table:before{content:"\f5aa"}.bi-tablet-fill:before{content:"\f5ab"}.bi-ta
blet-landscape-fill:before{content:"\f5ac"}.bi-tablet-landscape:before{content:"\f5ad"}.bi-tablet:before{content:"\f5ae"}.bi-tag-fill:before{content:"\f5af"}.bi-tag:before{content:"\f5b0"}.bi-tags-fill:before{content:"\f5b1"}.bi-tags:before{content:"\f5b2"}.bi-telegram:before{content:"\f5b3"}.bi-telephone-fill:before{content:"\f5b4"}.bi-telephone-forward-fill:before{content:"\f5b5"}.bi-telephone-forward:before{content:"\f5b6"}.bi-telephone-inbound-fill:before{content:"\f5b7"}.bi-telephone-inbound:before{content:"\f5b8"}.bi-telephone-minus-fill:before{content:"\f5b9"}.bi-telephone-minus:before{content:"\f5ba"}.bi-telephone-outbound-fill:before{content:"\f5bb"}.bi-telephone-outbound:before{content:"\f5bc"}.bi-telephone-plus-fill:before{content:"\f5bd"}.bi-telephone-plus:before{content:"\f5be"}.bi-telephone-x-fill:before{content:"\f5bf"}.bi-telephone-x:before{content:"\f5c0"}.bi-telephone:before{content:"\f5c1"}.bi-terminal-fill:before{content:"\f5c2"}.bi-terminal:before{content:"\f5c3"}.bi-text-center:before{content:"\f5c4"}.bi-text-indent-left:before{content:"\f5c5"}.bi-text-indent-right:before{content:"\f5c6"}.bi-text-left:before{content:"\f5c7"}.bi-text-paragraph:before{content:"\f5c8"}.bi-text-right:before{content:"\f5c9"}.bi-textarea-resize:before{content:"\f5ca"}.bi-textarea-t:before{content:"\f5cb"}.bi-textarea:before{content:"\f5cc"}.bi-thermometer-half:before{content:"\f5cd"}.bi-thermometer-high:before{content:"\f5ce"}.bi-thermometer-low:before{content:"\f5cf"}.bi-thermometer-snow:before{content:"\f5d0"}.bi-thermometer-sun:before{content:"\f5d1"}.bi-thermometer:before{content:"\f5d2"}.bi-three-dots-vertical:before{content:"\f5d3"}.bi-three-dots:before{content:"\f5d4"}.bi-toggle-off:before{content:"\f5d5"}.bi-toggle-on:before{content:"\f5d6"}.bi-toggle2-off:before{content:"\f5d7"}.bi-toggle2-on:before{content:"\f5d8"}.bi-toggles:before{content:"\f5d9"}.bi-toggles2:before{content:"\f5da"}.bi-tools:before{content:"\f5db"}.bi-tornado:before{content:"\f5dc"}.bi-trash-fill:before{content:"\f5dd"}.bi-trash:before{content:"\f5de"}.bi-trash2-fill:before{content:"\f5df"}.bi-trash2:before{content:"\f5e0"}.bi-tree-fill:before{content:"\f5e1"}.bi-tree:before{content:"\f5e2"}.bi-triangle-fill:before{content:"\f5e3"}.bi-triangle-half:before{content:"\f5e4"}.bi-triangle:before{content:"\f5e5"}.bi-trophy-fill:before{content:"\f5e6"}.bi-trophy:before{content:"\f5e7"}.bi-tropical-storm:before{content:"\f5e8"}.bi-truck-flatbed:before{content:"\f5e9"}.bi-truck:before{content:"\f5ea"}.bi-tsunami:before{content:"\f5eb"}.bi-tv-fill:before{content:"\f5ec"}.bi-tv:before{content:"\f5ed"}.bi-twitch:before{content:"\f5ee"}.bi-twitter:before{content:"\f5ef"}.bi-type-bold:before{content:"\f5f0"}.bi-type-h1:before{content:"\f5f1"}.bi-type-h2:before{content:"\f5f2"}.bi-type-h3:before{content:"\f5f3"}.bi-type-italic:before{content:"\f5f4"}.bi-type-strikethrough:before{content:"\f5f5"}.bi-type-underline:before{content:"\f5f6"}.bi-type:before{content:"\f5f7"}.bi-ui-checks-grid:before{content:"\f5f8"}.bi-ui-checks:before{content:"\f5f9"}.bi-ui-radios-grid:before{content:"\f5fa"}.bi-ui-radios:before{content:"\f5fb"}.bi-umbrella-fill:before{content:"\f5fc"}.bi-umbrella:before{content:"\f5fd"}.bi-union:before{content:"\f5fe"}.bi-unlock-fill:before{content:"\f5ff"}.bi-unlock:before{content:"\f600"}.bi-upc-scan:before{content:"\f601"}.bi-upc:before{content:"\f602"}.bi-upload:before{content:"\f603"}.bi-vector-pen:before{content:"\f604"}.bi-view-list:before{content:"\f605"}.bi-view-stacked:before{content:"\f606"}.bi-vinyl-fill
:before{content:"\f607"}.bi-vinyl:before{content:"\f608"}.bi-voicemail:before{content:"\f609"}.bi-volume-down-fill:before{content:"\f60a"}.bi-volume-down:before{content:"\f60b"}.bi-volume-mute-fill:before{content:"\f60c"}.bi-volume-mute:before{content:"\f60d"}.bi-volume-off-fill:before{content:"\f60e"}.bi-volume-off:before{content:"\f60f"}.bi-volume-up-fill:before{content:"\f610"}.bi-volume-up:before{content:"\f611"}.bi-vr:before{content:"\f612"}.bi-wallet-fill:before{content:"\f613"}.bi-wallet:before{content:"\f614"}.bi-wallet2:before{content:"\f615"}.bi-watch:before{content:"\f616"}.bi-water:before{content:"\f617"}.bi-whatsapp:before{content:"\f618"}.bi-wifi-1:before{content:"\f619"}.bi-wifi-2:before{content:"\f61a"}.bi-wifi-off:before{content:"\f61b"}.bi-wifi:before{content:"\f61c"}.bi-wind:before{content:"\f61d"}.bi-window-dock:before{content:"\f61e"}.bi-window-sidebar:before{content:"\f61f"}.bi-window:before{content:"\f620"}.bi-wrench:before{content:"\f621"}.bi-x-circle-fill:before{content:"\f622"}.bi-x-circle:before{content:"\f623"}.bi-x-diamond-fill:before{content:"\f624"}.bi-x-diamond:before{content:"\f625"}.bi-x-octagon-fill:before{content:"\f626"}.bi-x-octagon:before{content:"\f627"}.bi-x-square-fill:before{content:"\f628"}.bi-x-square:before{content:"\f629"}.bi-x:before{content:"\f62a"}.bi-youtube:before{content:"\f62b"}.bi-zoom-in:before{content:"\f62c"}.bi-zoom-out:before{content:"\f62d"}.bi-bank:before{content:"\f62e"}.bi-bank2:before{content:"\f62f"}.bi-bell-slash-fill:before{content:"\f630"}.bi-bell-slash:before{content:"\f631"}.bi-cash-coin:before{content:"\f632"}.bi-check-lg:before{content:"\f633"}.bi-coin:before{content:"\f634"}.bi-currency-bitcoin:before{content:"\f635"}.bi-currency-dollar:before{content:"\f636"}.bi-currency-euro:before{content:"\f637"}.bi-currency-exchange:before{content:"\f638"}.bi-currency-pound:before{content:"\f639"}.bi-currency-yen:before{content:"\f63a"}.bi-dash-lg:before{content:"\f63b"}.bi-exclamation-lg:before{content:"\f63c"}.bi-file-earmark-pdf-fill:before{content:"\f63d"}.bi-file-earmark-pdf:before{content:"\f63e"}.bi-file-pdf-fill:before{content:"\f63f"}.bi-file-pdf:before{content:"\f640"}.bi-gender-ambiguous:before{content:"\f641"}.bi-gender-female:before{content:"\f642"}.bi-gender-male:before{content:"\f643"}.bi-gender-trans:before{content:"\f644"}.bi-headset-vr:before{content:"\f645"}.bi-info-lg:before{content:"\f646"}.bi-mastodon:before{content:"\f647"}.bi-messenger:before{content:"\f648"}.bi-piggy-bank-fill:before{content:"\f649"}.bi-piggy-bank:before{content:"\f64a"}.bi-pin-map-fill:before{content:"\f64b"}.bi-pin-map:before{content:"\f64c"}.bi-plus-lg:before{content:"\f64d"}.bi-question-lg:before{content:"\f64e"}.bi-recycle:before{content:"\f64f"}.bi-reddit:before{content:"\f650"}.bi-safe-fill:before{content:"\f651"}.bi-safe2-fill:before{content:"\f652"}.bi-safe2:before{content:"\f653"}.bi-sd-card-fill:before{content:"\f654"}.bi-sd-card:before{content:"\f655"}.bi-skype:before{content:"\f656"}.bi-slash-lg:before{content:"\f657"}.bi-translate:before{content:"\f658"}.bi-x-lg:before{content:"\f659"}.bi-safe:before{content:"\f65a"}.bi-apple:before{content:"\f65b"}.bi-microsoft:before{content:"\f65d"}.bi-windows:before{content:"\f65e"}.bi-behance:before{content:"\f65c"}.bi-dribbble:before{content:"\f65f"}.bi-line:before{content:"\f660"}.bi-medium:before{content:"\f661"}.bi-paypal:before{content:"\f662"}.bi-pinterest:before{content:"\f663"}.bi-signal:before{content:"\f664"}.bi-snapchat:before{content:"\f665"}.bi-spotify:before{content:"\f666
"}.bi-stack-overflow:before{content:"\f667"}.bi-strava:before{content:"\f668"}.bi-wordpress:before{content:"\f669"}.bi-vimeo:before{content:"\f66a"}.bi-activity:before{content:"\f66b"}.bi-easel2-fill:before{content:"\f66c"}.bi-easel2:before{content:"\f66d"}.bi-easel3-fill:before{content:"\f66e"}.bi-easel3:before{content:"\f66f"}.bi-fan:before{content:"\f670"}.bi-fingerprint:before{content:"\f671"}.bi-graph-down-arrow:before{content:"\f672"}.bi-graph-up-arrow:before{content:"\f673"}.bi-hypnotize:before{content:"\f674"}.bi-magic:before{content:"\f675"}.bi-person-rolodex:before{content:"\f676"}.bi-person-video:before{content:"\f677"}.bi-person-video2:before{content:"\f678"}.bi-person-video3:before{content:"\f679"}.bi-person-workspace:before{content:"\f67a"}.bi-radioactive:before{content:"\f67b"}.bi-webcam-fill:before{content:"\f67c"}.bi-webcam:before{content:"\f67d"}.bi-yin-yang:before{content:"\f67e"}.bi-bandaid-fill:before{content:"\f680"}.bi-bandaid:before{content:"\f681"}.bi-bluetooth:before{content:"\f682"}.bi-body-text:before{content:"\f683"}.bi-boombox:before{content:"\f684"}.bi-boxes:before{content:"\f685"}.bi-dpad-fill:before{content:"\f686"}.bi-dpad:before{content:"\f687"}.bi-ear-fill:before{content:"\f688"}.bi-ear:before{content:"\f689"}.bi-envelope-check-1:before{content:"\f68a"}.bi-envelope-check-fill:before{content:"\f68b"}.bi-envelope-check:before{content:"\f68c"}.bi-envelope-dash-1:before{content:"\f68d"}.bi-envelope-dash-fill:before{content:"\f68e"}.bi-envelope-dash:before{content:"\f68f"}.bi-envelope-exclamation-1:before{content:"\f690"}.bi-envelope-exclamation-fill:before{content:"\f691"}.bi-envelope-exclamation:before{content:"\f692"}.bi-envelope-plus-fill:before{content:"\f693"}.bi-envelope-plus:before{content:"\f694"}.bi-envelope-slash-1:before{content:"\f695"}.bi-envelope-slash-fill:before{content:"\f696"}.bi-envelope-slash:before{content:"\f697"}.bi-envelope-x-1:before{content:"\f698"}.bi-envelope-x-fill:before{content:"\f699"}.bi-envelope-x:before{content:"\f69a"}.bi-explicit-fill:before{content:"\f69b"}.bi-explicit:before{content:"\f69c"}.bi-git:before{content:"\f69d"}.bi-infinity:before{content:"\f69e"}.bi-list-columns-reverse:before{content:"\f69f"}.bi-list-columns:before{content:"\f6a0"}.bi-meta:before{content:"\f6a1"}.bi-mortorboard-fill:before{content:"\f6a2"}.bi-mortorboard:before{content:"\f6a3"}.bi-nintendo-switch:before{content:"\f6a4"}.bi-pc-display-horizontal:before{content:"\f6a5"}.bi-pc-display:before{content:"\f6a6"}.bi-pc-horizontal:before{content:"\f6a7"}.bi-pc:before{content:"\f6a8"}.bi-playstation:before{content:"\f6a9"}.bi-plus-slash-minus:before{content:"\f6aa"}.bi-projector-fill:before{content:"\f6ab"}.bi-projector:before{content:"\f6ac"}.bi-qr-code-scan:before{content:"\f6ad"}.bi-qr-code:before{content:"\f6ae"}.bi-quora:before{content:"\f6af"}.bi-quote:before{content:"\f6b0"}.bi-robot:before{content:"\f6b1"}.bi-send-check-fill:before{content:"\f6b2"}.bi-send-check:before{content:"\f6b3"}.bi-send-dash-fill:before{content:"\f6b4"}.bi-send-dash:before{content:"\f6b5"}.bi-send-exclamation-1:before{content:"\f6b6"}.bi-send-exclamation-fill:before{content:"\f6b7"}.bi-send-exclamation:before{content:"\f6b8"}.bi-send-fill:before{content:"\f6b9"}.bi-send-plus-fill:before{content:"\f6ba"}.bi-send-plus:before{content:"\f6bb"}.bi-send-slash-fill:before{content:"\f6bc"}.bi-send-slash:before{content:"\f6bd"}.bi-send-x-fill:before{content:"\f6be"}.bi-send-x:before{content:"\f6bf"}.bi-send:before{content:"\f6c0"}.bi-steam:before{content:"\f6c1"}.bi-terminal-dash
-1:before{content:"\f6c2"}.bi-terminal-dash:before{content:"\f6c3"}.bi-terminal-plus:before{content:"\f6c4"}.bi-terminal-split:before{content:"\f6c5"}.bi-ticket-detailed-fill:before{content:"\f6c6"}.bi-ticket-detailed:before{content:"\f6c7"}.bi-ticket-fill:before{content:"\f6c8"}.bi-ticket-perforated-fill:before{content:"\f6c9"}.bi-ticket-perforated:before{content:"\f6ca"}.bi-ticket:before{content:"\f6cb"}.bi-tiktok:before{content:"\f6cc"}.bi-window-dash:before{content:"\f6cd"}.bi-window-desktop:before{content:"\f6ce"}.bi-window-fullscreen:before{content:"\f6cf"}.bi-window-plus:before{content:"\f6d0"}.bi-window-split:before{content:"\f6d1"}.bi-window-stack:before{content:"\f6d2"}.bi-window-x:before{content:"\f6d3"}.bi-xbox:before{content:"\f6d4"}.bi-ethernet:before{content:"\f6d5"}.bi-hdmi-fill:before{content:"\f6d6"}.bi-hdmi:before{content:"\f6d7"}.bi-usb-c-fill:before{content:"\f6d8"}.bi-usb-c:before{content:"\f6d9"}.bi-usb-fill:before{content:"\f6da"}.bi-usb-plug-fill:before{content:"\f6db"}.bi-usb-plug:before{content:"\f6dc"}.bi-usb-symbol:before{content:"\f6dd"}.bi-usb:before{content:"\f6de"}.bi-boombox-fill:before{content:"\f6df"}.bi-displayport-1:before{content:"\f6e0"}.bi-displayport:before{content:"\f6e1"}.bi-gpu-card:before{content:"\f6e2"}.bi-memory:before{content:"\f6e3"}.bi-modem-fill:before{content:"\f6e4"}.bi-modem:before{content:"\f6e5"}.bi-motherboard-fill:before{content:"\f6e6"}.bi-motherboard:before{content:"\f6e7"}.bi-optical-audio-fill:before{content:"\f6e8"}.bi-optical-audio:before{content:"\f6e9"}.bi-pci-card:before{content:"\f6ea"}.bi-router-fill:before{content:"\f6eb"}.bi-router:before{content:"\f6ec"}.bi-ssd-fill:before{content:"\f6ed"}.bi-ssd:before{content:"\f6ee"}.bi-thunderbolt-fill:before{content:"\f6ef"}.bi-thunderbolt:before{content:"\f6f0"}.bi-usb-drive-fill:before{content:"\f6f1"}.bi-usb-drive:before{content:"\f6f2"}.bi-usb-micro-fill:before{content:"\f6f3"}.bi-usb-micro:before{content:"\f6f4"}.bi-usb-mini-fill:before{content:"\f6f5"}.bi-usb-mini:before{content:"\f6f6"}.bi-cloud-haze2:before{content:"\f6f7"}.bi-device-hdd-fill:before{content:"\f6f8"}.bi-device-hdd:before{content:"\f6f9"}.bi-device-ssd-fill:before{content:"\f6fa"}.bi-device-ssd:before{content:"\f6fb"}.bi-displayport-fill:before{content:"\f6fc"}.bi-mortarboard-fill:before{content:"\f6fd"}.bi-mortarboard:before{content:"\f6fe"}.bi-terminal-x:before{content:"\f6ff"} \ No newline at end of file diff --git a/spaces/aaaaaabbbbbbbdddddddduuuuulllll/Ashaar/poetry_diacritizer/util/constants.py b/spaces/aaaaaabbbbbbbdddddddduuuuulllll/Ashaar/poetry_diacritizer/util/constants.py deleted file mode 100644 index 2915b2846e5f1b1678991e81f6572776ace8a4c9..0000000000000000000000000000000000000000 --- a/spaces/aaaaaabbbbbbbdddddddduuuuulllll/Ashaar/poetry_diacritizer/util/constants.py +++ /dev/null @@ -1,34 +0,0 @@ -""" -Constants that are used by the model -""" -HARAQAT = ["ْ", "ّ", "ٌ", "ٍ", "ِ", "ً", "َ", "ُ"] -ARAB_CHARS = "ىعظحرسيشضق ثلصطكآماإهزءأفؤغجئدةخوبذتن" -PUNCTUATIONS = [".", "،", ":", "؛", "-", "؟"] -VALID_ARABIC = HARAQAT + list(ARAB_CHARS) -BASIC_HARAQAT = { - "َ": "Fatha ", - "ً": "Fathatah ", - "ُ": "Damma ", - "ٌ": "Dammatan ", - "ِ": "Kasra ", - "ٍ": "Kasratan ", - "ْ": "Sukun ", - "ّ": "Shaddah ", -} -ALL_POSSIBLE_HARAQAT = { - "": "No Diacritic ", - "َ": "Fatha ", - "ً": "Fathatah ", - "ُ": "Damma ", - "ٌ": "Dammatan ", - "ِ": "Kasra ", - "ٍ": "Kasratan ", - "ْ": "Sukun ", - "ّ": "Shaddah ", - "َّ": "Shaddah + Fatha ", - "ًّ": "Shaddah + Fathatah ", - "ُّ": "Shaddah + Damma ", - "ٌّ": 
"Shaddah + Dammatan ", - "ِّ": "Shaddah + Kasra ", - "ٍّ": "Shaddah + Kasratan ", -} diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/psa_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/psa_head.py deleted file mode 100644 index 480dbd1a081262e45bf87e32c4a339ac8f8b4ffb..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/psa_head.py +++ /dev/null @@ -1,196 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from annotator.uniformer.mmcv.cnn import ConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from .decode_head import BaseDecodeHead - -try: - from annotator.uniformer.mmcv.ops import PSAMask -except ModuleNotFoundError: - PSAMask = None - - -@HEADS.register_module() -class PSAHead(BaseDecodeHead): - """Point-wise Spatial Attention Network for Scene Parsing. - - This head is the implementation of `PSANet - `_. - - Args: - mask_size (tuple[int]): The PSA mask size. It usually equals input - size. - psa_type (str): The type of psa module. Options are 'collect', - 'distribute', 'bi-direction'. Default: 'bi-direction' - compact (bool): Whether use compact map for 'collect' mode. - Default: True. - shrink_factor (int): The downsample factors of psa mask. Default: 2. - normalization_factor (float): The normalize factor of attention. - psa_softmax (bool): Whether use softmax for attention. - """ - - def __init__(self, - mask_size, - psa_type='bi-direction', - compact=False, - shrink_factor=2, - normalization_factor=1.0, - psa_softmax=True, - **kwargs): - if PSAMask is None: - raise RuntimeError('Please install mmcv-full for PSAMask ops') - super(PSAHead, self).__init__(**kwargs) - assert psa_type in ['collect', 'distribute', 'bi-direction'] - self.psa_type = psa_type - self.compact = compact - self.shrink_factor = shrink_factor - self.mask_size = mask_size - mask_h, mask_w = mask_size - self.psa_softmax = psa_softmax - if normalization_factor is None: - normalization_factor = mask_h * mask_w - self.normalization_factor = normalization_factor - - self.reduce = ConvModule( - self.in_channels, - self.channels, - kernel_size=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.attention = nn.Sequential( - ConvModule( - self.channels, - self.channels, - kernel_size=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg), - nn.Conv2d( - self.channels, mask_h * mask_w, kernel_size=1, bias=False)) - if psa_type == 'bi-direction': - self.reduce_p = ConvModule( - self.in_channels, - self.channels, - kernel_size=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.attention_p = nn.Sequential( - ConvModule( - self.channels, - self.channels, - kernel_size=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg), - nn.Conv2d( - self.channels, mask_h * mask_w, kernel_size=1, bias=False)) - self.psamask_collect = PSAMask('collect', mask_size) - self.psamask_distribute = PSAMask('distribute', mask_size) - else: - self.psamask = PSAMask(psa_type, mask_size) - self.proj = ConvModule( - self.channels * (2 if psa_type == 'bi-direction' else 1), - self.in_channels, - kernel_size=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.bottleneck = ConvModule( - self.in_channels * 2, - self.channels, - kernel_size=3, - padding=1, - 
conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - identity = x - align_corners = self.align_corners - if self.psa_type in ['collect', 'distribute']: - out = self.reduce(x) - n, c, h, w = out.size() - if self.shrink_factor != 1: - if h % self.shrink_factor and w % self.shrink_factor: - h = (h - 1) // self.shrink_factor + 1 - w = (w - 1) // self.shrink_factor + 1 - align_corners = True - else: - h = h // self.shrink_factor - w = w // self.shrink_factor - align_corners = False - out = resize( - out, - size=(h, w), - mode='bilinear', - align_corners=align_corners) - y = self.attention(out) - if self.compact: - if self.psa_type == 'collect': - y = y.view(n, h * w, - h * w).transpose(1, 2).view(n, h * w, h, w) - else: - y = self.psamask(y) - if self.psa_softmax: - y = F.softmax(y, dim=1) - out = torch.bmm( - out.view(n, c, h * w), y.view(n, h * w, h * w)).view( - n, c, h, w) * (1.0 / self.normalization_factor) - else: - x_col = self.reduce(x) - x_dis = self.reduce_p(x) - n, c, h, w = x_col.size() - if self.shrink_factor != 1: - if h % self.shrink_factor and w % self.shrink_factor: - h = (h - 1) // self.shrink_factor + 1 - w = (w - 1) // self.shrink_factor + 1 - align_corners = True - else: - h = h // self.shrink_factor - w = w // self.shrink_factor - align_corners = False - x_col = resize( - x_col, - size=(h, w), - mode='bilinear', - align_corners=align_corners) - x_dis = resize( - x_dis, - size=(h, w), - mode='bilinear', - align_corners=align_corners) - y_col = self.attention(x_col) - y_dis = self.attention_p(x_dis) - if self.compact: - y_dis = y_dis.view(n, h * w, - h * w).transpose(1, 2).view(n, h * w, h, w) - else: - y_col = self.psamask_collect(y_col) - y_dis = self.psamask_distribute(y_dis) - if self.psa_softmax: - y_col = F.softmax(y_col, dim=1) - y_dis = F.softmax(y_dis, dim=1) - x_col = torch.bmm( - x_col.view(n, c, h * w), y_col.view(n, h * w, h * w)).view( - n, c, h, w) * (1.0 / self.normalization_factor) - x_dis = torch.bmm( - x_dis.view(n, c, h * w), y_dis.view(n, h * w, h * w)).view( - n, c, h, w) * (1.0 / self.normalization_factor) - out = torch.cat([x_col, x_dis], 1) - out = self.proj(out) - out = resize( - out, - size=identity.shape[2:], - mode='bilinear', - align_corners=align_corners) - out = self.bottleneck(torch.cat((identity, out), dim=1)) - out = self.cls_seg(out) - return out diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/schedules/schedule_40k.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/schedules/schedule_40k.py deleted file mode 100644 index 1a03ea075e3cf315a058ef262da9b8374affad20..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/schedules/schedule_40k.py +++ /dev/null @@ -1,20 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. 
- * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from UniFormer repo: From https://github.com/Sense-X/UniFormer - * Apache-2.0 license -''' -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -runner = dict(type='IterBasedRunner', max_iters=40000) -checkpoint_config = dict(by_epoch=False, interval=4000) -evaluation = dict(interval=4000, metric='mIoU') diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/cc_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/cc_head.py deleted file mode 100644 index bbf068e8980a81b7acbf6bc87f74c18e3cf9dd23..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/cc_head.py +++ /dev/null @@ -1,54 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv - * Copyright (c) OpenMMLab. All rights reserved. -''' - -import torch - -from ..builder import HEADS -from .fcn_head import FCNHead - -try: - from annotator.uniformer.mmcv.ops import CrissCrossAttention -except ModuleNotFoundError: - CrissCrossAttention = None - - -@HEADS.register_module() -class CCHead(FCNHead): - """CCNet: Criss-Cross Attention for Semantic Segmentation. - - This head is the implementation of `CCNet - `_. - - Args: - recurrence (int): Number of recurrence of Criss Cross Attention - module. Default: 2. - """ - - def __init__(self, recurrence=2, **kwargs): - if CrissCrossAttention is None: - raise RuntimeError('Please install mmcv-full for ' - 'CrissCrossAttention ops') - super(CCHead, self).__init__(num_convs=2, **kwargs) - self.recurrence = recurrence - self.cca = CrissCrossAttention(self.channels) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - for _ in range(self.recurrence): - output = self.cca(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/gl/glxext_arb.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/gl/glxext_arb.py deleted file mode 100644 index 11763a61b7eb6de2cd9819f7bd8b31e625cc0676..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/gl/glxext_arb.py +++ /dev/null @@ -1,954 +0,0 @@ -"""Wrapper for http://oss.sgi.com/projects/ogl-sample/ABI/glxext.h - -Generated by tools/gengl.py. -Do not modify this file. 
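The two mmsegmentation files removed just above are standard building blocks: schedule_40k.py is a reusable _base_ schedule (SGD with polynomial learning-rate decay, a 40 000-iteration IterBasedRunner, and checkpointing plus mIoU evaluation every 4 000 iterations), while cc_head.py, like psa_head.py earlier, registers its decode head via @HEADS.register_module() so that configs can select it by name. The following is a minimal sketch, not code from this repository, of how such a head is typically wired into an mmsegmentation model config; the backbone choice, channel sizes, class count, and _base_ paths are illustrative defaults.

# Illustrative mmsegmentation-style model config fragment (typical library
# defaults, not values taken from this repository). The head class registered
# above is selected purely by its `type` string.
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    backbone=dict(type='ResNetV1c', depth=50),   # simplified; real configs also set strides/dilations
    decode_head=dict(
        type='CCHead',                 # resolved through HEADS.register_module()
        in_channels=2048,
        in_index=3,
        channels=512,
        recurrence=2,                  # two criss-cross attention passes
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
# A full training config would normally pull in a schedule such as the
# schedule_40k.py shown above through _base_ composition, e.g.:
# _base_ = ['../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
#           '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py']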
-""" - -import ctypes -from ctypes import * -from pyglet.gl.lib import link_GLX as _link_function - - -if not hasattr(ctypes, 'c_int64'): - # XXX TODO completely wrong, but at least can import. - # Can c_longlong still be used? - c_int64 = c_long - c_uint64 = c_ulong - -# BEGIN GENERATED CONTENT (do not edit below this line) - -# This content is generated by tools/gengl.py. -# Wrapper for http://www.opengl.org/registry/api/glxext.h - -import pyglet.libs.x11.xlib -import pyglet.gl.glx - -# H (/usr/include/GL/glx.h:26) -# ARB_get_proc_address (/usr/include/GL/glx.h:317) -# GLXEXT_LEGACY (/usr/include/GL/glx.h:334) -GLX_GLXEXT_VERSION = 32 # GL/glxext.h:53 -# VERSION_1_3 (GL/glxext.h:55) -# VERSION_1_4 (GL/glxext.h:114) -# ARB_get_proc_address (GL/glxext.h:119) -# ARB_multisample (GL/glxext.h:122) -GLX_SAMPLE_BUFFERS_ARB = 100000 # GL/glxext.h:123 -GLX_SAMPLES_ARB = 100001 # GL/glxext.h:124 -# ARB_vertex_buffer_object (GL/glxext.h:127) -GLX_CONTEXT_ALLOW_BUFFER_BYTE_ORDER_MISMATCH_ARB = 8341 # GL/glxext.h:128 -# ARB_fbconfig_float (GL/glxext.h:131) -GLX_RGBA_FLOAT_TYPE_ARB = 8377 # GL/glxext.h:132 -GLX_RGBA_FLOAT_BIT_ARB = 4 # GL/glxext.h:133 -# ARB_framebuffer_sRGB (GL/glxext.h:136) -GLX_FRAMEBUFFER_SRGB_CAPABLE_ARB = 8370 # GL/glxext.h:137 -# ARB_create_context (GL/glxext.h:140) -GLX_CONTEXT_DEBUG_BIT_ARB = 1 # GL/glxext.h:141 -GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB = 2 # GL/glxext.h:142 -GLX_CONTEXT_MAJOR_VERSION_ARB = 8337 # GL/glxext.h:143 -GLX_CONTEXT_MINOR_VERSION_ARB = 8338 # GL/glxext.h:144 -GLX_CONTEXT_FLAGS_ARB = 8340 # GL/glxext.h:145 -# ARB_create_context_profile (GL/glxext.h:148) -GLX_CONTEXT_CORE_PROFILE_BIT_ARB = 1 # GL/glxext.h:149 -GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB = 2 # GL/glxext.h:150 -GLX_CONTEXT_PROFILE_MASK_ARB = 37158 # GL/glxext.h:151 -# ARB_create_context_robustness (GL/glxext.h:154) -GLX_CONTEXT_ROBUST_ACCESS_BIT_ARB = 4 # GL/glxext.h:155 -GLX_LOSE_CONTEXT_ON_RESET_ARB = 33362 # GL/glxext.h:156 -GLX_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB = 33366 # GL/glxext.h:157 -GLX_NO_RESET_NOTIFICATION_ARB = 33377 # GL/glxext.h:158 -# SGIS_multisample (GL/glxext.h:161) -GLX_SAMPLE_BUFFERS_SGIS = 100000 # GL/glxext.h:162 -GLX_SAMPLES_SGIS = 100001 # GL/glxext.h:163 -# EXT_visual_info (GL/glxext.h:166) -GLX_X_VISUAL_TYPE_EXT = 34 # GL/glxext.h:167 -GLX_TRANSPARENT_TYPE_EXT = 35 # GL/glxext.h:168 -GLX_TRANSPARENT_INDEX_VALUE_EXT = 36 # GL/glxext.h:169 -GLX_TRANSPARENT_RED_VALUE_EXT = 37 # GL/glxext.h:170 -GLX_TRANSPARENT_GREEN_VALUE_EXT = 38 # GL/glxext.h:171 -GLX_TRANSPARENT_BLUE_VALUE_EXT = 39 # GL/glxext.h:172 -GLX_TRANSPARENT_ALPHA_VALUE_EXT = 40 # GL/glxext.h:173 -GLX_NONE_EXT = 32768 # GL/glxext.h:174 -GLX_TRUE_COLOR_EXT = 32770 # GL/glxext.h:175 -GLX_DIRECT_COLOR_EXT = 32771 # GL/glxext.h:176 -GLX_PSEUDO_COLOR_EXT = 32772 # GL/glxext.h:177 -GLX_STATIC_COLOR_EXT = 32773 # GL/glxext.h:178 -GLX_GRAY_SCALE_EXT = 32774 # GL/glxext.h:179 -GLX_STATIC_GRAY_EXT = 32775 # GL/glxext.h:180 -GLX_TRANSPARENT_RGB_EXT = 32776 # GL/glxext.h:181 -GLX_TRANSPARENT_INDEX_EXT = 32777 # GL/glxext.h:182 -# SGI_swap_control (GL/glxext.h:185) -# SGI_video_sync (GL/glxext.h:188) -# SGI_make_current_read (GL/glxext.h:191) -# SGIX_video_source (GL/glxext.h:194) -# EXT_visual_rating (GL/glxext.h:197) -GLX_VISUAL_CAVEAT_EXT = 32 # GL/glxext.h:198 -GLX_SLOW_VISUAL_EXT = 32769 # GL/glxext.h:199 -GLX_NON_CONFORMANT_VISUAL_EXT = 32781 # GL/glxext.h:200 -# EXT_import_context (GL/glxext.h:204) -GLX_SHARE_CONTEXT_EXT = 32778 # GL/glxext.h:205 -GLX_VISUAL_ID_EXT = 32779 # GL/glxext.h:206 -GLX_SCREEN_EXT 
= 32780 # GL/glxext.h:207 -# SGIX_fbconfig (GL/glxext.h:210) -GLX_WINDOW_BIT_SGIX = 1 # GL/glxext.h:211 -GLX_PIXMAP_BIT_SGIX = 2 # GL/glxext.h:212 -GLX_RGBA_BIT_SGIX = 1 # GL/glxext.h:213 -GLX_COLOR_INDEX_BIT_SGIX = 2 # GL/glxext.h:214 -GLX_DRAWABLE_TYPE_SGIX = 32784 # GL/glxext.h:215 -GLX_RENDER_TYPE_SGIX = 32785 # GL/glxext.h:216 -GLX_X_RENDERABLE_SGIX = 32786 # GL/glxext.h:217 -GLX_FBCONFIG_ID_SGIX = 32787 # GL/glxext.h:218 -GLX_RGBA_TYPE_SGIX = 32788 # GL/glxext.h:219 -GLX_COLOR_INDEX_TYPE_SGIX = 32789 # GL/glxext.h:220 -# SGIX_pbuffer (GL/glxext.h:224) -GLX_PBUFFER_BIT_SGIX = 4 # GL/glxext.h:225 -GLX_BUFFER_CLOBBER_MASK_SGIX = 134217728 # GL/glxext.h:226 -GLX_FRONT_LEFT_BUFFER_BIT_SGIX = 1 # GL/glxext.h:227 -GLX_FRONT_RIGHT_BUFFER_BIT_SGIX = 2 # GL/glxext.h:228 -GLX_BACK_LEFT_BUFFER_BIT_SGIX = 4 # GL/glxext.h:229 -GLX_BACK_RIGHT_BUFFER_BIT_SGIX = 8 # GL/glxext.h:230 -GLX_AUX_BUFFERS_BIT_SGIX = 16 # GL/glxext.h:231 -GLX_DEPTH_BUFFER_BIT_SGIX = 32 # GL/glxext.h:232 -GLX_STENCIL_BUFFER_BIT_SGIX = 64 # GL/glxext.h:233 -GLX_ACCUM_BUFFER_BIT_SGIX = 128 # GL/glxext.h:234 -GLX_SAMPLE_BUFFERS_BIT_SGIX = 256 # GL/glxext.h:235 -GLX_MAX_PBUFFER_WIDTH_SGIX = 32790 # GL/glxext.h:236 -GLX_MAX_PBUFFER_HEIGHT_SGIX = 32791 # GL/glxext.h:237 -GLX_MAX_PBUFFER_PIXELS_SGIX = 32792 # GL/glxext.h:238 -GLX_OPTIMAL_PBUFFER_WIDTH_SGIX = 32793 # GL/glxext.h:239 -GLX_OPTIMAL_PBUFFER_HEIGHT_SGIX = 32794 # GL/glxext.h:240 -GLX_PRESERVED_CONTENTS_SGIX = 32795 # GL/glxext.h:241 -GLX_LARGEST_PBUFFER_SGIX = 32796 # GL/glxext.h:242 -GLX_WIDTH_SGIX = 32797 # GL/glxext.h:243 -GLX_HEIGHT_SGIX = 32798 # GL/glxext.h:244 -GLX_EVENT_MASK_SGIX = 32799 # GL/glxext.h:245 -GLX_DAMAGED_SGIX = 32800 # GL/glxext.h:246 -GLX_SAVED_SGIX = 32801 # GL/glxext.h:247 -GLX_WINDOW_SGIX = 32802 # GL/glxext.h:248 -GLX_PBUFFER_SGIX = 32803 # GL/glxext.h:249 -# SGI_cushion (GL/glxext.h:252) -# SGIX_video_resize (GL/glxext.h:255) -GLX_SYNC_FRAME_SGIX = 0 # GL/glxext.h:256 -GLX_SYNC_SWAP_SGIX = 1 # GL/glxext.h:257 -# SGIX_dmbuffer (GL/glxext.h:260) -GLX_DIGITAL_MEDIA_PBUFFER_SGIX = 32804 # GL/glxext.h:261 -# SGIX_swap_group (GL/glxext.h:264) -# SGIX_swap_barrier (GL/glxext.h:267) -# SGIS_blended_overlay (GL/glxext.h:270) -GLX_BLENDED_RGBA_SGIS = 32805 # GL/glxext.h:271 -# SGIS_shared_multisample (GL/glxext.h:274) -GLX_MULTISAMPLE_SUB_RECT_WIDTH_SGIS = 32806 # GL/glxext.h:275 -GLX_MULTISAMPLE_SUB_RECT_HEIGHT_SGIS = 32807 # GL/glxext.h:276 -# SUN_get_transparent_index (GL/glxext.h:279) -# 3DFX_multisample (GL/glxext.h:282) -GLX_SAMPLE_BUFFERS_3DFX = 32848 # GL/glxext.h:283 -GLX_SAMPLES_3DFX = 32849 # GL/glxext.h:284 -# MESA_copy_sub_buffer (GL/glxext.h:287) -# MESA_pixmap_colormap (GL/glxext.h:290) -# MESA_release_buffers (GL/glxext.h:293) -# MESA_set_3dfx_mode (GL/glxext.h:296) -GLX_3DFX_WINDOW_MODE_MESA = 1 # GL/glxext.h:297 -GLX_3DFX_FULLSCREEN_MODE_MESA = 2 # GL/glxext.h:298 -# SGIX_visual_select_group (GL/glxext.h:301) -GLX_VISUAL_SELECT_GROUP_SGIX = 32808 # GL/glxext.h:302 -# OML_swap_method (GL/glxext.h:305) -GLX_SWAP_METHOD_OML = 32864 # GL/glxext.h:306 -GLX_SWAP_EXCHANGE_OML = 32865 # GL/glxext.h:307 -GLX_SWAP_COPY_OML = 32866 # GL/glxext.h:308 -GLX_SWAP_UNDEFINED_OML = 32867 # GL/glxext.h:309 -# OML_sync_control (GL/glxext.h:312) -# NV_float_buffer (GL/glxext.h:315) -GLX_FLOAT_COMPONENTS_NV = 8368 # GL/glxext.h:316 -# SGIX_hyperpipe (GL/glxext.h:319) -GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX = 80 # GL/glxext.h:320 -GLX_BAD_HYPERPIPE_CONFIG_SGIX = 91 # GL/glxext.h:321 -GLX_BAD_HYPERPIPE_SGIX = 92 # GL/glxext.h:322 -GLX_HYPERPIPE_DISPLAY_PIPE_SGIX 
= 1 # GL/glxext.h:323 -GLX_HYPERPIPE_RENDER_PIPE_SGIX = 2 # GL/glxext.h:324 -GLX_PIPE_RECT_SGIX = 1 # GL/glxext.h:325 -GLX_PIPE_RECT_LIMITS_SGIX = 2 # GL/glxext.h:326 -GLX_HYPERPIPE_STEREO_SGIX = 3 # GL/glxext.h:327 -GLX_HYPERPIPE_PIXEL_AVERAGE_SGIX = 4 # GL/glxext.h:328 -GLX_HYPERPIPE_ID_SGIX = 32816 # GL/glxext.h:329 -# MESA_agp_offset (GL/glxext.h:332) -# EXT_fbconfig_packed_float (GL/glxext.h:335) -GLX_RGBA_UNSIGNED_FLOAT_TYPE_EXT = 8369 # GL/glxext.h:336 -GLX_RGBA_UNSIGNED_FLOAT_BIT_EXT = 8 # GL/glxext.h:337 -# EXT_framebuffer_sRGB (GL/glxext.h:340) -GLX_FRAMEBUFFER_SRGB_CAPABLE_EXT = 8370 # GL/glxext.h:341 -# EXT_texture_from_pixmap (GL/glxext.h:344) -GLX_TEXTURE_1D_BIT_EXT = 1 # GL/glxext.h:345 -GLX_TEXTURE_2D_BIT_EXT = 2 # GL/glxext.h:346 -GLX_TEXTURE_RECTANGLE_BIT_EXT = 4 # GL/glxext.h:347 -GLX_BIND_TO_TEXTURE_RGB_EXT = 8400 # GL/glxext.h:348 -GLX_BIND_TO_TEXTURE_RGBA_EXT = 8401 # GL/glxext.h:349 -GLX_BIND_TO_MIPMAP_TEXTURE_EXT = 8402 # GL/glxext.h:350 -GLX_BIND_TO_TEXTURE_TARGETS_EXT = 8403 # GL/glxext.h:351 -GLX_Y_INVERTED_EXT = 8404 # GL/glxext.h:352 -GLX_TEXTURE_FORMAT_EXT = 8405 # GL/glxext.h:353 -GLX_TEXTURE_TARGET_EXT = 8406 # GL/glxext.h:354 -GLX_MIPMAP_TEXTURE_EXT = 8407 # GL/glxext.h:355 -GLX_TEXTURE_FORMAT_NONE_EXT = 8408 # GL/glxext.h:356 -GLX_TEXTURE_FORMAT_RGB_EXT = 8409 # GL/glxext.h:357 -GLX_TEXTURE_FORMAT_RGBA_EXT = 8410 # GL/glxext.h:358 -GLX_TEXTURE_1D_EXT = 8411 # GL/glxext.h:359 -GLX_TEXTURE_2D_EXT = 8412 # GL/glxext.h:360 -GLX_TEXTURE_RECTANGLE_EXT = 8413 # GL/glxext.h:361 -GLX_FRONT_LEFT_EXT = 8414 # GL/glxext.h:362 -GLX_FRONT_RIGHT_EXT = 8415 # GL/glxext.h:363 -GLX_BACK_LEFT_EXT = 8416 # GL/glxext.h:364 -GLX_BACK_RIGHT_EXT = 8417 # GL/glxext.h:365 -GLX_FRONT_EXT = 8414 # GL/glxext.h:366 -GLX_BACK_EXT = 8416 # GL/glxext.h:367 -GLX_AUX0_EXT = 8418 # GL/glxext.h:368 -GLX_AUX1_EXT = 8419 # GL/glxext.h:369 -GLX_AUX2_EXT = 8420 # GL/glxext.h:370 -GLX_AUX3_EXT = 8421 # GL/glxext.h:371 -GLX_AUX4_EXT = 8422 # GL/glxext.h:372 -GLX_AUX5_EXT = 8423 # GL/glxext.h:373 -GLX_AUX6_EXT = 8424 # GL/glxext.h:374 -GLX_AUX7_EXT = 8425 # GL/glxext.h:375 -GLX_AUX8_EXT = 8426 # GL/glxext.h:376 -GLX_AUX9_EXT = 8427 # GL/glxext.h:377 -# NV_present_video (GL/glxext.h:380) -GLX_NUM_VIDEO_SLOTS_NV = 8432 # GL/glxext.h:381 -# NV_video_out (GL/glxext.h:384) -GLX_VIDEO_OUT_COLOR_NV = 8387 # GL/glxext.h:385 -GLX_VIDEO_OUT_ALPHA_NV = 8388 # GL/glxext.h:386 -GLX_VIDEO_OUT_DEPTH_NV = 8389 # GL/glxext.h:387 -GLX_VIDEO_OUT_COLOR_AND_ALPHA_NV = 8390 # GL/glxext.h:388 -GLX_VIDEO_OUT_COLOR_AND_DEPTH_NV = 8391 # GL/glxext.h:389 -GLX_VIDEO_OUT_FRAME_NV = 8392 # GL/glxext.h:390 -GLX_VIDEO_OUT_FIELD_1_NV = 8393 # GL/glxext.h:391 -GLX_VIDEO_OUT_FIELD_2_NV = 8394 # GL/glxext.h:392 -GLX_VIDEO_OUT_STACKED_FIELDS_1_2_NV = 8395 # GL/glxext.h:393 -GLX_VIDEO_OUT_STACKED_FIELDS_2_1_NV = 8396 # GL/glxext.h:394 -# NV_swap_group (GL/glxext.h:397) -# NV_video_capture (GL/glxext.h:400) -GLX_DEVICE_ID_NV = 8397 # GL/glxext.h:401 -GLX_UNIQUE_ID_NV = 8398 # GL/glxext.h:402 -GLX_NUM_VIDEO_CAPTURE_SLOTS_NV = 8399 # GL/glxext.h:403 -# EXT_swap_control (GL/glxext.h:406) -GLX_SWAP_INTERVAL_EXT = 8433 # GL/glxext.h:407 -GLX_MAX_SWAP_INTERVAL_EXT = 8434 # GL/glxext.h:408 -# NV_copy_image (GL/glxext.h:411) -# INTEL_swap_event (GL/glxext.h:414) -GLX_BUFFER_SWAP_COMPLETE_INTEL_MASK = 67108864 # GL/glxext.h:415 -GLX_EXCHANGE_COMPLETE_INTEL = 33152 # GL/glxext.h:416 -GLX_COPY_COMPLETE_INTEL = 33153 # GL/glxext.h:417 -GLX_FLIP_COMPLETE_INTEL = 33154 # GL/glxext.h:418 -# NV_multisample_coverage (GL/glxext.h:421) 
-GLX_COVERAGE_SAMPLES_NV = 100001 # GL/glxext.h:422 -GLX_COLOR_SAMPLES_NV = 8371 # GL/glxext.h:423 -# AMD_gpu_association (GL/glxext.h:426) -GLX_GPU_VENDOR_AMD = 7936 # GL/glxext.h:427 -GLX_GPU_RENDERER_STRING_AMD = 7937 # GL/glxext.h:428 -GLX_GPU_OPENGL_VERSION_STRING_AMD = 7938 # GL/glxext.h:429 -GLX_GPU_FASTEST_TARGET_GPUS_AMD = 8610 # GL/glxext.h:430 -GLX_GPU_RAM_AMD = 8611 # GL/glxext.h:431 -GLX_GPU_CLOCK_AMD = 8612 # GL/glxext.h:432 -GLX_GPU_NUM_PIPES_AMD = 8613 # GL/glxext.h:433 -GLX_GPU_NUM_SIMD_AMD = 8614 # GL/glxext.h:434 -GLX_GPU_NUM_RB_AMD = 8615 # GL/glxext.h:435 -GLX_GPU_NUM_SPI_AMD = 8616 # GL/glxext.h:436 -# EXT_create_context_es2_profile (GL/glxext.h:439) -GLX_CONTEXT_ES2_PROFILE_BIT_EXT = 4 # GL/glxext.h:440 -# ARB_get_proc_address (GL/glxext.h:446) -# SGIX_video_source (GL/glxext.h:450) -XID = pyglet.libs.x11.xlib.XID -GLXVideoSourceSGIX = XID # GL/glxext.h:451 -# SGIX_fbconfig (GL/glxext.h:454) -GLXFBConfigIDSGIX = XID # GL/glxext.h:455 -class struct___GLXFBConfigRec(Structure): - __slots__ = [ - ] -struct___GLXFBConfigRec._fields_ = [ - ('_opaque_struct', c_int) -] - -class struct___GLXFBConfigRec(Structure): - __slots__ = [ - ] -struct___GLXFBConfigRec._fields_ = [ - ('_opaque_struct', c_int) -] - -GLXFBConfigSGIX = POINTER(struct___GLXFBConfigRec) # GL/glxext.h:456 -# SGIX_pbuffer (GL/glxext.h:459) -GLXPbufferSGIX = XID # GL/glxext.h:460 -class struct_anon_106(Structure): - __slots__ = [ - 'type', - 'serial', - 'send_event', - 'display', - 'drawable', - 'event_type', - 'draw_type', - 'mask', - 'x', - 'y', - 'width', - 'height', - 'count', - ] -Display = pyglet.libs.x11.xlib.Display -GLXDrawable = pyglet.gl.glx.GLXDrawable -struct_anon_106._fields_ = [ - ('type', c_int), - ('serial', c_ulong), - ('send_event', c_int), - ('display', POINTER(Display)), - ('drawable', GLXDrawable), - ('event_type', c_int), - ('draw_type', c_int), - ('mask', c_uint), - ('x', c_int), - ('y', c_int), - ('width', c_int), - ('height', c_int), - ('count', c_int), -] - -GLXBufferClobberEventSGIX = struct_anon_106 # GL/glxext.h:473 -# NV_video_output (GL/glxext.h:476) -GLXVideoDeviceNV = c_uint # GL/glxext.h:477 -# NV_video_capture (GL/glxext.h:480) -GLXVideoCaptureDeviceNV = XID # GL/glxext.h:481 -# VERSION_1_3 (GL/glxext.h:521) -# VERSION_1_4 (GL/glxext.h:563) -# ARB_get_proc_address (GL/glxext.h:571) -# ARB_multisample (GL/glxext.h:579) -GLX_ARB_multisample = 1 # GL/glxext.h:580 -# ARB_fbconfig_float (GL/glxext.h:583) -GLX_ARB_fbconfig_float = 1 # GL/glxext.h:584 -# ARB_framebuffer_sRGB (GL/glxext.h:587) -GLX_ARB_framebuffer_sRGB = 1 # GL/glxext.h:588 -# ARB_create_context (GL/glxext.h:591) -GLX_ARB_create_context = 1 # GL/glxext.h:592 -GLXContext = pyglet.gl.glx.GLXContext -GLXFBConfig = pyglet.gl.glx.GLXFBConfig -# GL/glxext.h:594 -glXCreateContextAttribsARB = _link_function('glXCreateContextAttribsARB', GLXContext, [POINTER(Display), GLXFBConfig, GLXContext, c_int, POINTER(c_int)], 'ARB_create_context') - -PFNGLXCREATECONTEXTATTRIBSARBPROC = CFUNCTYPE(GLXContext, POINTER(Display), GLXFBConfig, GLXContext, c_int, POINTER(c_int)) # GL/glxext.h:596 -# ARB_create_context_profile (GL/glxext.h:599) -GLX_ARB_create_context_profile = 1 # GL/glxext.h:600 -# ARB_create_context_robustness (GL/glxext.h:603) -GLX_ARB_create_context_robustness = 1 # GL/glxext.h:604 -# SGIS_multisample (GL/glxext.h:607) -GLX_SGIS_multisample = 1 # GL/glxext.h:608 -# EXT_visual_info (GL/glxext.h:611) -GLX_EXT_visual_info = 1 # GL/glxext.h:612 -# SGI_swap_control (GL/glxext.h:615) -GLX_SGI_swap_control = 1 # 
GL/glxext.h:616 -# GL/glxext.h:618 -glXSwapIntervalSGI = _link_function('glXSwapIntervalSGI', c_int, [c_int], 'SGI_swap_control') - -PFNGLXSWAPINTERVALSGIPROC = CFUNCTYPE(c_int, c_int) # GL/glxext.h:620 -# SGI_video_sync (GL/glxext.h:623) -GLX_SGI_video_sync = 1 # GL/glxext.h:624 -# GL/glxext.h:626 -glXGetVideoSyncSGI = _link_function('glXGetVideoSyncSGI', c_int, [POINTER(c_uint)], 'SGI_video_sync') - -# GL/glxext.h:627 -glXWaitVideoSyncSGI = _link_function('glXWaitVideoSyncSGI', c_int, [c_int, c_int, POINTER(c_uint)], 'SGI_video_sync') - -PFNGLXGETVIDEOSYNCSGIPROC = CFUNCTYPE(c_int, POINTER(c_uint)) # GL/glxext.h:629 -PFNGLXWAITVIDEOSYNCSGIPROC = CFUNCTYPE(c_int, c_int, c_int, POINTER(c_uint)) # GL/glxext.h:630 -# SGI_make_current_read (GL/glxext.h:633) -GLX_SGI_make_current_read = 1 # GL/glxext.h:634 -# GL/glxext.h:636 -glXMakeCurrentReadSGI = _link_function('glXMakeCurrentReadSGI', c_int, [POINTER(Display), GLXDrawable, GLXDrawable, GLXContext], 'SGI_make_current_read') - -# GL/glxext.h:637 -glXGetCurrentReadDrawableSGI = _link_function('glXGetCurrentReadDrawableSGI', GLXDrawable, [], 'SGI_make_current_read') - -PFNGLXMAKECURRENTREADSGIPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, GLXDrawable, GLXContext) # GL/glxext.h:639 -PFNGLXGETCURRENTREADDRAWABLESGIPROC = CFUNCTYPE(GLXDrawable) # GL/glxext.h:640 -# SGIX_video_source (GL/glxext.h:643) -GLX_SGIX_video_source = 1 # GL/glxext.h:644 -# EXT_visual_rating (GL/glxext.h:655) -GLX_EXT_visual_rating = 1 # GL/glxext.h:656 -# EXT_import_context (GL/glxext.h:659) -GLX_EXT_import_context = 1 # GL/glxext.h:660 -# GL/glxext.h:662 -glXGetCurrentDisplayEXT = _link_function('glXGetCurrentDisplayEXT', POINTER(Display), [], 'EXT_import_context') - -# GL/glxext.h:663 -glXQueryContextInfoEXT = _link_function('glXQueryContextInfoEXT', c_int, [POINTER(Display), GLXContext, c_int, POINTER(c_int)], 'EXT_import_context') - -GLXContextID = pyglet.gl.glx.GLXContextID -# GL/glxext.h:664 -glXGetContextIDEXT = _link_function('glXGetContextIDEXT', GLXContextID, [GLXContext], 'EXT_import_context') - -# GL/glxext.h:665 -glXImportContextEXT = _link_function('glXImportContextEXT', GLXContext, [POINTER(Display), GLXContextID], 'EXT_import_context') - -# GL/glxext.h:666 -glXFreeContextEXT = _link_function('glXFreeContextEXT', None, [POINTER(Display), GLXContext], 'EXT_import_context') - -PFNGLXGETCURRENTDISPLAYEXTPROC = CFUNCTYPE(POINTER(Display)) # GL/glxext.h:668 -PFNGLXQUERYCONTEXTINFOEXTPROC = CFUNCTYPE(c_int, POINTER(Display), GLXContext, c_int, POINTER(c_int)) # GL/glxext.h:669 -PFNGLXGETCONTEXTIDEXTPROC = CFUNCTYPE(GLXContextID, GLXContext) # GL/glxext.h:670 -PFNGLXIMPORTCONTEXTEXTPROC = CFUNCTYPE(GLXContext, POINTER(Display), GLXContextID) # GL/glxext.h:671 -PFNGLXFREECONTEXTEXTPROC = CFUNCTYPE(None, POINTER(Display), GLXContext) # GL/glxext.h:672 -# SGIX_fbconfig (GL/glxext.h:675) -GLX_SGIX_fbconfig = 1 # GL/glxext.h:676 -# GL/glxext.h:678 -glXGetFBConfigAttribSGIX = _link_function('glXGetFBConfigAttribSGIX', c_int, [POINTER(Display), GLXFBConfigSGIX, c_int, POINTER(c_int)], 'SGIX_fbconfig') - -# GL/glxext.h:679 -glXChooseFBConfigSGIX = _link_function('glXChooseFBConfigSGIX', POINTER(GLXFBConfigSGIX), [POINTER(Display), c_int, POINTER(c_int), POINTER(c_int)], 'SGIX_fbconfig') - -GLXPixmap = pyglet.gl.glx.GLXPixmap -Pixmap = pyglet.libs.x11.xlib.Pixmap -# GL/glxext.h:680 -glXCreateGLXPixmapWithConfigSGIX = _link_function('glXCreateGLXPixmapWithConfigSGIX', GLXPixmap, [POINTER(Display), GLXFBConfigSGIX, Pixmap], 'SGIX_fbconfig') - -# GL/glxext.h:681 
-glXCreateContextWithConfigSGIX = _link_function('glXCreateContextWithConfigSGIX', GLXContext, [POINTER(Display), GLXFBConfigSGIX, c_int, GLXContext, c_int], 'SGIX_fbconfig') - -XVisualInfo = pyglet.libs.x11.xlib.XVisualInfo -# GL/glxext.h:682 -glXGetVisualFromFBConfigSGIX = _link_function('glXGetVisualFromFBConfigSGIX', POINTER(XVisualInfo), [POINTER(Display), GLXFBConfigSGIX], 'SGIX_fbconfig') - -# GL/glxext.h:683 -glXGetFBConfigFromVisualSGIX = _link_function('glXGetFBConfigFromVisualSGIX', GLXFBConfigSGIX, [POINTER(Display), POINTER(XVisualInfo)], 'SGIX_fbconfig') - -PFNGLXGETFBCONFIGATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), GLXFBConfigSGIX, c_int, POINTER(c_int)) # GL/glxext.h:685 -PFNGLXCHOOSEFBCONFIGSGIXPROC = CFUNCTYPE(POINTER(GLXFBConfigSGIX), POINTER(Display), c_int, POINTER(c_int), POINTER(c_int)) # GL/glxext.h:686 -PFNGLXCREATEGLXPIXMAPWITHCONFIGSGIXPROC = CFUNCTYPE(GLXPixmap, POINTER(Display), GLXFBConfigSGIX, Pixmap) # GL/glxext.h:687 -PFNGLXCREATECONTEXTWITHCONFIGSGIXPROC = CFUNCTYPE(GLXContext, POINTER(Display), GLXFBConfigSGIX, c_int, GLXContext, c_int) # GL/glxext.h:688 -PFNGLXGETVISUALFROMFBCONFIGSGIXPROC = CFUNCTYPE(POINTER(XVisualInfo), POINTER(Display), GLXFBConfigSGIX) # GL/glxext.h:689 -PFNGLXGETFBCONFIGFROMVISUALSGIXPROC = CFUNCTYPE(GLXFBConfigSGIX, POINTER(Display), POINTER(XVisualInfo)) # GL/glxext.h:690 -# SGIX_pbuffer (GL/glxext.h:693) -GLX_SGIX_pbuffer = 1 # GL/glxext.h:694 -# GL/glxext.h:696 -glXCreateGLXPbufferSGIX = _link_function('glXCreateGLXPbufferSGIX', GLXPbufferSGIX, [POINTER(Display), GLXFBConfigSGIX, c_uint, c_uint, POINTER(c_int)], 'SGIX_pbuffer') - -# GL/glxext.h:697 -glXDestroyGLXPbufferSGIX = _link_function('glXDestroyGLXPbufferSGIX', None, [POINTER(Display), GLXPbufferSGIX], 'SGIX_pbuffer') - -# GL/glxext.h:698 -glXQueryGLXPbufferSGIX = _link_function('glXQueryGLXPbufferSGIX', c_int, [POINTER(Display), GLXPbufferSGIX, c_int, POINTER(c_uint)], 'SGIX_pbuffer') - -# GL/glxext.h:699 -glXSelectEventSGIX = _link_function('glXSelectEventSGIX', None, [POINTER(Display), GLXDrawable, c_ulong], 'SGIX_pbuffer') - -# GL/glxext.h:700 -glXGetSelectedEventSGIX = _link_function('glXGetSelectedEventSGIX', None, [POINTER(Display), GLXDrawable, POINTER(c_ulong)], 'SGIX_pbuffer') - -PFNGLXCREATEGLXPBUFFERSGIXPROC = CFUNCTYPE(GLXPbufferSGIX, POINTER(Display), GLXFBConfigSGIX, c_uint, c_uint, POINTER(c_int)) # GL/glxext.h:702 -PFNGLXDESTROYGLXPBUFFERSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXPbufferSGIX) # GL/glxext.h:703 -PFNGLXQUERYGLXPBUFFERSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), GLXPbufferSGIX, c_int, POINTER(c_uint)) # GL/glxext.h:704 -PFNGLXSELECTEVENTSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_ulong) # GL/glxext.h:705 -PFNGLXGETSELECTEDEVENTSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, POINTER(c_ulong)) # GL/glxext.h:706 -# SGI_cushion (GL/glxext.h:709) -GLX_SGI_cushion = 1 # GL/glxext.h:710 -Window = pyglet.libs.x11.xlib.Window -# GL/glxext.h:712 -glXCushionSGI = _link_function('glXCushionSGI', None, [POINTER(Display), Window, c_float], 'SGI_cushion') - -PFNGLXCUSHIONSGIPROC = CFUNCTYPE(None, POINTER(Display), Window, c_float) # GL/glxext.h:714 -# SGIX_video_resize (GL/glxext.h:717) -GLX_SGIX_video_resize = 1 # GL/glxext.h:718 -# GL/glxext.h:720 -glXBindChannelToWindowSGIX = _link_function('glXBindChannelToWindowSGIX', c_int, [POINTER(Display), c_int, c_int, Window], 'SGIX_video_resize') - -# GL/glxext.h:721 -glXChannelRectSGIX = _link_function('glXChannelRectSGIX', c_int, [POINTER(Display), c_int, c_int, 
c_int, c_int, c_int, c_int], 'SGIX_video_resize') - -# GL/glxext.h:722 -glXQueryChannelRectSGIX = _link_function('glXQueryChannelRectSGIX', c_int, [POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)], 'SGIX_video_resize') - -# GL/glxext.h:723 -glXQueryChannelDeltasSGIX = _link_function('glXQueryChannelDeltasSGIX', c_int, [POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)], 'SGIX_video_resize') - -GLenum = c_uint # /usr/include/GL/gl.h:153 -# GL/glxext.h:724 -glXChannelRectSyncSGIX = _link_function('glXChannelRectSyncSGIX', c_int, [POINTER(Display), c_int, c_int, GLenum], 'SGIX_video_resize') - -PFNGLXBINDCHANNELTOWINDOWSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, Window) # GL/glxext.h:726 -PFNGLXCHANNELRECTSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, c_int, c_int, c_int) # GL/glxext.h:727 -PFNGLXQUERYCHANNELRECTSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)) # GL/glxext.h:728 -PFNGLXQUERYCHANNELDELTASSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)) # GL/glxext.h:729 -PFNGLXCHANNELRECTSYNCSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, GLenum) # GL/glxext.h:730 -# SGIX_dmbuffer (GL/glxext.h:733) -GLX_SGIX_dmbuffer = 1 # GL/glxext.h:734 -# SGIX_swap_group (GL/glxext.h:743) -GLX_SGIX_swap_group = 1 # GL/glxext.h:744 -# GL/glxext.h:746 -glXJoinSwapGroupSGIX = _link_function('glXJoinSwapGroupSGIX', None, [POINTER(Display), GLXDrawable, GLXDrawable], 'SGIX_swap_group') - -PFNGLXJOINSWAPGROUPSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, GLXDrawable) # GL/glxext.h:748 -# SGIX_swap_barrier (GL/glxext.h:751) -GLX_SGIX_swap_barrier = 1 # GL/glxext.h:752 -# GL/glxext.h:754 -glXBindSwapBarrierSGIX = _link_function('glXBindSwapBarrierSGIX', None, [POINTER(Display), GLXDrawable, c_int], 'SGIX_swap_barrier') - -# GL/glxext.h:755 -glXQueryMaxSwapBarriersSGIX = _link_function('glXQueryMaxSwapBarriersSGIX', c_int, [POINTER(Display), c_int, POINTER(c_int)], 'SGIX_swap_barrier') - -PFNGLXBINDSWAPBARRIERSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int) # GL/glxext.h:757 -PFNGLXQUERYMAXSWAPBARRIERSSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:758 -# SUN_get_transparent_index (GL/glxext.h:761) -GLX_SUN_get_transparent_index = 1 # GL/glxext.h:762 -# GL/glxext.h:764 -glXGetTransparentIndexSUN = _link_function('glXGetTransparentIndexSUN', c_int, [POINTER(Display), Window, Window, POINTER(c_long)], 'SUN_get_transparent_index') - -PFNGLXGETTRANSPARENTINDEXSUNPROC = CFUNCTYPE(c_int, POINTER(Display), Window, Window, POINTER(c_long)) # GL/glxext.h:766 -# MESA_copy_sub_buffer (GL/glxext.h:769) -GLX_MESA_copy_sub_buffer = 1 # GL/glxext.h:770 -# GL/glxext.h:772 -glXCopySubBufferMESA = _link_function('glXCopySubBufferMESA', None, [POINTER(Display), GLXDrawable, c_int, c_int, c_int, c_int], 'MESA_copy_sub_buffer') - -PFNGLXCOPYSUBBUFFERMESAPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int, c_int, c_int, c_int) # GL/glxext.h:774 -# MESA_pixmap_colormap (GL/glxext.h:777) -GLX_MESA_pixmap_colormap = 1 # GL/glxext.h:778 -Colormap = pyglet.libs.x11.xlib.Colormap -# GL/glxext.h:780 -glXCreateGLXPixmapMESA = _link_function('glXCreateGLXPixmapMESA', GLXPixmap, [POINTER(Display), POINTER(XVisualInfo), Pixmap, Colormap], 'MESA_pixmap_colormap') - -PFNGLXCREATEGLXPIXMAPMESAPROC = 
CFUNCTYPE(GLXPixmap, POINTER(Display), POINTER(XVisualInfo), Pixmap, Colormap) # GL/glxext.h:782 -# MESA_release_buffers (GL/glxext.h:785) -GLX_MESA_release_buffers = 1 # GL/glxext.h:786 -# GL/glxext.h:788 -glXReleaseBuffersMESA = _link_function('glXReleaseBuffersMESA', c_int, [POINTER(Display), GLXDrawable], 'MESA_release_buffers') - -PFNGLXRELEASEBUFFERSMESAPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable) # GL/glxext.h:790 -# MESA_set_3dfx_mode (GL/glxext.h:793) -GLX_MESA_set_3dfx_mode = 1 # GL/glxext.h:794 -# GL/glxext.h:796 -glXSet3DfxModeMESA = _link_function('glXSet3DfxModeMESA', c_int, [c_int], 'MESA_set_3dfx_mode') - -PFNGLXSET3DFXMODEMESAPROC = CFUNCTYPE(c_int, c_int) # GL/glxext.h:798 -# SGIX_visual_select_group (GL/glxext.h:801) -GLX_SGIX_visual_select_group = 1 # GL/glxext.h:802 -# OML_swap_method (GL/glxext.h:805) -GLX_OML_swap_method = 1 # GL/glxext.h:806 -# OML_sync_control (GL/glxext.h:809) -GLX_OML_sync_control = 1 # GL/glxext.h:810 -# GL/glxext.h:812 -glXGetSyncValuesOML = _link_function('glXGetSyncValuesOML', c_int, [POINTER(Display), GLXDrawable, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control') - -# GL/glxext.h:813 -glXGetMscRateOML = _link_function('glXGetMscRateOML', c_int, [POINTER(Display), GLXDrawable, POINTER(c_int32), POINTER(c_int32)], 'OML_sync_control') - -# GL/glxext.h:814 -glXSwapBuffersMscOML = _link_function('glXSwapBuffersMscOML', c_int64, [POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64], 'OML_sync_control') - -# GL/glxext.h:815 -glXWaitForMscOML = _link_function('glXWaitForMscOML', c_int, [POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control') - -# GL/glxext.h:816 -glXWaitForSbcOML = _link_function('glXWaitForSbcOML', c_int, [POINTER(Display), GLXDrawable, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control') - -PFNGLXGETSYNCVALUESOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:818 -PFNGLXGETMSCRATEOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, POINTER(c_int32), POINTER(c_int32)) # GL/glxext.h:819 -PFNGLXSWAPBUFFERSMSCOMLPROC = CFUNCTYPE(c_int64, POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64) # GL/glxext.h:820 -PFNGLXWAITFORMSCOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:821 -PFNGLXWAITFORSBCOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:822 -# NV_float_buffer (GL/glxext.h:825) -GLX_NV_float_buffer = 1 # GL/glxext.h:826 -# SGIX_hyperpipe (GL/glxext.h:829) -GLX_SGIX_hyperpipe = 1 # GL/glxext.h:830 -class struct_anon_107(Structure): - __slots__ = [ - 'pipeName', - 'networkId', - ] -struct_anon_107._fields_ = [ - ('pipeName', c_char * 80), - ('networkId', c_int), -] - -GLXHyperpipeNetworkSGIX = struct_anon_107 # GL/glxext.h:835 -class struct_anon_108(Structure): - __slots__ = [ - 'pipeName', - 'channel', - 'participationType', - 'timeSlice', - ] -struct_anon_108._fields_ = [ - ('pipeName', c_char * 80), - ('channel', c_int), - ('participationType', c_uint), - ('timeSlice', c_int), -] - -GLXHyperpipeConfigSGIX = struct_anon_108 # GL/glxext.h:843 -class struct_anon_109(Structure): - __slots__ = [ - 'pipeName', - 'srcXOrigin', - 'srcYOrigin', - 'srcWidth', - 'srcHeight', - 'destXOrigin', - 'destYOrigin', - 'destWidth', - 
'destHeight', - ] -struct_anon_109._fields_ = [ - ('pipeName', c_char * 80), - ('srcXOrigin', c_int), - ('srcYOrigin', c_int), - ('srcWidth', c_int), - ('srcHeight', c_int), - ('destXOrigin', c_int), - ('destYOrigin', c_int), - ('destWidth', c_int), - ('destHeight', c_int), -] - -GLXPipeRect = struct_anon_109 # GL/glxext.h:849 -class struct_anon_110(Structure): - __slots__ = [ - 'pipeName', - 'XOrigin', - 'YOrigin', - 'maxHeight', - 'maxWidth', - ] -struct_anon_110._fields_ = [ - ('pipeName', c_char * 80), - ('XOrigin', c_int), - ('YOrigin', c_int), - ('maxHeight', c_int), - ('maxWidth', c_int), -] - -GLXPipeRectLimits = struct_anon_110 # GL/glxext.h:854 -# GL/glxext.h:857 -glXQueryHyperpipeNetworkSGIX = _link_function('glXQueryHyperpipeNetworkSGIX', POINTER(GLXHyperpipeNetworkSGIX), [POINTER(Display), POINTER(c_int)], 'SGIX_hyperpipe') - -# GL/glxext.h:858 -glXHyperpipeConfigSGIX = _link_function('glXHyperpipeConfigSGIX', c_int, [POINTER(Display), c_int, c_int, POINTER(GLXHyperpipeConfigSGIX), POINTER(c_int)], 'SGIX_hyperpipe') - -# GL/glxext.h:859 -glXQueryHyperpipeConfigSGIX = _link_function('glXQueryHyperpipeConfigSGIX', POINTER(GLXHyperpipeConfigSGIX), [POINTER(Display), c_int, POINTER(c_int)], 'SGIX_hyperpipe') - -# GL/glxext.h:860 -glXDestroyHyperpipeConfigSGIX = _link_function('glXDestroyHyperpipeConfigSGIX', c_int, [POINTER(Display), c_int], 'SGIX_hyperpipe') - -# GL/glxext.h:861 -glXBindHyperpipeSGIX = _link_function('glXBindHyperpipeSGIX', c_int, [POINTER(Display), c_int], 'SGIX_hyperpipe') - -# GL/glxext.h:862 -glXQueryHyperpipeBestAttribSGIX = _link_function('glXQueryHyperpipeBestAttribSGIX', c_int, [POINTER(Display), c_int, c_int, c_int, POINTER(None), POINTER(None)], 'SGIX_hyperpipe') - -# GL/glxext.h:863 -glXHyperpipeAttribSGIX = _link_function('glXHyperpipeAttribSGIX', c_int, [POINTER(Display), c_int, c_int, c_int, POINTER(None)], 'SGIX_hyperpipe') - -# GL/glxext.h:864 -glXQueryHyperpipeAttribSGIX = _link_function('glXQueryHyperpipeAttribSGIX', c_int, [POINTER(Display), c_int, c_int, c_int, POINTER(None)], 'SGIX_hyperpipe') - -PFNGLXQUERYHYPERPIPENETWORKSGIXPROC = CFUNCTYPE(POINTER(GLXHyperpipeNetworkSGIX), POINTER(Display), POINTER(c_int)) # GL/glxext.h:866 -PFNGLXHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(GLXHyperpipeConfigSGIX), POINTER(c_int)) # GL/glxext.h:867 -PFNGLXQUERYHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(POINTER(GLXHyperpipeConfigSGIX), POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:868 -PFNGLXDESTROYHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int) # GL/glxext.h:869 -PFNGLXBINDHYPERPIPESGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int) # GL/glxext.h:870 -PFNGLXQUERYHYPERPIPEBESTATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, POINTER(None), POINTER(None)) # GL/glxext.h:871 -PFNGLXHYPERPIPEATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, POINTER(None)) # GL/glxext.h:872 -PFNGLXQUERYHYPERPIPEATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, POINTER(None)) # GL/glxext.h:873 -# MESA_agp_offset (GL/glxext.h:876) -GLX_MESA_agp_offset = 1 # GL/glxext.h:877 -# GL/glxext.h:879 -glXGetAGPOffsetMESA = _link_function('glXGetAGPOffsetMESA', c_uint, [POINTER(None)], 'MESA_agp_offset') - -PFNGLXGETAGPOFFSETMESAPROC = CFUNCTYPE(c_uint, POINTER(None)) # GL/glxext.h:881 -# EXT_fbconfig_packed_float (GL/glxext.h:884) -GLX_EXT_fbconfig_packed_float = 1 # GL/glxext.h:885 -# EXT_framebuffer_sRGB (GL/glxext.h:888) -GLX_EXT_framebuffer_sRGB = 1 # 
GL/glxext.h:889 -# EXT_texture_from_pixmap (GL/glxext.h:892) -GLX_EXT_texture_from_pixmap = 1 # GL/glxext.h:893 -# GL/glxext.h:895 -glXBindTexImageEXT = _link_function('glXBindTexImageEXT', None, [POINTER(Display), GLXDrawable, c_int, POINTER(c_int)], 'EXT_texture_from_pixmap') - -# GL/glxext.h:896 -glXReleaseTexImageEXT = _link_function('glXReleaseTexImageEXT', None, [POINTER(Display), GLXDrawable, c_int], 'EXT_texture_from_pixmap') - -PFNGLXBINDTEXIMAGEEXTPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int, POINTER(c_int)) # GL/glxext.h:898 -PFNGLXRELEASETEXIMAGEEXTPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int) # GL/glxext.h:899 -# NV_present_video (GL/glxext.h:902) -GLX_NV_present_video = 1 # GL/glxext.h:903 -# GL/glxext.h:905 -glXEnumerateVideoDevicesNV = _link_function('glXEnumerateVideoDevicesNV', POINTER(c_uint), [POINTER(Display), c_int, POINTER(c_int)], 'NV_present_video') - -# GL/glxext.h:906 -glXBindVideoDeviceNV = _link_function('glXBindVideoDeviceNV', c_int, [POINTER(Display), c_uint, c_uint, POINTER(c_int)], 'NV_present_video') - -PFNGLXENUMERATEVIDEODEVICESNVPROC = CFUNCTYPE(POINTER(c_uint), POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:908 -PFNGLXBINDVIDEODEVICENVPROC = CFUNCTYPE(c_int, POINTER(Display), c_uint, c_uint, POINTER(c_int)) # GL/glxext.h:909 -# NV_video_output (GL/glxext.h:912) -GLX_NV_video_output = 1 # GL/glxext.h:913 -# GL/glxext.h:915 -glXGetVideoDeviceNV = _link_function('glXGetVideoDeviceNV', c_int, [POINTER(Display), c_int, c_int, POINTER(GLXVideoDeviceNV)], 'NV_video_output') - -# GL/glxext.h:916 -glXReleaseVideoDeviceNV = _link_function('glXReleaseVideoDeviceNV', c_int, [POINTER(Display), c_int, GLXVideoDeviceNV], 'NV_video_output') - -GLXPbuffer = pyglet.gl.glx.GLXPbuffer -# GL/glxext.h:917 -glXBindVideoImageNV = _link_function('glXBindVideoImageNV', c_int, [POINTER(Display), GLXVideoDeviceNV, GLXPbuffer, c_int], 'NV_video_output') - -# GL/glxext.h:918 -glXReleaseVideoImageNV = _link_function('glXReleaseVideoImageNV', c_int, [POINTER(Display), GLXPbuffer], 'NV_video_output') - -GLboolean = c_ubyte # /usr/include/GL/gl.h:154 -# GL/glxext.h:919 -glXSendPbufferToVideoNV = _link_function('glXSendPbufferToVideoNV', c_int, [POINTER(Display), GLXPbuffer, c_int, POINTER(c_ulong), GLboolean], 'NV_video_output') - -# GL/glxext.h:920 -glXGetVideoInfoNV = _link_function('glXGetVideoInfoNV', c_int, [POINTER(Display), c_int, GLXVideoDeviceNV, POINTER(c_ulong), POINTER(c_ulong)], 'NV_video_output') - -PFNGLXGETVIDEODEVICENVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(GLXVideoDeviceNV)) # GL/glxext.h:922 -PFNGLXRELEASEVIDEODEVICENVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, GLXVideoDeviceNV) # GL/glxext.h:923 -PFNGLXBINDVIDEOIMAGENVPROC = CFUNCTYPE(c_int, POINTER(Display), GLXVideoDeviceNV, GLXPbuffer, c_int) # GL/glxext.h:924 -PFNGLXRELEASEVIDEOIMAGENVPROC = CFUNCTYPE(c_int, POINTER(Display), GLXPbuffer) # GL/glxext.h:925 -PFNGLXSENDPBUFFERTOVIDEONVPROC = CFUNCTYPE(c_int, POINTER(Display), GLXPbuffer, c_int, POINTER(c_ulong), GLboolean) # GL/glxext.h:926 -PFNGLXGETVIDEOINFONVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, GLXVideoDeviceNV, POINTER(c_ulong), POINTER(c_ulong)) # GL/glxext.h:927 -# NV_swap_group (GL/glxext.h:930) -GLX_NV_swap_group = 1 # GL/glxext.h:931 -GLuint = c_uint # /usr/include/GL/gl.h:162 -# GL/glxext.h:933 -glXJoinSwapGroupNV = _link_function('glXJoinSwapGroupNV', c_int, [POINTER(Display), GLXDrawable, GLuint], 'NV_swap_group') - -# GL/glxext.h:934 -glXBindSwapBarrierNV = 
_link_function('glXBindSwapBarrierNV', c_int, [POINTER(Display), GLuint, GLuint], 'NV_swap_group') - -# GL/glxext.h:935 -glXQuerySwapGroupNV = _link_function('glXQuerySwapGroupNV', c_int, [POINTER(Display), GLXDrawable, POINTER(GLuint), POINTER(GLuint)], 'NV_swap_group') - -# GL/glxext.h:936 -glXQueryMaxSwapGroupsNV = _link_function('glXQueryMaxSwapGroupsNV', c_int, [POINTER(Display), c_int, POINTER(GLuint), POINTER(GLuint)], 'NV_swap_group') - -# GL/glxext.h:937 -glXQueryFrameCountNV = _link_function('glXQueryFrameCountNV', c_int, [POINTER(Display), c_int, POINTER(GLuint)], 'NV_swap_group') - -# GL/glxext.h:938 -glXResetFrameCountNV = _link_function('glXResetFrameCountNV', c_int, [POINTER(Display), c_int], 'NV_swap_group') - -PFNGLXJOINSWAPGROUPNVPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, GLuint) # GL/glxext.h:940 -PFNGLXBINDSWAPBARRIERNVPROC = CFUNCTYPE(c_int, POINTER(Display), GLuint, GLuint) # GL/glxext.h:941 -PFNGLXQUERYSWAPGROUPNVPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, POINTER(GLuint), POINTER(GLuint)) # GL/glxext.h:942 -PFNGLXQUERYMAXSWAPGROUPSNVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, POINTER(GLuint), POINTER(GLuint)) # GL/glxext.h:943 -PFNGLXQUERYFRAMECOUNTNVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, POINTER(GLuint)) # GL/glxext.h:944 -PFNGLXRESETFRAMECOUNTNVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int) # GL/glxext.h:945 -# NV_video_capture (GL/glxext.h:948) -GLX_NV_video_capture = 1 # GL/glxext.h:949 -# GL/glxext.h:951 -glXBindVideoCaptureDeviceNV = _link_function('glXBindVideoCaptureDeviceNV', c_int, [POINTER(Display), c_uint, GLXVideoCaptureDeviceNV], 'NV_video_capture') - -# GL/glxext.h:952 -glXEnumerateVideoCaptureDevicesNV = _link_function('glXEnumerateVideoCaptureDevicesNV', POINTER(GLXVideoCaptureDeviceNV), [POINTER(Display), c_int, POINTER(c_int)], 'NV_video_capture') - -# GL/glxext.h:953 -glXLockVideoCaptureDeviceNV = _link_function('glXLockVideoCaptureDeviceNV', None, [POINTER(Display), GLXVideoCaptureDeviceNV], 'NV_video_capture') - -# GL/glxext.h:954 -glXQueryVideoCaptureDeviceNV = _link_function('glXQueryVideoCaptureDeviceNV', c_int, [POINTER(Display), GLXVideoCaptureDeviceNV, c_int, POINTER(c_int)], 'NV_video_capture') - -# GL/glxext.h:955 -glXReleaseVideoCaptureDeviceNV = _link_function('glXReleaseVideoCaptureDeviceNV', None, [POINTER(Display), GLXVideoCaptureDeviceNV], 'NV_video_capture') - -PFNGLXBINDVIDEOCAPTUREDEVICENVPROC = CFUNCTYPE(c_int, POINTER(Display), c_uint, GLXVideoCaptureDeviceNV) # GL/glxext.h:957 -PFNGLXENUMERATEVIDEOCAPTUREDEVICESNVPROC = CFUNCTYPE(POINTER(GLXVideoCaptureDeviceNV), POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:958 -PFNGLXLOCKVIDEOCAPTUREDEVICENVPROC = CFUNCTYPE(None, POINTER(Display), GLXVideoCaptureDeviceNV) # GL/glxext.h:959 -PFNGLXQUERYVIDEOCAPTUREDEVICENVPROC = CFUNCTYPE(c_int, POINTER(Display), GLXVideoCaptureDeviceNV, c_int, POINTER(c_int)) # GL/glxext.h:960 -PFNGLXRELEASEVIDEOCAPTUREDEVICENVPROC = CFUNCTYPE(None, POINTER(Display), GLXVideoCaptureDeviceNV) # GL/glxext.h:961 -# EXT_swap_control (GL/glxext.h:964) -GLX_EXT_swap_control = 1 # GL/glxext.h:965 -# GL/glxext.h:967 -glXSwapIntervalEXT = _link_function('glXSwapIntervalEXT', c_int, [POINTER(Display), GLXDrawable, c_int], 'EXT_swap_control') - -PFNGLXSWAPINTERVALEXTPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, c_int) # GL/glxext.h:969 -# NV_copy_image (GL/glxext.h:972) -GLX_NV_copy_image = 1 # GL/glxext.h:973 -GLint = c_int # /usr/include/GL/gl.h:159 -GLsizei = c_int # /usr/include/GL/gl.h:163 -# 
GL/glxext.h:975 -glXCopyImageSubDataNV = _link_function('glXCopyImageSubDataNV', None, [POINTER(Display), GLXContext, GLuint, GLenum, GLint, GLint, GLint, GLint, GLXContext, GLuint, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei], 'NV_copy_image') - -PFNGLXCOPYIMAGESUBDATANVPROC = CFUNCTYPE(None, POINTER(Display), GLXContext, GLuint, GLenum, GLint, GLint, GLint, GLint, GLXContext, GLuint, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei) # GL/glxext.h:977 -# INTEL_swap_event (GL/glxext.h:980) -GLX_INTEL_swap_event = 1 # GL/glxext.h:981 -# NV_multisample_coverage (GL/glxext.h:984) -GLX_NV_multisample_coverage = 1 # GL/glxext.h:985 -# NV_vertex_array_range (/usr/include/GL/glx.h:349) -# MESA_allocate_memory (/usr/include/GL/glx.h:363) -# ARB_render_texture (/usr/include/GL/glx.h:380) -# NV_float_buffer (/usr/include/GL/glx.h:393) -# MESA_swap_frame_usage (/usr/include/GL/glx.h:405) -# MESA_swap_control (/usr/include/GL/glx.h:425) -# EXT_texture_from_pixmap (/usr/include/GL/glx.h:442) - -__all__ = ['GLX_GLXEXT_VERSION', 'GLX_SAMPLE_BUFFERS_ARB', 'GLX_SAMPLES_ARB', -'GLX_CONTEXT_ALLOW_BUFFER_BYTE_ORDER_MISMATCH_ARB', 'GLX_RGBA_FLOAT_TYPE_ARB', -'GLX_RGBA_FLOAT_BIT_ARB', 'GLX_FRAMEBUFFER_SRGB_CAPABLE_ARB', -'GLX_CONTEXT_DEBUG_BIT_ARB', 'GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB', -'GLX_CONTEXT_MAJOR_VERSION_ARB', 'GLX_CONTEXT_MINOR_VERSION_ARB', -'GLX_CONTEXT_FLAGS_ARB', 'GLX_CONTEXT_CORE_PROFILE_BIT_ARB', -'GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB', 'GLX_CONTEXT_PROFILE_MASK_ARB', -'GLX_CONTEXT_ROBUST_ACCESS_BIT_ARB', 'GLX_LOSE_CONTEXT_ON_RESET_ARB', -'GLX_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB', -'GLX_NO_RESET_NOTIFICATION_ARB', 'GLX_SAMPLE_BUFFERS_SGIS', -'GLX_SAMPLES_SGIS', 'GLX_X_VISUAL_TYPE_EXT', 'GLX_TRANSPARENT_TYPE_EXT', -'GLX_TRANSPARENT_INDEX_VALUE_EXT', 'GLX_TRANSPARENT_RED_VALUE_EXT', -'GLX_TRANSPARENT_GREEN_VALUE_EXT', 'GLX_TRANSPARENT_BLUE_VALUE_EXT', -'GLX_TRANSPARENT_ALPHA_VALUE_EXT', 'GLX_NONE_EXT', 'GLX_TRUE_COLOR_EXT', -'GLX_DIRECT_COLOR_EXT', 'GLX_PSEUDO_COLOR_EXT', 'GLX_STATIC_COLOR_EXT', -'GLX_GRAY_SCALE_EXT', 'GLX_STATIC_GRAY_EXT', 'GLX_TRANSPARENT_RGB_EXT', -'GLX_TRANSPARENT_INDEX_EXT', 'GLX_VISUAL_CAVEAT_EXT', 'GLX_SLOW_VISUAL_EXT', -'GLX_NON_CONFORMANT_VISUAL_EXT', 'GLX_SHARE_CONTEXT_EXT', 'GLX_VISUAL_ID_EXT', -'GLX_SCREEN_EXT', 'GLX_WINDOW_BIT_SGIX', 'GLX_PIXMAP_BIT_SGIX', -'GLX_RGBA_BIT_SGIX', 'GLX_COLOR_INDEX_BIT_SGIX', 'GLX_DRAWABLE_TYPE_SGIX', -'GLX_RENDER_TYPE_SGIX', 'GLX_X_RENDERABLE_SGIX', 'GLX_FBCONFIG_ID_SGIX', -'GLX_RGBA_TYPE_SGIX', 'GLX_COLOR_INDEX_TYPE_SGIX', 'GLX_PBUFFER_BIT_SGIX', -'GLX_BUFFER_CLOBBER_MASK_SGIX', 'GLX_FRONT_LEFT_BUFFER_BIT_SGIX', -'GLX_FRONT_RIGHT_BUFFER_BIT_SGIX', 'GLX_BACK_LEFT_BUFFER_BIT_SGIX', -'GLX_BACK_RIGHT_BUFFER_BIT_SGIX', 'GLX_AUX_BUFFERS_BIT_SGIX', -'GLX_DEPTH_BUFFER_BIT_SGIX', 'GLX_STENCIL_BUFFER_BIT_SGIX', -'GLX_ACCUM_BUFFER_BIT_SGIX', 'GLX_SAMPLE_BUFFERS_BIT_SGIX', -'GLX_MAX_PBUFFER_WIDTH_SGIX', 'GLX_MAX_PBUFFER_HEIGHT_SGIX', -'GLX_MAX_PBUFFER_PIXELS_SGIX', 'GLX_OPTIMAL_PBUFFER_WIDTH_SGIX', -'GLX_OPTIMAL_PBUFFER_HEIGHT_SGIX', 'GLX_PRESERVED_CONTENTS_SGIX', -'GLX_LARGEST_PBUFFER_SGIX', 'GLX_WIDTH_SGIX', 'GLX_HEIGHT_SGIX', -'GLX_EVENT_MASK_SGIX', 'GLX_DAMAGED_SGIX', 'GLX_SAVED_SGIX', -'GLX_WINDOW_SGIX', 'GLX_PBUFFER_SGIX', 'GLX_SYNC_FRAME_SGIX', -'GLX_SYNC_SWAP_SGIX', 'GLX_DIGITAL_MEDIA_PBUFFER_SGIX', -'GLX_BLENDED_RGBA_SGIS', 'GLX_MULTISAMPLE_SUB_RECT_WIDTH_SGIS', -'GLX_MULTISAMPLE_SUB_RECT_HEIGHT_SGIS', 'GLX_SAMPLE_BUFFERS_3DFX', -'GLX_SAMPLES_3DFX', 'GLX_3DFX_WINDOW_MODE_MESA', 
-'GLX_3DFX_FULLSCREEN_MODE_MESA', 'GLX_VISUAL_SELECT_GROUP_SGIX', -'GLX_SWAP_METHOD_OML', 'GLX_SWAP_EXCHANGE_OML', 'GLX_SWAP_COPY_OML', -'GLX_SWAP_UNDEFINED_OML', 'GLX_FLOAT_COMPONENTS_NV', -'GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX', 'GLX_BAD_HYPERPIPE_CONFIG_SGIX', -'GLX_BAD_HYPERPIPE_SGIX', 'GLX_HYPERPIPE_DISPLAY_PIPE_SGIX', -'GLX_HYPERPIPE_RENDER_PIPE_SGIX', 'GLX_PIPE_RECT_SGIX', -'GLX_PIPE_RECT_LIMITS_SGIX', 'GLX_HYPERPIPE_STEREO_SGIX', -'GLX_HYPERPIPE_PIXEL_AVERAGE_SGIX', 'GLX_HYPERPIPE_ID_SGIX', -'GLX_RGBA_UNSIGNED_FLOAT_TYPE_EXT', 'GLX_RGBA_UNSIGNED_FLOAT_BIT_EXT', -'GLX_FRAMEBUFFER_SRGB_CAPABLE_EXT', 'GLX_TEXTURE_1D_BIT_EXT', -'GLX_TEXTURE_2D_BIT_EXT', 'GLX_TEXTURE_RECTANGLE_BIT_EXT', -'GLX_BIND_TO_TEXTURE_RGB_EXT', 'GLX_BIND_TO_TEXTURE_RGBA_EXT', -'GLX_BIND_TO_MIPMAP_TEXTURE_EXT', 'GLX_BIND_TO_TEXTURE_TARGETS_EXT', -'GLX_Y_INVERTED_EXT', 'GLX_TEXTURE_FORMAT_EXT', 'GLX_TEXTURE_TARGET_EXT', -'GLX_MIPMAP_TEXTURE_EXT', 'GLX_TEXTURE_FORMAT_NONE_EXT', -'GLX_TEXTURE_FORMAT_RGB_EXT', 'GLX_TEXTURE_FORMAT_RGBA_EXT', -'GLX_TEXTURE_1D_EXT', 'GLX_TEXTURE_2D_EXT', 'GLX_TEXTURE_RECTANGLE_EXT', -'GLX_FRONT_LEFT_EXT', 'GLX_FRONT_RIGHT_EXT', 'GLX_BACK_LEFT_EXT', -'GLX_BACK_RIGHT_EXT', 'GLX_FRONT_EXT', 'GLX_BACK_EXT', 'GLX_AUX0_EXT', -'GLX_AUX1_EXT', 'GLX_AUX2_EXT', 'GLX_AUX3_EXT', 'GLX_AUX4_EXT', -'GLX_AUX5_EXT', 'GLX_AUX6_EXT', 'GLX_AUX7_EXT', 'GLX_AUX8_EXT', -'GLX_AUX9_EXT', 'GLX_NUM_VIDEO_SLOTS_NV', 'GLX_VIDEO_OUT_COLOR_NV', -'GLX_VIDEO_OUT_ALPHA_NV', 'GLX_VIDEO_OUT_DEPTH_NV', -'GLX_VIDEO_OUT_COLOR_AND_ALPHA_NV', 'GLX_VIDEO_OUT_COLOR_AND_DEPTH_NV', -'GLX_VIDEO_OUT_FRAME_NV', 'GLX_VIDEO_OUT_FIELD_1_NV', -'GLX_VIDEO_OUT_FIELD_2_NV', 'GLX_VIDEO_OUT_STACKED_FIELDS_1_2_NV', -'GLX_VIDEO_OUT_STACKED_FIELDS_2_1_NV', 'GLX_DEVICE_ID_NV', 'GLX_UNIQUE_ID_NV', -'GLX_NUM_VIDEO_CAPTURE_SLOTS_NV', 'GLX_SWAP_INTERVAL_EXT', -'GLX_MAX_SWAP_INTERVAL_EXT', 'GLX_BUFFER_SWAP_COMPLETE_INTEL_MASK', -'GLX_EXCHANGE_COMPLETE_INTEL', 'GLX_COPY_COMPLETE_INTEL', -'GLX_FLIP_COMPLETE_INTEL', 'GLX_COVERAGE_SAMPLES_NV', 'GLX_COLOR_SAMPLES_NV', -'GLX_GPU_VENDOR_AMD', 'GLX_GPU_RENDERER_STRING_AMD', -'GLX_GPU_OPENGL_VERSION_STRING_AMD', 'GLX_GPU_FASTEST_TARGET_GPUS_AMD', -'GLX_GPU_RAM_AMD', 'GLX_GPU_CLOCK_AMD', 'GLX_GPU_NUM_PIPES_AMD', -'GLX_GPU_NUM_SIMD_AMD', 'GLX_GPU_NUM_RB_AMD', 'GLX_GPU_NUM_SPI_AMD', -'GLX_CONTEXT_ES2_PROFILE_BIT_EXT', 'GLXVideoSourceSGIX', 'GLXFBConfigIDSGIX', -'GLXFBConfigSGIX', 'GLXPbufferSGIX', 'GLXBufferClobberEventSGIX', -'GLXVideoDeviceNV', 'GLXVideoCaptureDeviceNV', 'GLX_ARB_multisample', -'GLX_ARB_fbconfig_float', 'GLX_ARB_framebuffer_sRGB', -'GLX_ARB_create_context', 'glXCreateContextAttribsARB', -'PFNGLXCREATECONTEXTATTRIBSARBPROC', 'GLX_ARB_create_context_profile', -'GLX_ARB_create_context_robustness', 'GLX_SGIS_multisample', -'GLX_EXT_visual_info', 'GLX_SGI_swap_control', 'glXSwapIntervalSGI', -'PFNGLXSWAPINTERVALSGIPROC', 'GLX_SGI_video_sync', 'glXGetVideoSyncSGI', -'glXWaitVideoSyncSGI', 'PFNGLXGETVIDEOSYNCSGIPROC', -'PFNGLXWAITVIDEOSYNCSGIPROC', 'GLX_SGI_make_current_read', -'glXMakeCurrentReadSGI', 'glXGetCurrentReadDrawableSGI', -'PFNGLXMAKECURRENTREADSGIPROC', 'PFNGLXGETCURRENTREADDRAWABLESGIPROC', -'GLX_SGIX_video_source', 'GLX_EXT_visual_rating', 'GLX_EXT_import_context', -'glXGetCurrentDisplayEXT', 'glXQueryContextInfoEXT', 'glXGetContextIDEXT', -'glXImportContextEXT', 'glXFreeContextEXT', 'PFNGLXGETCURRENTDISPLAYEXTPROC', -'PFNGLXQUERYCONTEXTINFOEXTPROC', 'PFNGLXGETCONTEXTIDEXTPROC', -'PFNGLXIMPORTCONTEXTEXTPROC', 'PFNGLXFREECONTEXTEXTPROC', 'GLX_SGIX_fbconfig', -'glXGetFBConfigAttribSGIX', 
'glXChooseFBConfigSGIX', -'glXCreateGLXPixmapWithConfigSGIX', 'glXCreateContextWithConfigSGIX', -'glXGetVisualFromFBConfigSGIX', 'glXGetFBConfigFromVisualSGIX', -'PFNGLXGETFBCONFIGATTRIBSGIXPROC', 'PFNGLXCHOOSEFBCONFIGSGIXPROC', -'PFNGLXCREATEGLXPIXMAPWITHCONFIGSGIXPROC', -'PFNGLXCREATECONTEXTWITHCONFIGSGIXPROC', -'PFNGLXGETVISUALFROMFBCONFIGSGIXPROC', 'PFNGLXGETFBCONFIGFROMVISUALSGIXPROC', -'GLX_SGIX_pbuffer', 'glXCreateGLXPbufferSGIX', 'glXDestroyGLXPbufferSGIX', -'glXQueryGLXPbufferSGIX', 'glXSelectEventSGIX', 'glXGetSelectedEventSGIX', -'PFNGLXCREATEGLXPBUFFERSGIXPROC', 'PFNGLXDESTROYGLXPBUFFERSGIXPROC', -'PFNGLXQUERYGLXPBUFFERSGIXPROC', 'PFNGLXSELECTEVENTSGIXPROC', -'PFNGLXGETSELECTEDEVENTSGIXPROC', 'GLX_SGI_cushion', 'glXCushionSGI', -'PFNGLXCUSHIONSGIPROC', 'GLX_SGIX_video_resize', 'glXBindChannelToWindowSGIX', -'glXChannelRectSGIX', 'glXQueryChannelRectSGIX', 'glXQueryChannelDeltasSGIX', -'glXChannelRectSyncSGIX', 'PFNGLXBINDCHANNELTOWINDOWSGIXPROC', -'PFNGLXCHANNELRECTSGIXPROC', 'PFNGLXQUERYCHANNELRECTSGIXPROC', -'PFNGLXQUERYCHANNELDELTASSGIXPROC', 'PFNGLXCHANNELRECTSYNCSGIXPROC', -'GLX_SGIX_dmbuffer', 'GLX_SGIX_swap_group', 'glXJoinSwapGroupSGIX', -'PFNGLXJOINSWAPGROUPSGIXPROC', 'GLX_SGIX_swap_barrier', -'glXBindSwapBarrierSGIX', 'glXQueryMaxSwapBarriersSGIX', -'PFNGLXBINDSWAPBARRIERSGIXPROC', 'PFNGLXQUERYMAXSWAPBARRIERSSGIXPROC', -'GLX_SUN_get_transparent_index', 'glXGetTransparentIndexSUN', -'PFNGLXGETTRANSPARENTINDEXSUNPROC', 'GLX_MESA_copy_sub_buffer', -'glXCopySubBufferMESA', 'PFNGLXCOPYSUBBUFFERMESAPROC', -'GLX_MESA_pixmap_colormap', 'glXCreateGLXPixmapMESA', -'PFNGLXCREATEGLXPIXMAPMESAPROC', 'GLX_MESA_release_buffers', -'glXReleaseBuffersMESA', 'PFNGLXRELEASEBUFFERSMESAPROC', -'GLX_MESA_set_3dfx_mode', 'glXSet3DfxModeMESA', 'PFNGLXSET3DFXMODEMESAPROC', -'GLX_SGIX_visual_select_group', 'GLX_OML_swap_method', 'GLX_OML_sync_control', -'glXGetSyncValuesOML', 'glXGetMscRateOML', 'glXSwapBuffersMscOML', -'glXWaitForMscOML', 'glXWaitForSbcOML', 'PFNGLXGETSYNCVALUESOMLPROC', -'PFNGLXGETMSCRATEOMLPROC', 'PFNGLXSWAPBUFFERSMSCOMLPROC', -'PFNGLXWAITFORMSCOMLPROC', 'PFNGLXWAITFORSBCOMLPROC', 'GLX_NV_float_buffer', -'GLX_SGIX_hyperpipe', 'GLXHyperpipeNetworkSGIX', 'GLXHyperpipeConfigSGIX', -'GLXPipeRect', 'GLXPipeRectLimits', 'glXQueryHyperpipeNetworkSGIX', -'glXHyperpipeConfigSGIX', 'glXQueryHyperpipeConfigSGIX', -'glXDestroyHyperpipeConfigSGIX', 'glXBindHyperpipeSGIX', -'glXQueryHyperpipeBestAttribSGIX', 'glXHyperpipeAttribSGIX', -'glXQueryHyperpipeAttribSGIX', 'PFNGLXQUERYHYPERPIPENETWORKSGIXPROC', -'PFNGLXHYPERPIPECONFIGSGIXPROC', 'PFNGLXQUERYHYPERPIPECONFIGSGIXPROC', -'PFNGLXDESTROYHYPERPIPECONFIGSGIXPROC', 'PFNGLXBINDHYPERPIPESGIXPROC', -'PFNGLXQUERYHYPERPIPEBESTATTRIBSGIXPROC', 'PFNGLXHYPERPIPEATTRIBSGIXPROC', -'PFNGLXQUERYHYPERPIPEATTRIBSGIXPROC', 'GLX_MESA_agp_offset', -'glXGetAGPOffsetMESA', 'PFNGLXGETAGPOFFSETMESAPROC', -'GLX_EXT_fbconfig_packed_float', 'GLX_EXT_framebuffer_sRGB', -'GLX_EXT_texture_from_pixmap', 'glXBindTexImageEXT', 'glXReleaseTexImageEXT', -'PFNGLXBINDTEXIMAGEEXTPROC', 'PFNGLXRELEASETEXIMAGEEXTPROC', -'GLX_NV_present_video', 'glXEnumerateVideoDevicesNV', 'glXBindVideoDeviceNV', -'PFNGLXENUMERATEVIDEODEVICESNVPROC', 'PFNGLXBINDVIDEODEVICENVPROC', -'GLX_NV_video_output', 'glXGetVideoDeviceNV', 'glXReleaseVideoDeviceNV', -'glXBindVideoImageNV', 'glXReleaseVideoImageNV', 'glXSendPbufferToVideoNV', -'glXGetVideoInfoNV', 'PFNGLXGETVIDEODEVICENVPROC', -'PFNGLXRELEASEVIDEODEVICENVPROC', 'PFNGLXBINDVIDEOIMAGENVPROC', -'PFNGLXRELEASEVIDEOIMAGENVPROC', 
'PFNGLXSENDPBUFFERTOVIDEONVPROC', -'PFNGLXGETVIDEOINFONVPROC', 'GLX_NV_swap_group', 'glXJoinSwapGroupNV', -'glXBindSwapBarrierNV', 'glXQuerySwapGroupNV', 'glXQueryMaxSwapGroupsNV', -'glXQueryFrameCountNV', 'glXResetFrameCountNV', 'PFNGLXJOINSWAPGROUPNVPROC', -'PFNGLXBINDSWAPBARRIERNVPROC', 'PFNGLXQUERYSWAPGROUPNVPROC', -'PFNGLXQUERYMAXSWAPGROUPSNVPROC', 'PFNGLXQUERYFRAMECOUNTNVPROC', -'PFNGLXRESETFRAMECOUNTNVPROC', 'GLX_NV_video_capture', -'glXBindVideoCaptureDeviceNV', 'glXEnumerateVideoCaptureDevicesNV', -'glXLockVideoCaptureDeviceNV', 'glXQueryVideoCaptureDeviceNV', -'glXReleaseVideoCaptureDeviceNV', 'PFNGLXBINDVIDEOCAPTUREDEVICENVPROC', -'PFNGLXENUMERATEVIDEOCAPTUREDEVICESNVPROC', -'PFNGLXLOCKVIDEOCAPTUREDEVICENVPROC', 'PFNGLXQUERYVIDEOCAPTUREDEVICENVPROC', -'PFNGLXRELEASEVIDEOCAPTUREDEVICENVPROC', 'GLX_EXT_swap_control', -'glXSwapIntervalEXT', 'PFNGLXSWAPINTERVALEXTPROC', 'GLX_NV_copy_image', -'glXCopyImageSubDataNV', 'PFNGLXCOPYIMAGESUBDATANVPROC', -'GLX_INTEL_swap_event', 'GLX_NV_multisample_coverage'] -# END GENERATED CONTENT (do not edit above this line) - - - - - - - - diff --git a/spaces/achterbrain/Intel-Generative-Image-Dashboard/README.md b/spaces/achterbrain/Intel-Generative-Image-Dashboard/README.md deleted file mode 100644 index cf701f8ad1ed141602332f2f12b90934912cad21..0000000000000000000000000000000000000000 --- a/spaces/achterbrain/Intel-Generative-Image-Dashboard/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Intel Generative Image Dashboard -emoji: 🧑‍🎨 -colorFrom: yellow -colorTo: pink -sdk: streamlit -sdk_version: 1.15.2 -app_file: Dashboard.py -pinned: false -license: mit ---- - -We provide a version for local hosting, along with a customization guide, at https://github.com/8erberg/Intel-Generative-Image-Dashboard-experimental diff --git a/spaces/adirik/stylemc-demo/encoder4editing/scripts/train.py b/spaces/adirik/stylemc-demo/encoder4editing/scripts/train.py deleted file mode 100644 index d885cfde49a0b21140e663e475918698d5e51ee3..0000000000000000000000000000000000000000 --- a/spaces/adirik/stylemc-demo/encoder4editing/scripts/train.py +++ /dev/null @@ -1,88 +0,0 @@ -""" -This file runs the main training/val loop -""" -import os -import json -import math -import sys -import pprint -import torch -from argparse import Namespace - -sys.path.append(".") -sys.path.append("..") - -from options.train_options import TrainOptions -from training.coach import Coach - - -def main(): - opts = TrainOptions().parse() - previous_train_ckpt = None - if opts.resume_training_from_ckpt: - opts, previous_train_ckpt = load_train_checkpoint(opts) - else: - setup_progressive_steps(opts) - create_initial_experiment_dir(opts) - - coach = Coach(opts, previous_train_ckpt) - coach.train() - - -def load_train_checkpoint(opts): - train_ckpt_path = opts.resume_training_from_ckpt - previous_train_ckpt = torch.load(opts.resume_training_from_ckpt, map_location='cpu') - new_opts_dict = vars(opts) - opts = previous_train_ckpt['opts'] - opts['resume_training_from_ckpt'] = train_ckpt_path - update_new_configs(opts, new_opts_dict) - pprint.pprint(opts) - opts = Namespace(**opts) - if opts.sub_exp_dir is not None: - sub_exp_dir = opts.sub_exp_dir - opts.exp_dir = os.path.join(opts.exp_dir, sub_exp_dir) - create_initial_experiment_dir(opts) - return opts, previous_train_ckpt - - -def setup_progressive_steps(opts): - log_size = int(math.log(opts.stylegan_size, 2)) - num_style_layers = 2*log_size - 2 - num_deltas = num_style_layers - 1 - if opts.progressive_start is not None: # If progressive delta training -
opts.progressive_steps = [0] - next_progressive_step = opts.progressive_start - for i in range(num_deltas): - opts.progressive_steps.append(next_progressive_step) - next_progressive_step += opts.progressive_step_every - - assert opts.progressive_steps is None or is_valid_progressive_steps(opts, num_style_layers), \ - "Invalid progressive training input" - - -def is_valid_progressive_steps(opts, num_style_layers): - return len(opts.progressive_steps) == num_style_layers and opts.progressive_steps[0] == 0 - - -def create_initial_experiment_dir(opts): - if os.path.exists(opts.exp_dir): - raise Exception('Oops... {} already exists'.format(opts.exp_dir)) - os.makedirs(opts.exp_dir) - - opts_dict = vars(opts) - pprint.pprint(opts_dict) - with open(os.path.join(opts.exp_dir, 'opt.json'), 'w') as f: - json.dump(opts_dict, f, indent=4, sort_keys=True) - - -def update_new_configs(ckpt_opts, new_opts): - for k, v in new_opts.items(): - if k not in ckpt_opts: - ckpt_opts[k] = v - if new_opts['update_param_list']: - for param in new_opts['update_param_list']: - ckpt_opts[param] = new_opts[param] - - -if __name__ == '__main__': - main() diff --git a/spaces/ahuss/pet/README.md b/spaces/ahuss/pet/README.md deleted file mode 100644 index f2ed36e2c8fe709b80f7ad62e9fb624435b888aa..0000000000000000000000000000000000000000 --- a/spaces/ahuss/pet/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Pet -emoji: 💻 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/aijack/jojo/e4e/criteria/moco_loss.py b/spaces/aijack/jojo/e4e/criteria/moco_loss.py deleted file mode 100644 index 8fb13fbd426202cff9014c876c85b0d5c4ec6a9d..0000000000000000000000000000000000000000 --- a/spaces/aijack/jojo/e4e/criteria/moco_loss.py +++ /dev/null @@ -1,71 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from configs.paths_config import model_paths - - -class MocoLoss(nn.Module): - - def __init__(self, opts): - super(MocoLoss, self).__init__() - print("Loading MOCO model from path: {}".format(model_paths["moco"])) - self.model = self.__load_model() - self.model.eval() - for param in self.model.parameters(): - param.requires_grad = False - - @staticmethod - def __load_model(): - import torchvision.models as models - model = models.__dict__["resnet50"]() - # freeze all layers but the last fc - for name, param in model.named_parameters(): - if name not in ['fc.weight', 'fc.bias']: - param.requires_grad = False - checkpoint = torch.load(model_paths['moco'], map_location="cpu") - state_dict = checkpoint['state_dict'] - # rename moco pre-trained keys - for k in list(state_dict.keys()): - # retain only encoder_q up to before the embedding layer - if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'): - # remove prefix - state_dict[k[len("module.encoder_q."):]] = state_dict[k] - # delete renamed or unused k - del state_dict[k] - msg = model.load_state_dict(state_dict, strict=False) - assert set(msg.missing_keys) == {"fc.weight", "fc.bias"} - # remove output layer - model = nn.Sequential(*list(model.children())[:-1]).cuda() - return model - - def extract_feats(self, x): - x = F.interpolate(x, size=224) - x_feats = self.model(x) - x_feats = nn.functional.normalize(x_feats, dim=1) - x_feats = x_feats.squeeze() - return x_feats - - def forward(self, y_hat, y, x): - n_samples = x.shape[0] - x_feats = 
self.extract_feats(x) - y_feats = self.extract_feats(y) - y_hat_feats = self.extract_feats(y_hat) - y_feats = y_feats.detach() - loss = 0 - sim_improvement = 0 - sim_logs = [] - count = 0 - for i in range(n_samples): - diff_target = y_hat_feats[i].dot(y_feats[i]) - diff_input = y_hat_feats[i].dot(x_feats[i]) - diff_views = y_feats[i].dot(x_feats[i]) - sim_logs.append({'diff_target': float(diff_target), - 'diff_input': float(diff_input), - 'diff_views': float(diff_views)}) - loss += 1 - diff_target - sim_diff = float(diff_target) - float(diff_views) - sim_improvement += sim_diff - count += 1 - - return loss / count, sim_improvement / count, sim_logs diff --git a/spaces/akhaliq/paint-by-example/README.md b/spaces/akhaliq/paint-by-example/README.md deleted file mode 100644 index 593ac11b4df4495c4df46879540a2fece3d3418e..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/paint-by-example/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Paint by example -emoji: 🔥 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -duplicated_from: runwayml/stable-diffusion-inpainting ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/_emoji_codes.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/_emoji_codes.py deleted file mode 100644 index 1f2877bb2bd520253502b1c05bb811bb0d7ef64c..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/_emoji_codes.py +++ /dev/null @@ -1,3610 +0,0 @@ -EMOJI = { - "1st_place_medal": "🥇", - "2nd_place_medal": "🥈", - "3rd_place_medal": "🥉", - "ab_button_(blood_type)": "🆎", - "atm_sign": "🏧", - "a_button_(blood_type)": "🅰", - "afghanistan": "🇦🇫", - "albania": "🇦🇱", - "algeria": "🇩🇿", - "american_samoa": "🇦🇸", - "andorra": "🇦🇩", - "angola": "🇦🇴", - "anguilla": "🇦🇮", - "antarctica": "🇦🇶", - "antigua_&_barbuda": "🇦🇬", - "aquarius": "♒", - "argentina": "🇦🇷", - "aries": "♈", - "armenia": "🇦🇲", - "aruba": "🇦🇼", - "ascension_island": "🇦🇨", - "australia": "🇦🇺", - "austria": "🇦🇹", - "azerbaijan": "🇦🇿", - "back_arrow": "🔙", - "b_button_(blood_type)": "🅱", - "bahamas": "🇧🇸", - "bahrain": "🇧🇭", - "bangladesh": "🇧🇩", - "barbados": "🇧🇧", - "belarus": "🇧🇾", - "belgium": "🇧🇪", - "belize": "🇧🇿", - "benin": "🇧🇯", - "bermuda": "🇧🇲", - "bhutan": "🇧🇹", - "bolivia": "🇧🇴", - "bosnia_&_herzegovina": "🇧🇦", - "botswana": "🇧🇼", - "bouvet_island": "🇧🇻", - "brazil": "🇧🇷", - "british_indian_ocean_territory": "🇮🇴", - "british_virgin_islands": "🇻🇬", - "brunei": "🇧🇳", - "bulgaria": "🇧🇬", - "burkina_faso": "🇧🇫", - "burundi": "🇧🇮", - "cl_button": "🆑", - "cool_button": "🆒", - "cambodia": "🇰🇭", - "cameroon": "🇨🇲", - "canada": "🇨🇦", - "canary_islands": "🇮🇨", - "cancer": "♋", - "cape_verde": "🇨🇻", - "capricorn": "♑", - "caribbean_netherlands": "🇧🇶", - "cayman_islands": "🇰🇾", - "central_african_republic": "🇨🇫", - "ceuta_&_melilla": "🇪🇦", - "chad": "🇹🇩", - "chile": "🇨🇱", - "china": "🇨🇳", - "christmas_island": "🇨🇽", - "christmas_tree": "🎄", - "clipperton_island": "🇨🇵", - "cocos_(keeling)_islands": "🇨🇨", - "colombia": "🇨🇴", - "comoros": "🇰🇲", - "congo_-_brazzaville": "🇨🇬", - "congo_-_kinshasa": "🇨🇩", - "cook_islands": "🇨🇰", - "costa_rica": "🇨🇷", - "croatia": "🇭🇷", - "cuba": "🇨🇺", - "curaçao": "🇨🇼", - "cyprus": "🇨🇾", - "czechia": "🇨🇿", - "côte_d’ivoire": "🇨🇮", - "denmark": "🇩🇰", - "diego_garcia": "🇩🇬", - 
"djibouti": "🇩🇯", - "dominica": "🇩🇲", - "dominican_republic": "🇩🇴", - "end_arrow": "🔚", - "ecuador": "🇪🇨", - "egypt": "🇪🇬", - "el_salvador": "🇸🇻", - "england": "🏴\U000e0067\U000e0062\U000e0065\U000e006e\U000e0067\U000e007f", - "equatorial_guinea": "🇬🇶", - "eritrea": "🇪🇷", - "estonia": "🇪🇪", - "ethiopia": "🇪🇹", - "european_union": "🇪🇺", - "free_button": "🆓", - "falkland_islands": "🇫🇰", - "faroe_islands": "🇫🇴", - "fiji": "🇫🇯", - "finland": "🇫🇮", - "france": "🇫🇷", - "french_guiana": "🇬🇫", - "french_polynesia": "🇵🇫", - "french_southern_territories": "🇹🇫", - "gabon": "🇬🇦", - "gambia": "🇬🇲", - "gemini": "♊", - "georgia": "🇬🇪", - "germany": "🇩🇪", - "ghana": "🇬🇭", - "gibraltar": "🇬🇮", - "greece": "🇬🇷", - "greenland": "🇬🇱", - "grenada": "🇬🇩", - "guadeloupe": "🇬🇵", - "guam": "🇬🇺", - "guatemala": "🇬🇹", - "guernsey": "🇬🇬", - "guinea": "🇬🇳", - "guinea-bissau": "🇬🇼", - "guyana": "🇬🇾", - "haiti": "🇭🇹", - "heard_&_mcdonald_islands": "🇭🇲", - "honduras": "🇭🇳", - "hong_kong_sar_china": "🇭🇰", - "hungary": "🇭🇺", - "id_button": "🆔", - "iceland": "🇮🇸", - "india": "🇮🇳", - "indonesia": "🇮🇩", - "iran": "🇮🇷", - "iraq": "🇮🇶", - "ireland": "🇮🇪", - "isle_of_man": "🇮🇲", - "israel": "🇮🇱", - "italy": "🇮🇹", - "jamaica": "🇯🇲", - "japan": "🗾", - "japanese_acceptable_button": "🉑", - "japanese_application_button": "🈸", - "japanese_bargain_button": "🉐", - "japanese_castle": "🏯", - "japanese_congratulations_button": "㊗", - "japanese_discount_button": "🈹", - "japanese_dolls": "🎎", - "japanese_free_of_charge_button": "🈚", - "japanese_here_button": "🈁", - "japanese_monthly_amount_button": "🈷", - "japanese_no_vacancy_button": "🈵", - "japanese_not_free_of_charge_button": "🈶", - "japanese_open_for_business_button": "🈺", - "japanese_passing_grade_button": "🈴", - "japanese_post_office": "🏣", - "japanese_prohibited_button": "🈲", - "japanese_reserved_button": "🈯", - "japanese_secret_button": "㊙", - "japanese_service_charge_button": "🈂", - "japanese_symbol_for_beginner": "🔰", - "japanese_vacancy_button": "🈳", - "jersey": "🇯🇪", - "jordan": "🇯🇴", - "kazakhstan": "🇰🇿", - "kenya": "🇰🇪", - "kiribati": "🇰🇮", - "kosovo": "🇽🇰", - "kuwait": "🇰🇼", - "kyrgyzstan": "🇰🇬", - "laos": "🇱🇦", - "latvia": "🇱🇻", - "lebanon": "🇱🇧", - "leo": "♌", - "lesotho": "🇱🇸", - "liberia": "🇱🇷", - "libra": "♎", - "libya": "🇱🇾", - "liechtenstein": "🇱🇮", - "lithuania": "🇱🇹", - "luxembourg": "🇱🇺", - "macau_sar_china": "🇲🇴", - "macedonia": "🇲🇰", - "madagascar": "🇲🇬", - "malawi": "🇲🇼", - "malaysia": "🇲🇾", - "maldives": "🇲🇻", - "mali": "🇲🇱", - "malta": "🇲🇹", - "marshall_islands": "🇲🇭", - "martinique": "🇲🇶", - "mauritania": "🇲🇷", - "mauritius": "🇲🇺", - "mayotte": "🇾🇹", - "mexico": "🇲🇽", - "micronesia": "🇫🇲", - "moldova": "🇲🇩", - "monaco": "🇲🇨", - "mongolia": "🇲🇳", - "montenegro": "🇲🇪", - "montserrat": "🇲🇸", - "morocco": "🇲🇦", - "mozambique": "🇲🇿", - "mrs._claus": "🤶", - "mrs._claus_dark_skin_tone": "🤶🏿", - "mrs._claus_light_skin_tone": "🤶🏻", - "mrs._claus_medium-dark_skin_tone": "🤶🏾", - "mrs._claus_medium-light_skin_tone": "🤶🏼", - "mrs._claus_medium_skin_tone": "🤶🏽", - "myanmar_(burma)": "🇲🇲", - "new_button": "🆕", - "ng_button": "🆖", - "namibia": "🇳🇦", - "nauru": "🇳🇷", - "nepal": "🇳🇵", - "netherlands": "🇳🇱", - "new_caledonia": "🇳🇨", - "new_zealand": "🇳🇿", - "nicaragua": "🇳🇮", - "niger": "🇳🇪", - "nigeria": "🇳🇬", - "niue": "🇳🇺", - "norfolk_island": "🇳🇫", - "north_korea": "🇰🇵", - "northern_mariana_islands": "🇲🇵", - "norway": "🇳🇴", - "ok_button": "🆗", - "ok_hand": "👌", - "ok_hand_dark_skin_tone": "👌🏿", - "ok_hand_light_skin_tone": "👌🏻", - "ok_hand_medium-dark_skin_tone": "👌🏾", - 
"ok_hand_medium-light_skin_tone": "👌🏼", - "ok_hand_medium_skin_tone": "👌🏽", - "on!_arrow": "🔛", - "o_button_(blood_type)": "🅾", - "oman": "🇴🇲", - "ophiuchus": "⛎", - "p_button": "🅿", - "pakistan": "🇵🇰", - "palau": "🇵🇼", - "palestinian_territories": "🇵🇸", - "panama": "🇵🇦", - "papua_new_guinea": "🇵🇬", - "paraguay": "🇵🇾", - "peru": "🇵🇪", - "philippines": "🇵🇭", - "pisces": "♓", - "pitcairn_islands": "🇵🇳", - "poland": "🇵🇱", - "portugal": "🇵🇹", - "puerto_rico": "🇵🇷", - "qatar": "🇶🇦", - "romania": "🇷🇴", - "russia": "🇷🇺", - "rwanda": "🇷🇼", - "réunion": "🇷🇪", - "soon_arrow": "🔜", - "sos_button": "🆘", - "sagittarius": "♐", - "samoa": "🇼🇸", - "san_marino": "🇸🇲", - "santa_claus": "🎅", - "santa_claus_dark_skin_tone": "🎅🏿", - "santa_claus_light_skin_tone": "🎅🏻", - "santa_claus_medium-dark_skin_tone": "🎅🏾", - "santa_claus_medium-light_skin_tone": "🎅🏼", - "santa_claus_medium_skin_tone": "🎅🏽", - "saudi_arabia": "🇸🇦", - "scorpio": "♏", - "scotland": "🏴\U000e0067\U000e0062\U000e0073\U000e0063\U000e0074\U000e007f", - "senegal": "🇸🇳", - "serbia": "🇷🇸", - "seychelles": "🇸🇨", - "sierra_leone": "🇸🇱", - "singapore": "🇸🇬", - "sint_maarten": "🇸🇽", - "slovakia": "🇸🇰", - "slovenia": "🇸🇮", - "solomon_islands": "🇸🇧", - "somalia": "🇸🇴", - "south_africa": "🇿🇦", - "south_georgia_&_south_sandwich_islands": "🇬🇸", - "south_korea": "🇰🇷", - "south_sudan": "🇸🇸", - "spain": "🇪🇸", - "sri_lanka": "🇱🇰", - "st._barthélemy": "🇧🇱", - "st._helena": "🇸🇭", - "st._kitts_&_nevis": "🇰🇳", - "st._lucia": "🇱🇨", - "st._martin": "🇲🇫", - "st._pierre_&_miquelon": "🇵🇲", - "st._vincent_&_grenadines": "🇻🇨", - "statue_of_liberty": "🗽", - "sudan": "🇸🇩", - "suriname": "🇸🇷", - "svalbard_&_jan_mayen": "🇸🇯", - "swaziland": "🇸🇿", - "sweden": "🇸🇪", - "switzerland": "🇨🇭", - "syria": "🇸🇾", - "são_tomé_&_príncipe": "🇸🇹", - "t-rex": "🦖", - "top_arrow": "🔝", - "taiwan": "🇹🇼", - "tajikistan": "🇹🇯", - "tanzania": "🇹🇿", - "taurus": "♉", - "thailand": "🇹🇭", - "timor-leste": "🇹🇱", - "togo": "🇹🇬", - "tokelau": "🇹🇰", - "tokyo_tower": "🗼", - "tonga": "🇹🇴", - "trinidad_&_tobago": "🇹🇹", - "tristan_da_cunha": "🇹🇦", - "tunisia": "🇹🇳", - "turkey": "🦃", - "turkmenistan": "🇹🇲", - "turks_&_caicos_islands": "🇹🇨", - "tuvalu": "🇹🇻", - "u.s._outlying_islands": "🇺🇲", - "u.s._virgin_islands": "🇻🇮", - "up!_button": "🆙", - "uganda": "🇺🇬", - "ukraine": "🇺🇦", - "united_arab_emirates": "🇦🇪", - "united_kingdom": "🇬🇧", - "united_nations": "🇺🇳", - "united_states": "🇺🇸", - "uruguay": "🇺🇾", - "uzbekistan": "🇺🇿", - "vs_button": "🆚", - "vanuatu": "🇻🇺", - "vatican_city": "🇻🇦", - "venezuela": "🇻🇪", - "vietnam": "🇻🇳", - "virgo": "♍", - "wales": "🏴\U000e0067\U000e0062\U000e0077\U000e006c\U000e0073\U000e007f", - "wallis_&_futuna": "🇼🇫", - "western_sahara": "🇪🇭", - "yemen": "🇾🇪", - "zambia": "🇿🇲", - "zimbabwe": "🇿🇼", - "abacus": "🧮", - "adhesive_bandage": "🩹", - "admission_tickets": "🎟", - "adult": "🧑", - "adult_dark_skin_tone": "🧑🏿", - "adult_light_skin_tone": "🧑🏻", - "adult_medium-dark_skin_tone": "🧑🏾", - "adult_medium-light_skin_tone": "🧑🏼", - "adult_medium_skin_tone": "🧑🏽", - "aerial_tramway": "🚡", - "airplane": "✈", - "airplane_arrival": "🛬", - "airplane_departure": "🛫", - "alarm_clock": "⏰", - "alembic": "⚗", - "alien": "👽", - "alien_monster": "👾", - "ambulance": "🚑", - "american_football": "🏈", - "amphora": "🏺", - "anchor": "⚓", - "anger_symbol": "💢", - "angry_face": "😠", - "angry_face_with_horns": "👿", - "anguished_face": "😧", - "ant": "🐜", - "antenna_bars": "📶", - "anxious_face_with_sweat": "😰", - "articulated_lorry": "🚛", - "artist_palette": "🎨", - "astonished_face": "😲", - "atom_symbol": "⚛", 
- "auto_rickshaw": "🛺", - "automobile": "🚗", - "avocado": "🥑", - "axe": "🪓", - "baby": "👶", - "baby_angel": "👼", - "baby_angel_dark_skin_tone": "👼🏿", - "baby_angel_light_skin_tone": "👼🏻", - "baby_angel_medium-dark_skin_tone": "👼🏾", - "baby_angel_medium-light_skin_tone": "👼🏼", - "baby_angel_medium_skin_tone": "👼🏽", - "baby_bottle": "🍼", - "baby_chick": "🐤", - "baby_dark_skin_tone": "👶🏿", - "baby_light_skin_tone": "👶🏻", - "baby_medium-dark_skin_tone": "👶🏾", - "baby_medium-light_skin_tone": "👶🏼", - "baby_medium_skin_tone": "👶🏽", - "baby_symbol": "🚼", - "backhand_index_pointing_down": "👇", - "backhand_index_pointing_down_dark_skin_tone": "👇🏿", - "backhand_index_pointing_down_light_skin_tone": "👇🏻", - "backhand_index_pointing_down_medium-dark_skin_tone": "👇🏾", - "backhand_index_pointing_down_medium-light_skin_tone": "👇🏼", - "backhand_index_pointing_down_medium_skin_tone": "👇🏽", - "backhand_index_pointing_left": "👈", - "backhand_index_pointing_left_dark_skin_tone": "👈🏿", - "backhand_index_pointing_left_light_skin_tone": "👈🏻", - "backhand_index_pointing_left_medium-dark_skin_tone": "👈🏾", - "backhand_index_pointing_left_medium-light_skin_tone": "👈🏼", - "backhand_index_pointing_left_medium_skin_tone": "👈🏽", - "backhand_index_pointing_right": "👉", - "backhand_index_pointing_right_dark_skin_tone": "👉🏿", - "backhand_index_pointing_right_light_skin_tone": "👉🏻", - "backhand_index_pointing_right_medium-dark_skin_tone": "👉🏾", - "backhand_index_pointing_right_medium-light_skin_tone": "👉🏼", - "backhand_index_pointing_right_medium_skin_tone": "👉🏽", - "backhand_index_pointing_up": "👆", - "backhand_index_pointing_up_dark_skin_tone": "👆🏿", - "backhand_index_pointing_up_light_skin_tone": "👆🏻", - "backhand_index_pointing_up_medium-dark_skin_tone": "👆🏾", - "backhand_index_pointing_up_medium-light_skin_tone": "👆🏼", - "backhand_index_pointing_up_medium_skin_tone": "👆🏽", - "bacon": "🥓", - "badger": "🦡", - "badminton": "🏸", - "bagel": "🥯", - "baggage_claim": "🛄", - "baguette_bread": "🥖", - "balance_scale": "⚖", - "bald": "🦲", - "bald_man": "👨\u200d🦲", - "bald_woman": "👩\u200d🦲", - "ballet_shoes": "🩰", - "balloon": "🎈", - "ballot_box_with_ballot": "🗳", - "ballot_box_with_check": "☑", - "banana": "🍌", - "banjo": "🪕", - "bank": "🏦", - "bar_chart": "📊", - "barber_pole": "💈", - "baseball": "⚾", - "basket": "🧺", - "basketball": "🏀", - "bat": "🦇", - "bathtub": "🛁", - "battery": "🔋", - "beach_with_umbrella": "🏖", - "beaming_face_with_smiling_eyes": "😁", - "bear_face": "🐻", - "bearded_person": "🧔", - "bearded_person_dark_skin_tone": "🧔🏿", - "bearded_person_light_skin_tone": "🧔🏻", - "bearded_person_medium-dark_skin_tone": "🧔🏾", - "bearded_person_medium-light_skin_tone": "🧔🏼", - "bearded_person_medium_skin_tone": "🧔🏽", - "beating_heart": "💓", - "bed": "🛏", - "beer_mug": "🍺", - "bell": "🔔", - "bell_with_slash": "🔕", - "bellhop_bell": "🛎", - "bento_box": "🍱", - "beverage_box": "🧃", - "bicycle": "🚲", - "bikini": "👙", - "billed_cap": "🧢", - "biohazard": "☣", - "bird": "🐦", - "birthday_cake": "🎂", - "black_circle": "⚫", - "black_flag": "🏴", - "black_heart": "🖤", - "black_large_square": "⬛", - "black_medium-small_square": "◾", - "black_medium_square": "◼", - "black_nib": "✒", - "black_small_square": "▪", - "black_square_button": "🔲", - "blond-haired_man": "👱\u200d♂️", - "blond-haired_man_dark_skin_tone": "👱🏿\u200d♂️", - "blond-haired_man_light_skin_tone": "👱🏻\u200d♂️", - "blond-haired_man_medium-dark_skin_tone": "👱🏾\u200d♂️", - "blond-haired_man_medium-light_skin_tone": "👱🏼\u200d♂️", - "blond-haired_man_medium_skin_tone": 
"👱🏽\u200d♂️", - "blond-haired_person": "👱", - "blond-haired_person_dark_skin_tone": "👱🏿", - "blond-haired_person_light_skin_tone": "👱🏻", - "blond-haired_person_medium-dark_skin_tone": "👱🏾", - "blond-haired_person_medium-light_skin_tone": "👱🏼", - "blond-haired_person_medium_skin_tone": "👱🏽", - "blond-haired_woman": "👱\u200d♀️", - "blond-haired_woman_dark_skin_tone": "👱🏿\u200d♀️", - "blond-haired_woman_light_skin_tone": "👱🏻\u200d♀️", - "blond-haired_woman_medium-dark_skin_tone": "👱🏾\u200d♀️", - "blond-haired_woman_medium-light_skin_tone": "👱🏼\u200d♀️", - "blond-haired_woman_medium_skin_tone": "👱🏽\u200d♀️", - "blossom": "🌼", - "blowfish": "🐡", - "blue_book": "📘", - "blue_circle": "🔵", - "blue_heart": "💙", - "blue_square": "🟦", - "boar": "🐗", - "bomb": "💣", - "bone": "🦴", - "bookmark": "🔖", - "bookmark_tabs": "📑", - "books": "📚", - "bottle_with_popping_cork": "🍾", - "bouquet": "💐", - "bow_and_arrow": "🏹", - "bowl_with_spoon": "🥣", - "bowling": "🎳", - "boxing_glove": "🥊", - "boy": "👦", - "boy_dark_skin_tone": "👦🏿", - "boy_light_skin_tone": "👦🏻", - "boy_medium-dark_skin_tone": "👦🏾", - "boy_medium-light_skin_tone": "👦🏼", - "boy_medium_skin_tone": "👦🏽", - "brain": "🧠", - "bread": "🍞", - "breast-feeding": "🤱", - "breast-feeding_dark_skin_tone": "🤱🏿", - "breast-feeding_light_skin_tone": "🤱🏻", - "breast-feeding_medium-dark_skin_tone": "🤱🏾", - "breast-feeding_medium-light_skin_tone": "🤱🏼", - "breast-feeding_medium_skin_tone": "🤱🏽", - "brick": "🧱", - "bride_with_veil": "👰", - "bride_with_veil_dark_skin_tone": "👰🏿", - "bride_with_veil_light_skin_tone": "👰🏻", - "bride_with_veil_medium-dark_skin_tone": "👰🏾", - "bride_with_veil_medium-light_skin_tone": "👰🏼", - "bride_with_veil_medium_skin_tone": "👰🏽", - "bridge_at_night": "🌉", - "briefcase": "💼", - "briefs": "🩲", - "bright_button": "🔆", - "broccoli": "🥦", - "broken_heart": "💔", - "broom": "🧹", - "brown_circle": "🟤", - "brown_heart": "🤎", - "brown_square": "🟫", - "bug": "🐛", - "building_construction": "🏗", - "bullet_train": "🚅", - "burrito": "🌯", - "bus": "🚌", - "bus_stop": "🚏", - "bust_in_silhouette": "👤", - "busts_in_silhouette": "👥", - "butter": "🧈", - "butterfly": "🦋", - "cactus": "🌵", - "calendar": "📆", - "call_me_hand": "🤙", - "call_me_hand_dark_skin_tone": "🤙🏿", - "call_me_hand_light_skin_tone": "🤙🏻", - "call_me_hand_medium-dark_skin_tone": "🤙🏾", - "call_me_hand_medium-light_skin_tone": "🤙🏼", - "call_me_hand_medium_skin_tone": "🤙🏽", - "camel": "🐫", - "camera": "📷", - "camera_with_flash": "📸", - "camping": "🏕", - "candle": "🕯", - "candy": "🍬", - "canned_food": "🥫", - "canoe": "🛶", - "card_file_box": "🗃", - "card_index": "📇", - "card_index_dividers": "🗂", - "carousel_horse": "🎠", - "carp_streamer": "🎏", - "carrot": "🥕", - "castle": "🏰", - "cat": "🐱", - "cat_face": "🐱", - "cat_face_with_tears_of_joy": "😹", - "cat_face_with_wry_smile": "😼", - "chains": "⛓", - "chair": "🪑", - "chart_decreasing": "📉", - "chart_increasing": "📈", - "chart_increasing_with_yen": "💹", - "cheese_wedge": "🧀", - "chequered_flag": "🏁", - "cherries": "🍒", - "cherry_blossom": "🌸", - "chess_pawn": "♟", - "chestnut": "🌰", - "chicken": "🐔", - "child": "🧒", - "child_dark_skin_tone": "🧒🏿", - "child_light_skin_tone": "🧒🏻", - "child_medium-dark_skin_tone": "🧒🏾", - "child_medium-light_skin_tone": "🧒🏼", - "child_medium_skin_tone": "🧒🏽", - "children_crossing": "🚸", - "chipmunk": "🐿", - "chocolate_bar": "🍫", - "chopsticks": "🥢", - "church": "⛪", - "cigarette": "🚬", - "cinema": "🎦", - "circled_m": "Ⓜ", - "circus_tent": "🎪", - "cityscape": "🏙", - "cityscape_at_dusk": "🌆", - "clamp": "🗜", - 
"clapper_board": "🎬", - "clapping_hands": "👏", - "clapping_hands_dark_skin_tone": "👏🏿", - "clapping_hands_light_skin_tone": "👏🏻", - "clapping_hands_medium-dark_skin_tone": "👏🏾", - "clapping_hands_medium-light_skin_tone": "👏🏼", - "clapping_hands_medium_skin_tone": "👏🏽", - "classical_building": "🏛", - "clinking_beer_mugs": "🍻", - "clinking_glasses": "🥂", - "clipboard": "📋", - "clockwise_vertical_arrows": "🔃", - "closed_book": "📕", - "closed_mailbox_with_lowered_flag": "📪", - "closed_mailbox_with_raised_flag": "📫", - "closed_umbrella": "🌂", - "cloud": "☁", - "cloud_with_lightning": "🌩", - "cloud_with_lightning_and_rain": "⛈", - "cloud_with_rain": "🌧", - "cloud_with_snow": "🌨", - "clown_face": "🤡", - "club_suit": "♣", - "clutch_bag": "👝", - "coat": "🧥", - "cocktail_glass": "🍸", - "coconut": "🥥", - "coffin": "⚰", - "cold_face": "🥶", - "collision": "💥", - "comet": "☄", - "compass": "🧭", - "computer_disk": "💽", - "computer_mouse": "🖱", - "confetti_ball": "🎊", - "confounded_face": "😖", - "confused_face": "😕", - "construction": "🚧", - "construction_worker": "👷", - "construction_worker_dark_skin_tone": "👷🏿", - "construction_worker_light_skin_tone": "👷🏻", - "construction_worker_medium-dark_skin_tone": "👷🏾", - "construction_worker_medium-light_skin_tone": "👷🏼", - "construction_worker_medium_skin_tone": "👷🏽", - "control_knobs": "🎛", - "convenience_store": "🏪", - "cooked_rice": "🍚", - "cookie": "🍪", - "cooking": "🍳", - "copyright": "©", - "couch_and_lamp": "🛋", - "counterclockwise_arrows_button": "🔄", - "couple_with_heart": "💑", - "couple_with_heart_man_man": "👨\u200d❤️\u200d👨", - "couple_with_heart_woman_man": "👩\u200d❤️\u200d👨", - "couple_with_heart_woman_woman": "👩\u200d❤️\u200d👩", - "cow": "🐮", - "cow_face": "🐮", - "cowboy_hat_face": "🤠", - "crab": "🦀", - "crayon": "🖍", - "credit_card": "💳", - "crescent_moon": "🌙", - "cricket": "🦗", - "cricket_game": "🏏", - "crocodile": "🐊", - "croissant": "🥐", - "cross_mark": "❌", - "cross_mark_button": "❎", - "crossed_fingers": "🤞", - "crossed_fingers_dark_skin_tone": "🤞🏿", - "crossed_fingers_light_skin_tone": "🤞🏻", - "crossed_fingers_medium-dark_skin_tone": "🤞🏾", - "crossed_fingers_medium-light_skin_tone": "🤞🏼", - "crossed_fingers_medium_skin_tone": "🤞🏽", - "crossed_flags": "🎌", - "crossed_swords": "⚔", - "crown": "👑", - "crying_cat_face": "😿", - "crying_face": "😢", - "crystal_ball": "🔮", - "cucumber": "🥒", - "cupcake": "🧁", - "cup_with_straw": "🥤", - "curling_stone": "🥌", - "curly_hair": "🦱", - "curly-haired_man": "👨\u200d🦱", - "curly-haired_woman": "👩\u200d🦱", - "curly_loop": "➰", - "currency_exchange": "💱", - "curry_rice": "🍛", - "custard": "🍮", - "customs": "🛃", - "cut_of_meat": "🥩", - "cyclone": "🌀", - "dagger": "🗡", - "dango": "🍡", - "dashing_away": "💨", - "deaf_person": "🧏", - "deciduous_tree": "🌳", - "deer": "🦌", - "delivery_truck": "🚚", - "department_store": "🏬", - "derelict_house": "🏚", - "desert": "🏜", - "desert_island": "🏝", - "desktop_computer": "🖥", - "detective": "🕵", - "detective_dark_skin_tone": "🕵🏿", - "detective_light_skin_tone": "🕵🏻", - "detective_medium-dark_skin_tone": "🕵🏾", - "detective_medium-light_skin_tone": "🕵🏼", - "detective_medium_skin_tone": "🕵🏽", - "diamond_suit": "♦", - "diamond_with_a_dot": "💠", - "dim_button": "🔅", - "direct_hit": "🎯", - "disappointed_face": "😞", - "diving_mask": "🤿", - "diya_lamp": "🪔", - "dizzy": "💫", - "dizzy_face": "😵", - "dna": "🧬", - "dog": "🐶", - "dog_face": "🐶", - "dollar_banknote": "💵", - "dolphin": "🐬", - "door": "🚪", - "dotted_six-pointed_star": "🔯", - "double_curly_loop": "➿", - 
"double_exclamation_mark": "‼", - "doughnut": "🍩", - "dove": "🕊", - "down-left_arrow": "↙", - "down-right_arrow": "↘", - "down_arrow": "⬇", - "downcast_face_with_sweat": "😓", - "downwards_button": "🔽", - "dragon": "🐉", - "dragon_face": "🐲", - "dress": "👗", - "drooling_face": "🤤", - "drop_of_blood": "🩸", - "droplet": "💧", - "drum": "🥁", - "duck": "🦆", - "dumpling": "🥟", - "dvd": "📀", - "e-mail": "📧", - "eagle": "🦅", - "ear": "👂", - "ear_dark_skin_tone": "👂🏿", - "ear_light_skin_tone": "👂🏻", - "ear_medium-dark_skin_tone": "👂🏾", - "ear_medium-light_skin_tone": "👂🏼", - "ear_medium_skin_tone": "👂🏽", - "ear_of_corn": "🌽", - "ear_with_hearing_aid": "🦻", - "egg": "🍳", - "eggplant": "🍆", - "eight-pointed_star": "✴", - "eight-spoked_asterisk": "✳", - "eight-thirty": "🕣", - "eight_o’clock": "🕗", - "eject_button": "⏏", - "electric_plug": "🔌", - "elephant": "🐘", - "eleven-thirty": "🕦", - "eleven_o’clock": "🕚", - "elf": "🧝", - "elf_dark_skin_tone": "🧝🏿", - "elf_light_skin_tone": "🧝🏻", - "elf_medium-dark_skin_tone": "🧝🏾", - "elf_medium-light_skin_tone": "🧝🏼", - "elf_medium_skin_tone": "🧝🏽", - "envelope": "✉", - "envelope_with_arrow": "📩", - "euro_banknote": "💶", - "evergreen_tree": "🌲", - "ewe": "🐑", - "exclamation_mark": "❗", - "exclamation_question_mark": "⁉", - "exploding_head": "🤯", - "expressionless_face": "😑", - "eye": "👁", - "eye_in_speech_bubble": "👁️\u200d🗨️", - "eyes": "👀", - "face_blowing_a_kiss": "😘", - "face_savoring_food": "😋", - "face_screaming_in_fear": "😱", - "face_vomiting": "🤮", - "face_with_hand_over_mouth": "🤭", - "face_with_head-bandage": "🤕", - "face_with_medical_mask": "😷", - "face_with_monocle": "🧐", - "face_with_open_mouth": "😮", - "face_with_raised_eyebrow": "🤨", - "face_with_rolling_eyes": "🙄", - "face_with_steam_from_nose": "😤", - "face_with_symbols_on_mouth": "🤬", - "face_with_tears_of_joy": "😂", - "face_with_thermometer": "🤒", - "face_with_tongue": "😛", - "face_without_mouth": "😶", - "factory": "🏭", - "fairy": "🧚", - "fairy_dark_skin_tone": "🧚🏿", - "fairy_light_skin_tone": "🧚🏻", - "fairy_medium-dark_skin_tone": "🧚🏾", - "fairy_medium-light_skin_tone": "🧚🏼", - "fairy_medium_skin_tone": "🧚🏽", - "falafel": "🧆", - "fallen_leaf": "🍂", - "family": "👪", - "family_man_boy": "👨\u200d👦", - "family_man_boy_boy": "👨\u200d👦\u200d👦", - "family_man_girl": "👨\u200d👧", - "family_man_girl_boy": "👨\u200d👧\u200d👦", - "family_man_girl_girl": "👨\u200d👧\u200d👧", - "family_man_man_boy": "👨\u200d👨\u200d👦", - "family_man_man_boy_boy": "👨\u200d👨\u200d👦\u200d👦", - "family_man_man_girl": "👨\u200d👨\u200d👧", - "family_man_man_girl_boy": "👨\u200d👨\u200d👧\u200d👦", - "family_man_man_girl_girl": "👨\u200d👨\u200d👧\u200d👧", - "family_man_woman_boy": "👨\u200d👩\u200d👦", - "family_man_woman_boy_boy": "👨\u200d👩\u200d👦\u200d👦", - "family_man_woman_girl": "👨\u200d👩\u200d👧", - "family_man_woman_girl_boy": "👨\u200d👩\u200d👧\u200d👦", - "family_man_woman_girl_girl": "👨\u200d👩\u200d👧\u200d👧", - "family_woman_boy": "👩\u200d👦", - "family_woman_boy_boy": "👩\u200d👦\u200d👦", - "family_woman_girl": "👩\u200d👧", - "family_woman_girl_boy": "👩\u200d👧\u200d👦", - "family_woman_girl_girl": "👩\u200d👧\u200d👧", - "family_woman_woman_boy": "👩\u200d👩\u200d👦", - "family_woman_woman_boy_boy": "👩\u200d👩\u200d👦\u200d👦", - "family_woman_woman_girl": "👩\u200d👩\u200d👧", - "family_woman_woman_girl_boy": "👩\u200d👩\u200d👧\u200d👦", - "family_woman_woman_girl_girl": "👩\u200d👩\u200d👧\u200d👧", - "fast-forward_button": "⏩", - "fast_down_button": "⏬", - "fast_reverse_button": "⏪", - "fast_up_button": "⏫", - "fax_machine": "📠", - "fearful_face": "😨", - 
"female_sign": "♀", - "ferris_wheel": "🎡", - "ferry": "⛴", - "field_hockey": "🏑", - "file_cabinet": "🗄", - "file_folder": "📁", - "film_frames": "🎞", - "film_projector": "📽", - "fire": "🔥", - "fire_extinguisher": "🧯", - "firecracker": "🧨", - "fire_engine": "🚒", - "fireworks": "🎆", - "first_quarter_moon": "🌓", - "first_quarter_moon_face": "🌛", - "fish": "🐟", - "fish_cake_with_swirl": "🍥", - "fishing_pole": "🎣", - "five-thirty": "🕠", - "five_o’clock": "🕔", - "flag_in_hole": "⛳", - "flamingo": "🦩", - "flashlight": "🔦", - "flat_shoe": "🥿", - "fleur-de-lis": "⚜", - "flexed_biceps": "💪", - "flexed_biceps_dark_skin_tone": "💪🏿", - "flexed_biceps_light_skin_tone": "💪🏻", - "flexed_biceps_medium-dark_skin_tone": "💪🏾", - "flexed_biceps_medium-light_skin_tone": "💪🏼", - "flexed_biceps_medium_skin_tone": "💪🏽", - "floppy_disk": "💾", - "flower_playing_cards": "🎴", - "flushed_face": "😳", - "flying_disc": "🥏", - "flying_saucer": "🛸", - "fog": "🌫", - "foggy": "🌁", - "folded_hands": "🙏", - "folded_hands_dark_skin_tone": "🙏🏿", - "folded_hands_light_skin_tone": "🙏🏻", - "folded_hands_medium-dark_skin_tone": "🙏🏾", - "folded_hands_medium-light_skin_tone": "🙏🏼", - "folded_hands_medium_skin_tone": "🙏🏽", - "foot": "🦶", - "footprints": "👣", - "fork_and_knife": "🍴", - "fork_and_knife_with_plate": "🍽", - "fortune_cookie": "🥠", - "fountain": "⛲", - "fountain_pen": "🖋", - "four-thirty": "🕟", - "four_leaf_clover": "🍀", - "four_o’clock": "🕓", - "fox_face": "🦊", - "framed_picture": "🖼", - "french_fries": "🍟", - "fried_shrimp": "🍤", - "frog_face": "🐸", - "front-facing_baby_chick": "🐥", - "frowning_face": "☹", - "frowning_face_with_open_mouth": "😦", - "fuel_pump": "⛽", - "full_moon": "🌕", - "full_moon_face": "🌝", - "funeral_urn": "⚱", - "game_die": "🎲", - "garlic": "🧄", - "gear": "⚙", - "gem_stone": "💎", - "genie": "🧞", - "ghost": "👻", - "giraffe": "🦒", - "girl": "👧", - "girl_dark_skin_tone": "👧🏿", - "girl_light_skin_tone": "👧🏻", - "girl_medium-dark_skin_tone": "👧🏾", - "girl_medium-light_skin_tone": "👧🏼", - "girl_medium_skin_tone": "👧🏽", - "glass_of_milk": "🥛", - "glasses": "👓", - "globe_showing_americas": "🌎", - "globe_showing_asia-australia": "🌏", - "globe_showing_europe-africa": "🌍", - "globe_with_meridians": "🌐", - "gloves": "🧤", - "glowing_star": "🌟", - "goal_net": "🥅", - "goat": "🐐", - "goblin": "👺", - "goggles": "🥽", - "gorilla": "🦍", - "graduation_cap": "🎓", - "grapes": "🍇", - "green_apple": "🍏", - "green_book": "📗", - "green_circle": "🟢", - "green_heart": "💚", - "green_salad": "🥗", - "green_square": "🟩", - "grimacing_face": "😬", - "grinning_cat_face": "😺", - "grinning_cat_face_with_smiling_eyes": "😸", - "grinning_face": "😀", - "grinning_face_with_big_eyes": "😃", - "grinning_face_with_smiling_eyes": "😄", - "grinning_face_with_sweat": "😅", - "grinning_squinting_face": "😆", - "growing_heart": "💗", - "guard": "💂", - "guard_dark_skin_tone": "💂🏿", - "guard_light_skin_tone": "💂🏻", - "guard_medium-dark_skin_tone": "💂🏾", - "guard_medium-light_skin_tone": "💂🏼", - "guard_medium_skin_tone": "💂🏽", - "guide_dog": "🦮", - "guitar": "🎸", - "hamburger": "🍔", - "hammer": "🔨", - "hammer_and_pick": "⚒", - "hammer_and_wrench": "🛠", - "hamster_face": "🐹", - "hand_with_fingers_splayed": "🖐", - "hand_with_fingers_splayed_dark_skin_tone": "🖐🏿", - "hand_with_fingers_splayed_light_skin_tone": "🖐🏻", - "hand_with_fingers_splayed_medium-dark_skin_tone": "🖐🏾", - "hand_with_fingers_splayed_medium-light_skin_tone": "🖐🏼", - "hand_with_fingers_splayed_medium_skin_tone": "🖐🏽", - "handbag": "👜", - "handshake": "🤝", - "hatching_chick": "🐣", - "headphone": 
"🎧", - "hear-no-evil_monkey": "🙉", - "heart_decoration": "💟", - "heart_suit": "♥", - "heart_with_arrow": "💘", - "heart_with_ribbon": "💝", - "heavy_check_mark": "✔", - "heavy_division_sign": "➗", - "heavy_dollar_sign": "💲", - "heavy_heart_exclamation": "❣", - "heavy_large_circle": "⭕", - "heavy_minus_sign": "➖", - "heavy_multiplication_x": "✖", - "heavy_plus_sign": "➕", - "hedgehog": "🦔", - "helicopter": "🚁", - "herb": "🌿", - "hibiscus": "🌺", - "high-heeled_shoe": "👠", - "high-speed_train": "🚄", - "high_voltage": "⚡", - "hiking_boot": "🥾", - "hindu_temple": "🛕", - "hippopotamus": "🦛", - "hole": "🕳", - "honey_pot": "🍯", - "honeybee": "🐝", - "horizontal_traffic_light": "🚥", - "horse": "🐴", - "horse_face": "🐴", - "horse_racing": "🏇", - "horse_racing_dark_skin_tone": "🏇🏿", - "horse_racing_light_skin_tone": "🏇🏻", - "horse_racing_medium-dark_skin_tone": "🏇🏾", - "horse_racing_medium-light_skin_tone": "🏇🏼", - "horse_racing_medium_skin_tone": "🏇🏽", - "hospital": "🏥", - "hot_beverage": "☕", - "hot_dog": "🌭", - "hot_face": "🥵", - "hot_pepper": "🌶", - "hot_springs": "♨", - "hotel": "🏨", - "hourglass_done": "⌛", - "hourglass_not_done": "⏳", - "house": "🏠", - "house_with_garden": "🏡", - "houses": "🏘", - "hugging_face": "🤗", - "hundred_points": "💯", - "hushed_face": "😯", - "ice": "🧊", - "ice_cream": "🍨", - "ice_hockey": "🏒", - "ice_skate": "⛸", - "inbox_tray": "📥", - "incoming_envelope": "📨", - "index_pointing_up": "☝", - "index_pointing_up_dark_skin_tone": "☝🏿", - "index_pointing_up_light_skin_tone": "☝🏻", - "index_pointing_up_medium-dark_skin_tone": "☝🏾", - "index_pointing_up_medium-light_skin_tone": "☝🏼", - "index_pointing_up_medium_skin_tone": "☝🏽", - "infinity": "♾", - "information": "ℹ", - "input_latin_letters": "🔤", - "input_latin_lowercase": "🔡", - "input_latin_uppercase": "🔠", - "input_numbers": "🔢", - "input_symbols": "🔣", - "jack-o-lantern": "🎃", - "jeans": "👖", - "jigsaw": "🧩", - "joker": "🃏", - "joystick": "🕹", - "kaaba": "🕋", - "kangaroo": "🦘", - "key": "🔑", - "keyboard": "⌨", - "keycap_#": "#️⃣", - "keycap_*": "*️⃣", - "keycap_0": "0️⃣", - "keycap_1": "1️⃣", - "keycap_10": "🔟", - "keycap_2": "2️⃣", - "keycap_3": "3️⃣", - "keycap_4": "4️⃣", - "keycap_5": "5️⃣", - "keycap_6": "6️⃣", - "keycap_7": "7️⃣", - "keycap_8": "8️⃣", - "keycap_9": "9️⃣", - "kick_scooter": "🛴", - "kimono": "👘", - "kiss": "💋", - "kiss_man_man": "👨\u200d❤️\u200d💋\u200d👨", - "kiss_mark": "💋", - "kiss_woman_man": "👩\u200d❤️\u200d💋\u200d👨", - "kiss_woman_woman": "👩\u200d❤️\u200d💋\u200d👩", - "kissing_cat_face": "😽", - "kissing_face": "😗", - "kissing_face_with_closed_eyes": "😚", - "kissing_face_with_smiling_eyes": "😙", - "kitchen_knife": "🔪", - "kite": "🪁", - "kiwi_fruit": "🥝", - "koala": "🐨", - "lab_coat": "🥼", - "label": "🏷", - "lacrosse": "🥍", - "lady_beetle": "🐞", - "laptop_computer": "💻", - "large_blue_diamond": "🔷", - "large_orange_diamond": "🔶", - "last_quarter_moon": "🌗", - "last_quarter_moon_face": "🌜", - "last_track_button": "⏮", - "latin_cross": "✝", - "leaf_fluttering_in_wind": "🍃", - "leafy_green": "🥬", - "ledger": "📒", - "left-facing_fist": "🤛", - "left-facing_fist_dark_skin_tone": "🤛🏿", - "left-facing_fist_light_skin_tone": "🤛🏻", - "left-facing_fist_medium-dark_skin_tone": "🤛🏾", - "left-facing_fist_medium-light_skin_tone": "🤛🏼", - "left-facing_fist_medium_skin_tone": "🤛🏽", - "left-right_arrow": "↔", - "left_arrow": "⬅", - "left_arrow_curving_right": "↪", - "left_luggage": "🛅", - "left_speech_bubble": "🗨", - "leg": "🦵", - "lemon": "🍋", - "leopard": "🐆", - "level_slider": "🎚", - "light_bulb": "💡", - "light_rail": 
"🚈", - "link": "🔗", - "linked_paperclips": "🖇", - "lion_face": "🦁", - "lipstick": "💄", - "litter_in_bin_sign": "🚮", - "lizard": "🦎", - "llama": "🦙", - "lobster": "🦞", - "locked": "🔒", - "locked_with_key": "🔐", - "locked_with_pen": "🔏", - "locomotive": "🚂", - "lollipop": "🍭", - "lotion_bottle": "🧴", - "loudly_crying_face": "😭", - "loudspeaker": "📢", - "love-you_gesture": "🤟", - "love-you_gesture_dark_skin_tone": "🤟🏿", - "love-you_gesture_light_skin_tone": "🤟🏻", - "love-you_gesture_medium-dark_skin_tone": "🤟🏾", - "love-you_gesture_medium-light_skin_tone": "🤟🏼", - "love-you_gesture_medium_skin_tone": "🤟🏽", - "love_hotel": "🏩", - "love_letter": "💌", - "luggage": "🧳", - "lying_face": "🤥", - "mage": "🧙", - "mage_dark_skin_tone": "🧙🏿", - "mage_light_skin_tone": "🧙🏻", - "mage_medium-dark_skin_tone": "🧙🏾", - "mage_medium-light_skin_tone": "🧙🏼", - "mage_medium_skin_tone": "🧙🏽", - "magnet": "🧲", - "magnifying_glass_tilted_left": "🔍", - "magnifying_glass_tilted_right": "🔎", - "mahjong_red_dragon": "🀄", - "male_sign": "♂", - "man": "👨", - "man_and_woman_holding_hands": "👫", - "man_artist": "👨\u200d🎨", - "man_artist_dark_skin_tone": "👨🏿\u200d🎨", - "man_artist_light_skin_tone": "👨🏻\u200d🎨", - "man_artist_medium-dark_skin_tone": "👨🏾\u200d🎨", - "man_artist_medium-light_skin_tone": "👨🏼\u200d🎨", - "man_artist_medium_skin_tone": "👨🏽\u200d🎨", - "man_astronaut": "👨\u200d🚀", - "man_astronaut_dark_skin_tone": "👨🏿\u200d🚀", - "man_astronaut_light_skin_tone": "👨🏻\u200d🚀", - "man_astronaut_medium-dark_skin_tone": "👨🏾\u200d🚀", - "man_astronaut_medium-light_skin_tone": "👨🏼\u200d🚀", - "man_astronaut_medium_skin_tone": "👨🏽\u200d🚀", - "man_biking": "🚴\u200d♂️", - "man_biking_dark_skin_tone": "🚴🏿\u200d♂️", - "man_biking_light_skin_tone": "🚴🏻\u200d♂️", - "man_biking_medium-dark_skin_tone": "🚴🏾\u200d♂️", - "man_biking_medium-light_skin_tone": "🚴🏼\u200d♂️", - "man_biking_medium_skin_tone": "🚴🏽\u200d♂️", - "man_bouncing_ball": "⛹️\u200d♂️", - "man_bouncing_ball_dark_skin_tone": "⛹🏿\u200d♂️", - "man_bouncing_ball_light_skin_tone": "⛹🏻\u200d♂️", - "man_bouncing_ball_medium-dark_skin_tone": "⛹🏾\u200d♂️", - "man_bouncing_ball_medium-light_skin_tone": "⛹🏼\u200d♂️", - "man_bouncing_ball_medium_skin_tone": "⛹🏽\u200d♂️", - "man_bowing": "🙇\u200d♂️", - "man_bowing_dark_skin_tone": "🙇🏿\u200d♂️", - "man_bowing_light_skin_tone": "🙇🏻\u200d♂️", - "man_bowing_medium-dark_skin_tone": "🙇🏾\u200d♂️", - "man_bowing_medium-light_skin_tone": "🙇🏼\u200d♂️", - "man_bowing_medium_skin_tone": "🙇🏽\u200d♂️", - "man_cartwheeling": "🤸\u200d♂️", - "man_cartwheeling_dark_skin_tone": "🤸🏿\u200d♂️", - "man_cartwheeling_light_skin_tone": "🤸🏻\u200d♂️", - "man_cartwheeling_medium-dark_skin_tone": "🤸🏾\u200d♂️", - "man_cartwheeling_medium-light_skin_tone": "🤸🏼\u200d♂️", - "man_cartwheeling_medium_skin_tone": "🤸🏽\u200d♂️", - "man_climbing": "🧗\u200d♂️", - "man_climbing_dark_skin_tone": "🧗🏿\u200d♂️", - "man_climbing_light_skin_tone": "🧗🏻\u200d♂️", - "man_climbing_medium-dark_skin_tone": "🧗🏾\u200d♂️", - "man_climbing_medium-light_skin_tone": "🧗🏼\u200d♂️", - "man_climbing_medium_skin_tone": "🧗🏽\u200d♂️", - "man_construction_worker": "👷\u200d♂️", - "man_construction_worker_dark_skin_tone": "👷🏿\u200d♂️", - "man_construction_worker_light_skin_tone": "👷🏻\u200d♂️", - "man_construction_worker_medium-dark_skin_tone": "👷🏾\u200d♂️", - "man_construction_worker_medium-light_skin_tone": "👷🏼\u200d♂️", - "man_construction_worker_medium_skin_tone": "👷🏽\u200d♂️", - "man_cook": "👨\u200d🍳", - "man_cook_dark_skin_tone": "👨🏿\u200d🍳", - "man_cook_light_skin_tone": "👨🏻\u200d🍳", - 
"man_cook_medium-dark_skin_tone": "👨🏾\u200d🍳", - "man_cook_medium-light_skin_tone": "👨🏼\u200d🍳", - "man_cook_medium_skin_tone": "👨🏽\u200d🍳", - "man_dancing": "🕺", - "man_dancing_dark_skin_tone": "🕺🏿", - "man_dancing_light_skin_tone": "🕺🏻", - "man_dancing_medium-dark_skin_tone": "🕺🏾", - "man_dancing_medium-light_skin_tone": "🕺🏼", - "man_dancing_medium_skin_tone": "🕺🏽", - "man_dark_skin_tone": "👨🏿", - "man_detective": "🕵️\u200d♂️", - "man_detective_dark_skin_tone": "🕵🏿\u200d♂️", - "man_detective_light_skin_tone": "🕵🏻\u200d♂️", - "man_detective_medium-dark_skin_tone": "🕵🏾\u200d♂️", - "man_detective_medium-light_skin_tone": "🕵🏼\u200d♂️", - "man_detective_medium_skin_tone": "🕵🏽\u200d♂️", - "man_elf": "🧝\u200d♂️", - "man_elf_dark_skin_tone": "🧝🏿\u200d♂️", - "man_elf_light_skin_tone": "🧝🏻\u200d♂️", - "man_elf_medium-dark_skin_tone": "🧝🏾\u200d♂️", - "man_elf_medium-light_skin_tone": "🧝🏼\u200d♂️", - "man_elf_medium_skin_tone": "🧝🏽\u200d♂️", - "man_facepalming": "🤦\u200d♂️", - "man_facepalming_dark_skin_tone": "🤦🏿\u200d♂️", - "man_facepalming_light_skin_tone": "🤦🏻\u200d♂️", - "man_facepalming_medium-dark_skin_tone": "🤦🏾\u200d♂️", - "man_facepalming_medium-light_skin_tone": "🤦🏼\u200d♂️", - "man_facepalming_medium_skin_tone": "🤦🏽\u200d♂️", - "man_factory_worker": "👨\u200d🏭", - "man_factory_worker_dark_skin_tone": "👨🏿\u200d🏭", - "man_factory_worker_light_skin_tone": "👨🏻\u200d🏭", - "man_factory_worker_medium-dark_skin_tone": "👨🏾\u200d🏭", - "man_factory_worker_medium-light_skin_tone": "👨🏼\u200d🏭", - "man_factory_worker_medium_skin_tone": "👨🏽\u200d🏭", - "man_fairy": "🧚\u200d♂️", - "man_fairy_dark_skin_tone": "🧚🏿\u200d♂️", - "man_fairy_light_skin_tone": "🧚🏻\u200d♂️", - "man_fairy_medium-dark_skin_tone": "🧚🏾\u200d♂️", - "man_fairy_medium-light_skin_tone": "🧚🏼\u200d♂️", - "man_fairy_medium_skin_tone": "🧚🏽\u200d♂️", - "man_farmer": "👨\u200d🌾", - "man_farmer_dark_skin_tone": "👨🏿\u200d🌾", - "man_farmer_light_skin_tone": "👨🏻\u200d🌾", - "man_farmer_medium-dark_skin_tone": "👨🏾\u200d🌾", - "man_farmer_medium-light_skin_tone": "👨🏼\u200d🌾", - "man_farmer_medium_skin_tone": "👨🏽\u200d🌾", - "man_firefighter": "👨\u200d🚒", - "man_firefighter_dark_skin_tone": "👨🏿\u200d🚒", - "man_firefighter_light_skin_tone": "👨🏻\u200d🚒", - "man_firefighter_medium-dark_skin_tone": "👨🏾\u200d🚒", - "man_firefighter_medium-light_skin_tone": "👨🏼\u200d🚒", - "man_firefighter_medium_skin_tone": "👨🏽\u200d🚒", - "man_frowning": "🙍\u200d♂️", - "man_frowning_dark_skin_tone": "🙍🏿\u200d♂️", - "man_frowning_light_skin_tone": "🙍🏻\u200d♂️", - "man_frowning_medium-dark_skin_tone": "🙍🏾\u200d♂️", - "man_frowning_medium-light_skin_tone": "🙍🏼\u200d♂️", - "man_frowning_medium_skin_tone": "🙍🏽\u200d♂️", - "man_genie": "🧞\u200d♂️", - "man_gesturing_no": "🙅\u200d♂️", - "man_gesturing_no_dark_skin_tone": "🙅🏿\u200d♂️", - "man_gesturing_no_light_skin_tone": "🙅🏻\u200d♂️", - "man_gesturing_no_medium-dark_skin_tone": "🙅🏾\u200d♂️", - "man_gesturing_no_medium-light_skin_tone": "🙅🏼\u200d♂️", - "man_gesturing_no_medium_skin_tone": "🙅🏽\u200d♂️", - "man_gesturing_ok": "🙆\u200d♂️", - "man_gesturing_ok_dark_skin_tone": "🙆🏿\u200d♂️", - "man_gesturing_ok_light_skin_tone": "🙆🏻\u200d♂️", - "man_gesturing_ok_medium-dark_skin_tone": "🙆🏾\u200d♂️", - "man_gesturing_ok_medium-light_skin_tone": "🙆🏼\u200d♂️", - "man_gesturing_ok_medium_skin_tone": "🙆🏽\u200d♂️", - "man_getting_haircut": "💇\u200d♂️", - "man_getting_haircut_dark_skin_tone": "💇🏿\u200d♂️", - "man_getting_haircut_light_skin_tone": "💇🏻\u200d♂️", - "man_getting_haircut_medium-dark_skin_tone": "💇🏾\u200d♂️", - 
"man_getting_haircut_medium-light_skin_tone": "💇🏼\u200d♂️", - "man_getting_haircut_medium_skin_tone": "💇🏽\u200d♂️", - "man_getting_massage": "💆\u200d♂️", - "man_getting_massage_dark_skin_tone": "💆🏿\u200d♂️", - "man_getting_massage_light_skin_tone": "💆🏻\u200d♂️", - "man_getting_massage_medium-dark_skin_tone": "💆🏾\u200d♂️", - "man_getting_massage_medium-light_skin_tone": "💆🏼\u200d♂️", - "man_getting_massage_medium_skin_tone": "💆🏽\u200d♂️", - "man_golfing": "🏌️\u200d♂️", - "man_golfing_dark_skin_tone": "🏌🏿\u200d♂️", - "man_golfing_light_skin_tone": "🏌🏻\u200d♂️", - "man_golfing_medium-dark_skin_tone": "🏌🏾\u200d♂️", - "man_golfing_medium-light_skin_tone": "🏌🏼\u200d♂️", - "man_golfing_medium_skin_tone": "🏌🏽\u200d♂️", - "man_guard": "💂\u200d♂️", - "man_guard_dark_skin_tone": "💂🏿\u200d♂️", - "man_guard_light_skin_tone": "💂🏻\u200d♂️", - "man_guard_medium-dark_skin_tone": "💂🏾\u200d♂️", - "man_guard_medium-light_skin_tone": "💂🏼\u200d♂️", - "man_guard_medium_skin_tone": "💂🏽\u200d♂️", - "man_health_worker": "👨\u200d⚕️", - "man_health_worker_dark_skin_tone": "👨🏿\u200d⚕️", - "man_health_worker_light_skin_tone": "👨🏻\u200d⚕️", - "man_health_worker_medium-dark_skin_tone": "👨🏾\u200d⚕️", - "man_health_worker_medium-light_skin_tone": "👨🏼\u200d⚕️", - "man_health_worker_medium_skin_tone": "👨🏽\u200d⚕️", - "man_in_lotus_position": "🧘\u200d♂️", - "man_in_lotus_position_dark_skin_tone": "🧘🏿\u200d♂️", - "man_in_lotus_position_light_skin_tone": "🧘🏻\u200d♂️", - "man_in_lotus_position_medium-dark_skin_tone": "🧘🏾\u200d♂️", - "man_in_lotus_position_medium-light_skin_tone": "🧘🏼\u200d♂️", - "man_in_lotus_position_medium_skin_tone": "🧘🏽\u200d♂️", - "man_in_manual_wheelchair": "👨\u200d🦽", - "man_in_motorized_wheelchair": "👨\u200d🦼", - "man_in_steamy_room": "🧖\u200d♂️", - "man_in_steamy_room_dark_skin_tone": "🧖🏿\u200d♂️", - "man_in_steamy_room_light_skin_tone": "🧖🏻\u200d♂️", - "man_in_steamy_room_medium-dark_skin_tone": "🧖🏾\u200d♂️", - "man_in_steamy_room_medium-light_skin_tone": "🧖🏼\u200d♂️", - "man_in_steamy_room_medium_skin_tone": "🧖🏽\u200d♂️", - "man_in_suit_levitating": "🕴", - "man_in_suit_levitating_dark_skin_tone": "🕴🏿", - "man_in_suit_levitating_light_skin_tone": "🕴🏻", - "man_in_suit_levitating_medium-dark_skin_tone": "🕴🏾", - "man_in_suit_levitating_medium-light_skin_tone": "🕴🏼", - "man_in_suit_levitating_medium_skin_tone": "🕴🏽", - "man_in_tuxedo": "🤵", - "man_in_tuxedo_dark_skin_tone": "🤵🏿", - "man_in_tuxedo_light_skin_tone": "🤵🏻", - "man_in_tuxedo_medium-dark_skin_tone": "🤵🏾", - "man_in_tuxedo_medium-light_skin_tone": "🤵🏼", - "man_in_tuxedo_medium_skin_tone": "🤵🏽", - "man_judge": "👨\u200d⚖️", - "man_judge_dark_skin_tone": "👨🏿\u200d⚖️", - "man_judge_light_skin_tone": "👨🏻\u200d⚖️", - "man_judge_medium-dark_skin_tone": "👨🏾\u200d⚖️", - "man_judge_medium-light_skin_tone": "👨🏼\u200d⚖️", - "man_judge_medium_skin_tone": "👨🏽\u200d⚖️", - "man_juggling": "🤹\u200d♂️", - "man_juggling_dark_skin_tone": "🤹🏿\u200d♂️", - "man_juggling_light_skin_tone": "🤹🏻\u200d♂️", - "man_juggling_medium-dark_skin_tone": "🤹🏾\u200d♂️", - "man_juggling_medium-light_skin_tone": "🤹🏼\u200d♂️", - "man_juggling_medium_skin_tone": "🤹🏽\u200d♂️", - "man_lifting_weights": "🏋️\u200d♂️", - "man_lifting_weights_dark_skin_tone": "🏋🏿\u200d♂️", - "man_lifting_weights_light_skin_tone": "🏋🏻\u200d♂️", - "man_lifting_weights_medium-dark_skin_tone": "🏋🏾\u200d♂️", - "man_lifting_weights_medium-light_skin_tone": "🏋🏼\u200d♂️", - "man_lifting_weights_medium_skin_tone": "🏋🏽\u200d♂️", - "man_light_skin_tone": "👨🏻", - "man_mage": "🧙\u200d♂️", - "man_mage_dark_skin_tone": 
"🧙🏿\u200d♂️", - "man_mage_light_skin_tone": "🧙🏻\u200d♂️", - "man_mage_medium-dark_skin_tone": "🧙🏾\u200d♂️", - "man_mage_medium-light_skin_tone": "🧙🏼\u200d♂️", - "man_mage_medium_skin_tone": "🧙🏽\u200d♂️", - "man_mechanic": "👨\u200d🔧", - "man_mechanic_dark_skin_tone": "👨🏿\u200d🔧", - "man_mechanic_light_skin_tone": "👨🏻\u200d🔧", - "man_mechanic_medium-dark_skin_tone": "👨🏾\u200d🔧", - "man_mechanic_medium-light_skin_tone": "👨🏼\u200d🔧", - "man_mechanic_medium_skin_tone": "👨🏽\u200d🔧", - "man_medium-dark_skin_tone": "👨🏾", - "man_medium-light_skin_tone": "👨🏼", - "man_medium_skin_tone": "👨🏽", - "man_mountain_biking": "🚵\u200d♂️", - "man_mountain_biking_dark_skin_tone": "🚵🏿\u200d♂️", - "man_mountain_biking_light_skin_tone": "🚵🏻\u200d♂️", - "man_mountain_biking_medium-dark_skin_tone": "🚵🏾\u200d♂️", - "man_mountain_biking_medium-light_skin_tone": "🚵🏼\u200d♂️", - "man_mountain_biking_medium_skin_tone": "🚵🏽\u200d♂️", - "man_office_worker": "👨\u200d💼", - "man_office_worker_dark_skin_tone": "👨🏿\u200d💼", - "man_office_worker_light_skin_tone": "👨🏻\u200d💼", - "man_office_worker_medium-dark_skin_tone": "👨🏾\u200d💼", - "man_office_worker_medium-light_skin_tone": "👨🏼\u200d💼", - "man_office_worker_medium_skin_tone": "👨🏽\u200d💼", - "man_pilot": "👨\u200d✈️", - "man_pilot_dark_skin_tone": "👨🏿\u200d✈️", - "man_pilot_light_skin_tone": "👨🏻\u200d✈️", - "man_pilot_medium-dark_skin_tone": "👨🏾\u200d✈️", - "man_pilot_medium-light_skin_tone": "👨🏼\u200d✈️", - "man_pilot_medium_skin_tone": "👨🏽\u200d✈️", - "man_playing_handball": "🤾\u200d♂️", - "man_playing_handball_dark_skin_tone": "🤾🏿\u200d♂️", - "man_playing_handball_light_skin_tone": "🤾🏻\u200d♂️", - "man_playing_handball_medium-dark_skin_tone": "🤾🏾\u200d♂️", - "man_playing_handball_medium-light_skin_tone": "🤾🏼\u200d♂️", - "man_playing_handball_medium_skin_tone": "🤾🏽\u200d♂️", - "man_playing_water_polo": "🤽\u200d♂️", - "man_playing_water_polo_dark_skin_tone": "🤽🏿\u200d♂️", - "man_playing_water_polo_light_skin_tone": "🤽🏻\u200d♂️", - "man_playing_water_polo_medium-dark_skin_tone": "🤽🏾\u200d♂️", - "man_playing_water_polo_medium-light_skin_tone": "🤽🏼\u200d♂️", - "man_playing_water_polo_medium_skin_tone": "🤽🏽\u200d♂️", - "man_police_officer": "👮\u200d♂️", - "man_police_officer_dark_skin_tone": "👮🏿\u200d♂️", - "man_police_officer_light_skin_tone": "👮🏻\u200d♂️", - "man_police_officer_medium-dark_skin_tone": "👮🏾\u200d♂️", - "man_police_officer_medium-light_skin_tone": "👮🏼\u200d♂️", - "man_police_officer_medium_skin_tone": "👮🏽\u200d♂️", - "man_pouting": "🙎\u200d♂️", - "man_pouting_dark_skin_tone": "🙎🏿\u200d♂️", - "man_pouting_light_skin_tone": "🙎🏻\u200d♂️", - "man_pouting_medium-dark_skin_tone": "🙎🏾\u200d♂️", - "man_pouting_medium-light_skin_tone": "🙎🏼\u200d♂️", - "man_pouting_medium_skin_tone": "🙎🏽\u200d♂️", - "man_raising_hand": "🙋\u200d♂️", - "man_raising_hand_dark_skin_tone": "🙋🏿\u200d♂️", - "man_raising_hand_light_skin_tone": "🙋🏻\u200d♂️", - "man_raising_hand_medium-dark_skin_tone": "🙋🏾\u200d♂️", - "man_raising_hand_medium-light_skin_tone": "🙋🏼\u200d♂️", - "man_raising_hand_medium_skin_tone": "🙋🏽\u200d♂️", - "man_rowing_boat": "🚣\u200d♂️", - "man_rowing_boat_dark_skin_tone": "🚣🏿\u200d♂️", - "man_rowing_boat_light_skin_tone": "🚣🏻\u200d♂️", - "man_rowing_boat_medium-dark_skin_tone": "🚣🏾\u200d♂️", - "man_rowing_boat_medium-light_skin_tone": "🚣🏼\u200d♂️", - "man_rowing_boat_medium_skin_tone": "🚣🏽\u200d♂️", - "man_running": "🏃\u200d♂️", - "man_running_dark_skin_tone": "🏃🏿\u200d♂️", - "man_running_light_skin_tone": "🏃🏻\u200d♂️", - "man_running_medium-dark_skin_tone": "🏃🏾\u200d♂️", - 
"man_running_medium-light_skin_tone": "🏃🏼\u200d♂️", - "man_running_medium_skin_tone": "🏃🏽\u200d♂️", - "man_scientist": "👨\u200d🔬", - "man_scientist_dark_skin_tone": "👨🏿\u200d🔬", - "man_scientist_light_skin_tone": "👨🏻\u200d🔬", - "man_scientist_medium-dark_skin_tone": "👨🏾\u200d🔬", - "man_scientist_medium-light_skin_tone": "👨🏼\u200d🔬", - "man_scientist_medium_skin_tone": "👨🏽\u200d🔬", - "man_shrugging": "🤷\u200d♂️", - "man_shrugging_dark_skin_tone": "🤷🏿\u200d♂️", - "man_shrugging_light_skin_tone": "🤷🏻\u200d♂️", - "man_shrugging_medium-dark_skin_tone": "🤷🏾\u200d♂️", - "man_shrugging_medium-light_skin_tone": "🤷🏼\u200d♂️", - "man_shrugging_medium_skin_tone": "🤷🏽\u200d♂️", - "man_singer": "👨\u200d🎤", - "man_singer_dark_skin_tone": "👨🏿\u200d🎤", - "man_singer_light_skin_tone": "👨🏻\u200d🎤", - "man_singer_medium-dark_skin_tone": "👨🏾\u200d🎤", - "man_singer_medium-light_skin_tone": "👨🏼\u200d🎤", - "man_singer_medium_skin_tone": "👨🏽\u200d🎤", - "man_student": "👨\u200d🎓", - "man_student_dark_skin_tone": "👨🏿\u200d🎓", - "man_student_light_skin_tone": "👨🏻\u200d🎓", - "man_student_medium-dark_skin_tone": "👨🏾\u200d🎓", - "man_student_medium-light_skin_tone": "👨🏼\u200d🎓", - "man_student_medium_skin_tone": "👨🏽\u200d🎓", - "man_surfing": "🏄\u200d♂️", - "man_surfing_dark_skin_tone": "🏄🏿\u200d♂️", - "man_surfing_light_skin_tone": "🏄🏻\u200d♂️", - "man_surfing_medium-dark_skin_tone": "🏄🏾\u200d♂️", - "man_surfing_medium-light_skin_tone": "🏄🏼\u200d♂️", - "man_surfing_medium_skin_tone": "🏄🏽\u200d♂️", - "man_swimming": "🏊\u200d♂️", - "man_swimming_dark_skin_tone": "🏊🏿\u200d♂️", - "man_swimming_light_skin_tone": "🏊🏻\u200d♂️", - "man_swimming_medium-dark_skin_tone": "🏊🏾\u200d♂️", - "man_swimming_medium-light_skin_tone": "🏊🏼\u200d♂️", - "man_swimming_medium_skin_tone": "🏊🏽\u200d♂️", - "man_teacher": "👨\u200d🏫", - "man_teacher_dark_skin_tone": "👨🏿\u200d🏫", - "man_teacher_light_skin_tone": "👨🏻\u200d🏫", - "man_teacher_medium-dark_skin_tone": "👨🏾\u200d🏫", - "man_teacher_medium-light_skin_tone": "👨🏼\u200d🏫", - "man_teacher_medium_skin_tone": "👨🏽\u200d🏫", - "man_technologist": "👨\u200d💻", - "man_technologist_dark_skin_tone": "👨🏿\u200d💻", - "man_technologist_light_skin_tone": "👨🏻\u200d💻", - "man_technologist_medium-dark_skin_tone": "👨🏾\u200d💻", - "man_technologist_medium-light_skin_tone": "👨🏼\u200d💻", - "man_technologist_medium_skin_tone": "👨🏽\u200d💻", - "man_tipping_hand": "💁\u200d♂️", - "man_tipping_hand_dark_skin_tone": "💁🏿\u200d♂️", - "man_tipping_hand_light_skin_tone": "💁🏻\u200d♂️", - "man_tipping_hand_medium-dark_skin_tone": "💁🏾\u200d♂️", - "man_tipping_hand_medium-light_skin_tone": "💁🏼\u200d♂️", - "man_tipping_hand_medium_skin_tone": "💁🏽\u200d♂️", - "man_vampire": "🧛\u200d♂️", - "man_vampire_dark_skin_tone": "🧛🏿\u200d♂️", - "man_vampire_light_skin_tone": "🧛🏻\u200d♂️", - "man_vampire_medium-dark_skin_tone": "🧛🏾\u200d♂️", - "man_vampire_medium-light_skin_tone": "🧛🏼\u200d♂️", - "man_vampire_medium_skin_tone": "🧛🏽\u200d♂️", - "man_walking": "🚶\u200d♂️", - "man_walking_dark_skin_tone": "🚶🏿\u200d♂️", - "man_walking_light_skin_tone": "🚶🏻\u200d♂️", - "man_walking_medium-dark_skin_tone": "🚶🏾\u200d♂️", - "man_walking_medium-light_skin_tone": "🚶🏼\u200d♂️", - "man_walking_medium_skin_tone": "🚶🏽\u200d♂️", - "man_wearing_turban": "👳\u200d♂️", - "man_wearing_turban_dark_skin_tone": "👳🏿\u200d♂️", - "man_wearing_turban_light_skin_tone": "👳🏻\u200d♂️", - "man_wearing_turban_medium-dark_skin_tone": "👳🏾\u200d♂️", - "man_wearing_turban_medium-light_skin_tone": "👳🏼\u200d♂️", - "man_wearing_turban_medium_skin_tone": "👳🏽\u200d♂️", - 
"man_with_probing_cane": "👨\u200d🦯", - "man_with_chinese_cap": "👲", - "man_with_chinese_cap_dark_skin_tone": "👲🏿", - "man_with_chinese_cap_light_skin_tone": "👲🏻", - "man_with_chinese_cap_medium-dark_skin_tone": "👲🏾", - "man_with_chinese_cap_medium-light_skin_tone": "👲🏼", - "man_with_chinese_cap_medium_skin_tone": "👲🏽", - "man_zombie": "🧟\u200d♂️", - "mango": "🥭", - "mantelpiece_clock": "🕰", - "manual_wheelchair": "🦽", - "man’s_shoe": "👞", - "map_of_japan": "🗾", - "maple_leaf": "🍁", - "martial_arts_uniform": "🥋", - "mate": "🧉", - "meat_on_bone": "🍖", - "mechanical_arm": "🦾", - "mechanical_leg": "🦿", - "medical_symbol": "⚕", - "megaphone": "📣", - "melon": "🍈", - "memo": "📝", - "men_with_bunny_ears": "👯\u200d♂️", - "men_wrestling": "🤼\u200d♂️", - "menorah": "🕎", - "men’s_room": "🚹", - "mermaid": "🧜\u200d♀️", - "mermaid_dark_skin_tone": "🧜🏿\u200d♀️", - "mermaid_light_skin_tone": "🧜🏻\u200d♀️", - "mermaid_medium-dark_skin_tone": "🧜🏾\u200d♀️", - "mermaid_medium-light_skin_tone": "🧜🏼\u200d♀️", - "mermaid_medium_skin_tone": "🧜🏽\u200d♀️", - "merman": "🧜\u200d♂️", - "merman_dark_skin_tone": "🧜🏿\u200d♂️", - "merman_light_skin_tone": "🧜🏻\u200d♂️", - "merman_medium-dark_skin_tone": "🧜🏾\u200d♂️", - "merman_medium-light_skin_tone": "🧜🏼\u200d♂️", - "merman_medium_skin_tone": "🧜🏽\u200d♂️", - "merperson": "🧜", - "merperson_dark_skin_tone": "🧜🏿", - "merperson_light_skin_tone": "🧜🏻", - "merperson_medium-dark_skin_tone": "🧜🏾", - "merperson_medium-light_skin_tone": "🧜🏼", - "merperson_medium_skin_tone": "🧜🏽", - "metro": "🚇", - "microbe": "🦠", - "microphone": "🎤", - "microscope": "🔬", - "middle_finger": "🖕", - "middle_finger_dark_skin_tone": "🖕🏿", - "middle_finger_light_skin_tone": "🖕🏻", - "middle_finger_medium-dark_skin_tone": "🖕🏾", - "middle_finger_medium-light_skin_tone": "🖕🏼", - "middle_finger_medium_skin_tone": "🖕🏽", - "military_medal": "🎖", - "milky_way": "🌌", - "minibus": "🚐", - "moai": "🗿", - "mobile_phone": "📱", - "mobile_phone_off": "📴", - "mobile_phone_with_arrow": "📲", - "money-mouth_face": "🤑", - "money_bag": "💰", - "money_with_wings": "💸", - "monkey": "🐒", - "monkey_face": "🐵", - "monorail": "🚝", - "moon_cake": "🥮", - "moon_viewing_ceremony": "🎑", - "mosque": "🕌", - "mosquito": "🦟", - "motor_boat": "🛥", - "motor_scooter": "🛵", - "motorcycle": "🏍", - "motorized_wheelchair": "🦼", - "motorway": "🛣", - "mount_fuji": "🗻", - "mountain": "⛰", - "mountain_cableway": "🚠", - "mountain_railway": "🚞", - "mouse": "🐭", - "mouse_face": "🐭", - "mouth": "👄", - "movie_camera": "🎥", - "mushroom": "🍄", - "musical_keyboard": "🎹", - "musical_note": "🎵", - "musical_notes": "🎶", - "musical_score": "🎼", - "muted_speaker": "🔇", - "nail_polish": "💅", - "nail_polish_dark_skin_tone": "💅🏿", - "nail_polish_light_skin_tone": "💅🏻", - "nail_polish_medium-dark_skin_tone": "💅🏾", - "nail_polish_medium-light_skin_tone": "💅🏼", - "nail_polish_medium_skin_tone": "💅🏽", - "name_badge": "📛", - "national_park": "🏞", - "nauseated_face": "🤢", - "nazar_amulet": "🧿", - "necktie": "👔", - "nerd_face": "🤓", - "neutral_face": "😐", - "new_moon": "🌑", - "new_moon_face": "🌚", - "newspaper": "📰", - "next_track_button": "⏭", - "night_with_stars": "🌃", - "nine-thirty": "🕤", - "nine_o’clock": "🕘", - "no_bicycles": "🚳", - "no_entry": "⛔", - "no_littering": "🚯", - "no_mobile_phones": "📵", - "no_one_under_eighteen": "🔞", - "no_pedestrians": "🚷", - "no_smoking": "🚭", - "non-potable_water": "🚱", - "nose": "👃", - "nose_dark_skin_tone": "👃🏿", - "nose_light_skin_tone": "👃🏻", - "nose_medium-dark_skin_tone": "👃🏾", - "nose_medium-light_skin_tone": "👃🏼", - 
"nose_medium_skin_tone": "👃🏽", - "notebook": "📓", - "notebook_with_decorative_cover": "📔", - "nut_and_bolt": "🔩", - "octopus": "🐙", - "oden": "🍢", - "office_building": "🏢", - "ogre": "👹", - "oil_drum": "🛢", - "old_key": "🗝", - "old_man": "👴", - "old_man_dark_skin_tone": "👴🏿", - "old_man_light_skin_tone": "👴🏻", - "old_man_medium-dark_skin_tone": "👴🏾", - "old_man_medium-light_skin_tone": "👴🏼", - "old_man_medium_skin_tone": "👴🏽", - "old_woman": "👵", - "old_woman_dark_skin_tone": "👵🏿", - "old_woman_light_skin_tone": "👵🏻", - "old_woman_medium-dark_skin_tone": "👵🏾", - "old_woman_medium-light_skin_tone": "👵🏼", - "old_woman_medium_skin_tone": "👵🏽", - "older_adult": "🧓", - "older_adult_dark_skin_tone": "🧓🏿", - "older_adult_light_skin_tone": "🧓🏻", - "older_adult_medium-dark_skin_tone": "🧓🏾", - "older_adult_medium-light_skin_tone": "🧓🏼", - "older_adult_medium_skin_tone": "🧓🏽", - "om": "🕉", - "oncoming_automobile": "🚘", - "oncoming_bus": "🚍", - "oncoming_fist": "👊", - "oncoming_fist_dark_skin_tone": "👊🏿", - "oncoming_fist_light_skin_tone": "👊🏻", - "oncoming_fist_medium-dark_skin_tone": "👊🏾", - "oncoming_fist_medium-light_skin_tone": "👊🏼", - "oncoming_fist_medium_skin_tone": "👊🏽", - "oncoming_police_car": "🚔", - "oncoming_taxi": "🚖", - "one-piece_swimsuit": "🩱", - "one-thirty": "🕜", - "one_o’clock": "🕐", - "onion": "🧅", - "open_book": "📖", - "open_file_folder": "📂", - "open_hands": "👐", - "open_hands_dark_skin_tone": "👐🏿", - "open_hands_light_skin_tone": "👐🏻", - "open_hands_medium-dark_skin_tone": "👐🏾", - "open_hands_medium-light_skin_tone": "👐🏼", - "open_hands_medium_skin_tone": "👐🏽", - "open_mailbox_with_lowered_flag": "📭", - "open_mailbox_with_raised_flag": "📬", - "optical_disk": "💿", - "orange_book": "📙", - "orange_circle": "🟠", - "orange_heart": "🧡", - "orange_square": "🟧", - "orangutan": "🦧", - "orthodox_cross": "☦", - "otter": "🦦", - "outbox_tray": "📤", - "owl": "🦉", - "ox": "🐂", - "oyster": "🦪", - "package": "📦", - "page_facing_up": "📄", - "page_with_curl": "📃", - "pager": "📟", - "paintbrush": "🖌", - "palm_tree": "🌴", - "palms_up_together": "🤲", - "palms_up_together_dark_skin_tone": "🤲🏿", - "palms_up_together_light_skin_tone": "🤲🏻", - "palms_up_together_medium-dark_skin_tone": "🤲🏾", - "palms_up_together_medium-light_skin_tone": "🤲🏼", - "palms_up_together_medium_skin_tone": "🤲🏽", - "pancakes": "🥞", - "panda_face": "🐼", - "paperclip": "📎", - "parrot": "🦜", - "part_alternation_mark": "〽", - "party_popper": "🎉", - "partying_face": "🥳", - "passenger_ship": "🛳", - "passport_control": "🛂", - "pause_button": "⏸", - "paw_prints": "🐾", - "peace_symbol": "☮", - "peach": "🍑", - "peacock": "🦚", - "peanuts": "🥜", - "pear": "🍐", - "pen": "🖊", - "pencil": "📝", - "penguin": "🐧", - "pensive_face": "😔", - "people_holding_hands": "🧑\u200d🤝\u200d🧑", - "people_with_bunny_ears": "👯", - "people_wrestling": "🤼", - "performing_arts": "🎭", - "persevering_face": "😣", - "person_biking": "🚴", - "person_biking_dark_skin_tone": "🚴🏿", - "person_biking_light_skin_tone": "🚴🏻", - "person_biking_medium-dark_skin_tone": "🚴🏾", - "person_biking_medium-light_skin_tone": "🚴🏼", - "person_biking_medium_skin_tone": "🚴🏽", - "person_bouncing_ball": "⛹", - "person_bouncing_ball_dark_skin_tone": "⛹🏿", - "person_bouncing_ball_light_skin_tone": "⛹🏻", - "person_bouncing_ball_medium-dark_skin_tone": "⛹🏾", - "person_bouncing_ball_medium-light_skin_tone": "⛹🏼", - "person_bouncing_ball_medium_skin_tone": "⛹🏽", - "person_bowing": "🙇", - "person_bowing_dark_skin_tone": "🙇🏿", - "person_bowing_light_skin_tone": "🙇🏻", - 
"person_bowing_medium-dark_skin_tone": "🙇🏾", - "person_bowing_medium-light_skin_tone": "🙇🏼", - "person_bowing_medium_skin_tone": "🙇🏽", - "person_cartwheeling": "🤸", - "person_cartwheeling_dark_skin_tone": "🤸🏿", - "person_cartwheeling_light_skin_tone": "🤸🏻", - "person_cartwheeling_medium-dark_skin_tone": "🤸🏾", - "person_cartwheeling_medium-light_skin_tone": "🤸🏼", - "person_cartwheeling_medium_skin_tone": "🤸🏽", - "person_climbing": "🧗", - "person_climbing_dark_skin_tone": "🧗🏿", - "person_climbing_light_skin_tone": "🧗🏻", - "person_climbing_medium-dark_skin_tone": "🧗🏾", - "person_climbing_medium-light_skin_tone": "🧗🏼", - "person_climbing_medium_skin_tone": "🧗🏽", - "person_facepalming": "🤦", - "person_facepalming_dark_skin_tone": "🤦🏿", - "person_facepalming_light_skin_tone": "🤦🏻", - "person_facepalming_medium-dark_skin_tone": "🤦🏾", - "person_facepalming_medium-light_skin_tone": "🤦🏼", - "person_facepalming_medium_skin_tone": "🤦🏽", - "person_fencing": "🤺", - "person_frowning": "🙍", - "person_frowning_dark_skin_tone": "🙍🏿", - "person_frowning_light_skin_tone": "🙍🏻", - "person_frowning_medium-dark_skin_tone": "🙍🏾", - "person_frowning_medium-light_skin_tone": "🙍🏼", - "person_frowning_medium_skin_tone": "🙍🏽", - "person_gesturing_no": "🙅", - "person_gesturing_no_dark_skin_tone": "🙅🏿", - "person_gesturing_no_light_skin_tone": "🙅🏻", - "person_gesturing_no_medium-dark_skin_tone": "🙅🏾", - "person_gesturing_no_medium-light_skin_tone": "🙅🏼", - "person_gesturing_no_medium_skin_tone": "🙅🏽", - "person_gesturing_ok": "🙆", - "person_gesturing_ok_dark_skin_tone": "🙆🏿", - "person_gesturing_ok_light_skin_tone": "🙆🏻", - "person_gesturing_ok_medium-dark_skin_tone": "🙆🏾", - "person_gesturing_ok_medium-light_skin_tone": "🙆🏼", - "person_gesturing_ok_medium_skin_tone": "🙆🏽", - "person_getting_haircut": "💇", - "person_getting_haircut_dark_skin_tone": "💇🏿", - "person_getting_haircut_light_skin_tone": "💇🏻", - "person_getting_haircut_medium-dark_skin_tone": "💇🏾", - "person_getting_haircut_medium-light_skin_tone": "💇🏼", - "person_getting_haircut_medium_skin_tone": "💇🏽", - "person_getting_massage": "💆", - "person_getting_massage_dark_skin_tone": "💆🏿", - "person_getting_massage_light_skin_tone": "💆🏻", - "person_getting_massage_medium-dark_skin_tone": "💆🏾", - "person_getting_massage_medium-light_skin_tone": "💆🏼", - "person_getting_massage_medium_skin_tone": "💆🏽", - "person_golfing": "🏌", - "person_golfing_dark_skin_tone": "🏌🏿", - "person_golfing_light_skin_tone": "🏌🏻", - "person_golfing_medium-dark_skin_tone": "🏌🏾", - "person_golfing_medium-light_skin_tone": "🏌🏼", - "person_golfing_medium_skin_tone": "🏌🏽", - "person_in_bed": "🛌", - "person_in_bed_dark_skin_tone": "🛌🏿", - "person_in_bed_light_skin_tone": "🛌🏻", - "person_in_bed_medium-dark_skin_tone": "🛌🏾", - "person_in_bed_medium-light_skin_tone": "🛌🏼", - "person_in_bed_medium_skin_tone": "🛌🏽", - "person_in_lotus_position": "🧘", - "person_in_lotus_position_dark_skin_tone": "🧘🏿", - "person_in_lotus_position_light_skin_tone": "🧘🏻", - "person_in_lotus_position_medium-dark_skin_tone": "🧘🏾", - "person_in_lotus_position_medium-light_skin_tone": "🧘🏼", - "person_in_lotus_position_medium_skin_tone": "🧘🏽", - "person_in_steamy_room": "🧖", - "person_in_steamy_room_dark_skin_tone": "🧖🏿", - "person_in_steamy_room_light_skin_tone": "🧖🏻", - "person_in_steamy_room_medium-dark_skin_tone": "🧖🏾", - "person_in_steamy_room_medium-light_skin_tone": "🧖🏼", - "person_in_steamy_room_medium_skin_tone": "🧖🏽", - "person_juggling": "🤹", - "person_juggling_dark_skin_tone": "🤹🏿", - 
"person_juggling_light_skin_tone": "🤹🏻", - "person_juggling_medium-dark_skin_tone": "🤹🏾", - "person_juggling_medium-light_skin_tone": "🤹🏼", - "person_juggling_medium_skin_tone": "🤹🏽", - "person_kneeling": "🧎", - "person_lifting_weights": "🏋", - "person_lifting_weights_dark_skin_tone": "🏋🏿", - "person_lifting_weights_light_skin_tone": "🏋🏻", - "person_lifting_weights_medium-dark_skin_tone": "🏋🏾", - "person_lifting_weights_medium-light_skin_tone": "🏋🏼", - "person_lifting_weights_medium_skin_tone": "🏋🏽", - "person_mountain_biking": "🚵", - "person_mountain_biking_dark_skin_tone": "🚵🏿", - "person_mountain_biking_light_skin_tone": "🚵🏻", - "person_mountain_biking_medium-dark_skin_tone": "🚵🏾", - "person_mountain_biking_medium-light_skin_tone": "🚵🏼", - "person_mountain_biking_medium_skin_tone": "🚵🏽", - "person_playing_handball": "🤾", - "person_playing_handball_dark_skin_tone": "🤾🏿", - "person_playing_handball_light_skin_tone": "🤾🏻", - "person_playing_handball_medium-dark_skin_tone": "🤾🏾", - "person_playing_handball_medium-light_skin_tone": "🤾🏼", - "person_playing_handball_medium_skin_tone": "🤾🏽", - "person_playing_water_polo": "🤽", - "person_playing_water_polo_dark_skin_tone": "🤽🏿", - "person_playing_water_polo_light_skin_tone": "🤽🏻", - "person_playing_water_polo_medium-dark_skin_tone": "🤽🏾", - "person_playing_water_polo_medium-light_skin_tone": "🤽🏼", - "person_playing_water_polo_medium_skin_tone": "🤽🏽", - "person_pouting": "🙎", - "person_pouting_dark_skin_tone": "🙎🏿", - "person_pouting_light_skin_tone": "🙎🏻", - "person_pouting_medium-dark_skin_tone": "🙎🏾", - "person_pouting_medium-light_skin_tone": "🙎🏼", - "person_pouting_medium_skin_tone": "🙎🏽", - "person_raising_hand": "🙋", - "person_raising_hand_dark_skin_tone": "🙋🏿", - "person_raising_hand_light_skin_tone": "🙋🏻", - "person_raising_hand_medium-dark_skin_tone": "🙋🏾", - "person_raising_hand_medium-light_skin_tone": "🙋🏼", - "person_raising_hand_medium_skin_tone": "🙋🏽", - "person_rowing_boat": "🚣", - "person_rowing_boat_dark_skin_tone": "🚣🏿", - "person_rowing_boat_light_skin_tone": "🚣🏻", - "person_rowing_boat_medium-dark_skin_tone": "🚣🏾", - "person_rowing_boat_medium-light_skin_tone": "🚣🏼", - "person_rowing_boat_medium_skin_tone": "🚣🏽", - "person_running": "🏃", - "person_running_dark_skin_tone": "🏃🏿", - "person_running_light_skin_tone": "🏃🏻", - "person_running_medium-dark_skin_tone": "🏃🏾", - "person_running_medium-light_skin_tone": "🏃🏼", - "person_running_medium_skin_tone": "🏃🏽", - "person_shrugging": "🤷", - "person_shrugging_dark_skin_tone": "🤷🏿", - "person_shrugging_light_skin_tone": "🤷🏻", - "person_shrugging_medium-dark_skin_tone": "🤷🏾", - "person_shrugging_medium-light_skin_tone": "🤷🏼", - "person_shrugging_medium_skin_tone": "🤷🏽", - "person_standing": "🧍", - "person_surfing": "🏄", - "person_surfing_dark_skin_tone": "🏄🏿", - "person_surfing_light_skin_tone": "🏄🏻", - "person_surfing_medium-dark_skin_tone": "🏄🏾", - "person_surfing_medium-light_skin_tone": "🏄🏼", - "person_surfing_medium_skin_tone": "🏄🏽", - "person_swimming": "🏊", - "person_swimming_dark_skin_tone": "🏊🏿", - "person_swimming_light_skin_tone": "🏊🏻", - "person_swimming_medium-dark_skin_tone": "🏊🏾", - "person_swimming_medium-light_skin_tone": "🏊🏼", - "person_swimming_medium_skin_tone": "🏊🏽", - "person_taking_bath": "🛀", - "person_taking_bath_dark_skin_tone": "🛀🏿", - "person_taking_bath_light_skin_tone": "🛀🏻", - "person_taking_bath_medium-dark_skin_tone": "🛀🏾", - "person_taking_bath_medium-light_skin_tone": "🛀🏼", - "person_taking_bath_medium_skin_tone": "🛀🏽", - "person_tipping_hand": "💁", 
- "person_tipping_hand_dark_skin_tone": "💁🏿", - "person_tipping_hand_light_skin_tone": "💁🏻", - "person_tipping_hand_medium-dark_skin_tone": "💁🏾", - "person_tipping_hand_medium-light_skin_tone": "💁🏼", - "person_tipping_hand_medium_skin_tone": "💁🏽", - "person_walking": "🚶", - "person_walking_dark_skin_tone": "🚶🏿", - "person_walking_light_skin_tone": "🚶🏻", - "person_walking_medium-dark_skin_tone": "🚶🏾", - "person_walking_medium-light_skin_tone": "🚶🏼", - "person_walking_medium_skin_tone": "🚶🏽", - "person_wearing_turban": "👳", - "person_wearing_turban_dark_skin_tone": "👳🏿", - "person_wearing_turban_light_skin_tone": "👳🏻", - "person_wearing_turban_medium-dark_skin_tone": "👳🏾", - "person_wearing_turban_medium-light_skin_tone": "👳🏼", - "person_wearing_turban_medium_skin_tone": "👳🏽", - "petri_dish": "🧫", - "pick": "⛏", - "pie": "🥧", - "pig": "🐷", - "pig_face": "🐷", - "pig_nose": "🐽", - "pile_of_poo": "💩", - "pill": "💊", - "pinching_hand": "🤏", - "pine_decoration": "🎍", - "pineapple": "🍍", - "ping_pong": "🏓", - "pirate_flag": "🏴\u200d☠️", - "pistol": "🔫", - "pizza": "🍕", - "place_of_worship": "🛐", - "play_button": "▶", - "play_or_pause_button": "⏯", - "pleading_face": "🥺", - "police_car": "🚓", - "police_car_light": "🚨", - "police_officer": "👮", - "police_officer_dark_skin_tone": "👮🏿", - "police_officer_light_skin_tone": "👮🏻", - "police_officer_medium-dark_skin_tone": "👮🏾", - "police_officer_medium-light_skin_tone": "👮🏼", - "police_officer_medium_skin_tone": "👮🏽", - "poodle": "🐩", - "pool_8_ball": "🎱", - "popcorn": "🍿", - "post_office": "🏣", - "postal_horn": "📯", - "postbox": "📮", - "pot_of_food": "🍲", - "potable_water": "🚰", - "potato": "🥔", - "poultry_leg": "🍗", - "pound_banknote": "💷", - "pouting_cat_face": "😾", - "pouting_face": "😡", - "prayer_beads": "📿", - "pregnant_woman": "🤰", - "pregnant_woman_dark_skin_tone": "🤰🏿", - "pregnant_woman_light_skin_tone": "🤰🏻", - "pregnant_woman_medium-dark_skin_tone": "🤰🏾", - "pregnant_woman_medium-light_skin_tone": "🤰🏼", - "pregnant_woman_medium_skin_tone": "🤰🏽", - "pretzel": "🥨", - "probing_cane": "🦯", - "prince": "🤴", - "prince_dark_skin_tone": "🤴🏿", - "prince_light_skin_tone": "🤴🏻", - "prince_medium-dark_skin_tone": "🤴🏾", - "prince_medium-light_skin_tone": "🤴🏼", - "prince_medium_skin_tone": "🤴🏽", - "princess": "👸", - "princess_dark_skin_tone": "👸🏿", - "princess_light_skin_tone": "👸🏻", - "princess_medium-dark_skin_tone": "👸🏾", - "princess_medium-light_skin_tone": "👸🏼", - "princess_medium_skin_tone": "👸🏽", - "printer": "🖨", - "prohibited": "🚫", - "purple_circle": "🟣", - "purple_heart": "💜", - "purple_square": "🟪", - "purse": "👛", - "pushpin": "📌", - "question_mark": "❓", - "rabbit": "🐰", - "rabbit_face": "🐰", - "raccoon": "🦝", - "racing_car": "🏎", - "radio": "📻", - "radio_button": "🔘", - "radioactive": "☢", - "railway_car": "🚃", - "railway_track": "🛤", - "rainbow": "🌈", - "rainbow_flag": "🏳️\u200d🌈", - "raised_back_of_hand": "🤚", - "raised_back_of_hand_dark_skin_tone": "🤚🏿", - "raised_back_of_hand_light_skin_tone": "🤚🏻", - "raised_back_of_hand_medium-dark_skin_tone": "🤚🏾", - "raised_back_of_hand_medium-light_skin_tone": "🤚🏼", - "raised_back_of_hand_medium_skin_tone": "🤚🏽", - "raised_fist": "✊", - "raised_fist_dark_skin_tone": "✊🏿", - "raised_fist_light_skin_tone": "✊🏻", - "raised_fist_medium-dark_skin_tone": "✊🏾", - "raised_fist_medium-light_skin_tone": "✊🏼", - "raised_fist_medium_skin_tone": "✊🏽", - "raised_hand": "✋", - "raised_hand_dark_skin_tone": "✋🏿", - "raised_hand_light_skin_tone": "✋🏻", - "raised_hand_medium-dark_skin_tone": "✋🏾", - 
"raised_hand_medium-light_skin_tone": "✋🏼", - "raised_hand_medium_skin_tone": "✋🏽", - "raising_hands": "🙌", - "raising_hands_dark_skin_tone": "🙌🏿", - "raising_hands_light_skin_tone": "🙌🏻", - "raising_hands_medium-dark_skin_tone": "🙌🏾", - "raising_hands_medium-light_skin_tone": "🙌🏼", - "raising_hands_medium_skin_tone": "🙌🏽", - "ram": "🐏", - "rat": "🐀", - "razor": "🪒", - "ringed_planet": "🪐", - "receipt": "🧾", - "record_button": "⏺", - "recycling_symbol": "♻", - "red_apple": "🍎", - "red_circle": "🔴", - "red_envelope": "🧧", - "red_hair": "🦰", - "red-haired_man": "👨\u200d🦰", - "red-haired_woman": "👩\u200d🦰", - "red_heart": "❤", - "red_paper_lantern": "🏮", - "red_square": "🟥", - "red_triangle_pointed_down": "🔻", - "red_triangle_pointed_up": "🔺", - "registered": "®", - "relieved_face": "😌", - "reminder_ribbon": "🎗", - "repeat_button": "🔁", - "repeat_single_button": "🔂", - "rescue_worker’s_helmet": "⛑", - "restroom": "🚻", - "reverse_button": "◀", - "revolving_hearts": "💞", - "rhinoceros": "🦏", - "ribbon": "🎀", - "rice_ball": "🍙", - "rice_cracker": "🍘", - "right-facing_fist": "🤜", - "right-facing_fist_dark_skin_tone": "🤜🏿", - "right-facing_fist_light_skin_tone": "🤜🏻", - "right-facing_fist_medium-dark_skin_tone": "🤜🏾", - "right-facing_fist_medium-light_skin_tone": "🤜🏼", - "right-facing_fist_medium_skin_tone": "🤜🏽", - "right_anger_bubble": "🗯", - "right_arrow": "➡", - "right_arrow_curving_down": "⤵", - "right_arrow_curving_left": "↩", - "right_arrow_curving_up": "⤴", - "ring": "💍", - "roasted_sweet_potato": "🍠", - "robot_face": "🤖", - "rocket": "🚀", - "roll_of_paper": "🧻", - "rolled-up_newspaper": "🗞", - "roller_coaster": "🎢", - "rolling_on_the_floor_laughing": "🤣", - "rooster": "🐓", - "rose": "🌹", - "rosette": "🏵", - "round_pushpin": "📍", - "rugby_football": "🏉", - "running_shirt": "🎽", - "running_shoe": "👟", - "sad_but_relieved_face": "😥", - "safety_pin": "🧷", - "safety_vest": "🦺", - "salt": "🧂", - "sailboat": "⛵", - "sake": "🍶", - "sandwich": "🥪", - "sari": "🥻", - "satellite": "📡", - "satellite_antenna": "📡", - "sauropod": "🦕", - "saxophone": "🎷", - "scarf": "🧣", - "school": "🏫", - "school_backpack": "🎒", - "scissors": "✂", - "scorpion": "🦂", - "scroll": "📜", - "seat": "💺", - "see-no-evil_monkey": "🙈", - "seedling": "🌱", - "selfie": "🤳", - "selfie_dark_skin_tone": "🤳🏿", - "selfie_light_skin_tone": "🤳🏻", - "selfie_medium-dark_skin_tone": "🤳🏾", - "selfie_medium-light_skin_tone": "🤳🏼", - "selfie_medium_skin_tone": "🤳🏽", - "service_dog": "🐕\u200d🦺", - "seven-thirty": "🕢", - "seven_o’clock": "🕖", - "shallow_pan_of_food": "🥘", - "shamrock": "☘", - "shark": "🦈", - "shaved_ice": "🍧", - "sheaf_of_rice": "🌾", - "shield": "🛡", - "shinto_shrine": "⛩", - "ship": "🚢", - "shooting_star": "🌠", - "shopping_bags": "🛍", - "shopping_cart": "🛒", - "shortcake": "🍰", - "shorts": "🩳", - "shower": "🚿", - "shrimp": "🦐", - "shuffle_tracks_button": "🔀", - "shushing_face": "🤫", - "sign_of_the_horns": "🤘", - "sign_of_the_horns_dark_skin_tone": "🤘🏿", - "sign_of_the_horns_light_skin_tone": "🤘🏻", - "sign_of_the_horns_medium-dark_skin_tone": "🤘🏾", - "sign_of_the_horns_medium-light_skin_tone": "🤘🏼", - "sign_of_the_horns_medium_skin_tone": "🤘🏽", - "six-thirty": "🕡", - "six_o’clock": "🕕", - "skateboard": "🛹", - "skier": "⛷", - "skis": "🎿", - "skull": "💀", - "skull_and_crossbones": "☠", - "skunk": "🦨", - "sled": "🛷", - "sleeping_face": "😴", - "sleepy_face": "😪", - "slightly_frowning_face": "🙁", - "slightly_smiling_face": "🙂", - "slot_machine": "🎰", - "sloth": "🦥", - "small_airplane": "🛩", - "small_blue_diamond": "🔹", - 
"small_orange_diamond": "🔸", - "smiling_cat_face_with_heart-eyes": "😻", - "smiling_face": "☺", - "smiling_face_with_halo": "😇", - "smiling_face_with_3_hearts": "🥰", - "smiling_face_with_heart-eyes": "😍", - "smiling_face_with_horns": "😈", - "smiling_face_with_smiling_eyes": "😊", - "smiling_face_with_sunglasses": "😎", - "smirking_face": "😏", - "snail": "🐌", - "snake": "🐍", - "sneezing_face": "🤧", - "snow-capped_mountain": "🏔", - "snowboarder": "🏂", - "snowboarder_dark_skin_tone": "🏂🏿", - "snowboarder_light_skin_tone": "🏂🏻", - "snowboarder_medium-dark_skin_tone": "🏂🏾", - "snowboarder_medium-light_skin_tone": "🏂🏼", - "snowboarder_medium_skin_tone": "🏂🏽", - "snowflake": "❄", - "snowman": "☃", - "snowman_without_snow": "⛄", - "soap": "🧼", - "soccer_ball": "⚽", - "socks": "🧦", - "softball": "🥎", - "soft_ice_cream": "🍦", - "spade_suit": "♠", - "spaghetti": "🍝", - "sparkle": "❇", - "sparkler": "🎇", - "sparkles": "✨", - "sparkling_heart": "💖", - "speak-no-evil_monkey": "🙊", - "speaker_high_volume": "🔊", - "speaker_low_volume": "🔈", - "speaker_medium_volume": "🔉", - "speaking_head": "🗣", - "speech_balloon": "💬", - "speedboat": "🚤", - "spider": "🕷", - "spider_web": "🕸", - "spiral_calendar": "🗓", - "spiral_notepad": "🗒", - "spiral_shell": "🐚", - "spoon": "🥄", - "sponge": "🧽", - "sport_utility_vehicle": "🚙", - "sports_medal": "🏅", - "spouting_whale": "🐳", - "squid": "🦑", - "squinting_face_with_tongue": "😝", - "stadium": "🏟", - "star-struck": "🤩", - "star_and_crescent": "☪", - "star_of_david": "✡", - "station": "🚉", - "steaming_bowl": "🍜", - "stethoscope": "🩺", - "stop_button": "⏹", - "stop_sign": "🛑", - "stopwatch": "⏱", - "straight_ruler": "📏", - "strawberry": "🍓", - "studio_microphone": "🎙", - "stuffed_flatbread": "🥙", - "sun": "☀", - "sun_behind_cloud": "⛅", - "sun_behind_large_cloud": "🌥", - "sun_behind_rain_cloud": "🌦", - "sun_behind_small_cloud": "🌤", - "sun_with_face": "🌞", - "sunflower": "🌻", - "sunglasses": "😎", - "sunrise": "🌅", - "sunrise_over_mountains": "🌄", - "sunset": "🌇", - "superhero": "🦸", - "supervillain": "🦹", - "sushi": "🍣", - "suspension_railway": "🚟", - "swan": "🦢", - "sweat_droplets": "💦", - "synagogue": "🕍", - "syringe": "💉", - "t-shirt": "👕", - "taco": "🌮", - "takeout_box": "🥡", - "tanabata_tree": "🎋", - "tangerine": "🍊", - "taxi": "🚕", - "teacup_without_handle": "🍵", - "tear-off_calendar": "📆", - "teddy_bear": "🧸", - "telephone": "☎", - "telephone_receiver": "📞", - "telescope": "🔭", - "television": "📺", - "ten-thirty": "🕥", - "ten_o’clock": "🕙", - "tennis": "🎾", - "tent": "⛺", - "test_tube": "🧪", - "thermometer": "🌡", - "thinking_face": "🤔", - "thought_balloon": "💭", - "thread": "🧵", - "three-thirty": "🕞", - "three_o’clock": "🕒", - "thumbs_down": "👎", - "thumbs_down_dark_skin_tone": "👎🏿", - "thumbs_down_light_skin_tone": "👎🏻", - "thumbs_down_medium-dark_skin_tone": "👎🏾", - "thumbs_down_medium-light_skin_tone": "👎🏼", - "thumbs_down_medium_skin_tone": "👎🏽", - "thumbs_up": "👍", - "thumbs_up_dark_skin_tone": "👍🏿", - "thumbs_up_light_skin_tone": "👍🏻", - "thumbs_up_medium-dark_skin_tone": "👍🏾", - "thumbs_up_medium-light_skin_tone": "👍🏼", - "thumbs_up_medium_skin_tone": "👍🏽", - "ticket": "🎫", - "tiger": "🐯", - "tiger_face": "🐯", - "timer_clock": "⏲", - "tired_face": "😫", - "toolbox": "🧰", - "toilet": "🚽", - "tomato": "🍅", - "tongue": "👅", - "tooth": "🦷", - "top_hat": "🎩", - "tornado": "🌪", - "trackball": "🖲", - "tractor": "🚜", - "trade_mark": "™", - "train": "🚋", - "tram": "🚊", - "tram_car": "🚋", - "triangular_flag": "🚩", - "triangular_ruler": "📐", - "trident_emblem": "🔱", - 
"trolleybus": "🚎", - "trophy": "🏆", - "tropical_drink": "🍹", - "tropical_fish": "🐠", - "trumpet": "🎺", - "tulip": "🌷", - "tumbler_glass": "🥃", - "turtle": "🐢", - "twelve-thirty": "🕧", - "twelve_o’clock": "🕛", - "two-hump_camel": "🐫", - "two-thirty": "🕝", - "two_hearts": "💕", - "two_men_holding_hands": "👬", - "two_o’clock": "🕑", - "two_women_holding_hands": "👭", - "umbrella": "☂", - "umbrella_on_ground": "⛱", - "umbrella_with_rain_drops": "☔", - "unamused_face": "😒", - "unicorn_face": "🦄", - "unlocked": "🔓", - "up-down_arrow": "↕", - "up-left_arrow": "↖", - "up-right_arrow": "↗", - "up_arrow": "⬆", - "upside-down_face": "🙃", - "upwards_button": "🔼", - "vampire": "🧛", - "vampire_dark_skin_tone": "🧛🏿", - "vampire_light_skin_tone": "🧛🏻", - "vampire_medium-dark_skin_tone": "🧛🏾", - "vampire_medium-light_skin_tone": "🧛🏼", - "vampire_medium_skin_tone": "🧛🏽", - "vertical_traffic_light": "🚦", - "vibration_mode": "📳", - "victory_hand": "✌", - "victory_hand_dark_skin_tone": "✌🏿", - "victory_hand_light_skin_tone": "✌🏻", - "victory_hand_medium-dark_skin_tone": "✌🏾", - "victory_hand_medium-light_skin_tone": "✌🏼", - "victory_hand_medium_skin_tone": "✌🏽", - "video_camera": "📹", - "video_game": "🎮", - "videocassette": "📼", - "violin": "🎻", - "volcano": "🌋", - "volleyball": "🏐", - "vulcan_salute": "🖖", - "vulcan_salute_dark_skin_tone": "🖖🏿", - "vulcan_salute_light_skin_tone": "🖖🏻", - "vulcan_salute_medium-dark_skin_tone": "🖖🏾", - "vulcan_salute_medium-light_skin_tone": "🖖🏼", - "vulcan_salute_medium_skin_tone": "🖖🏽", - "waffle": "🧇", - "waning_crescent_moon": "🌘", - "waning_gibbous_moon": "🌖", - "warning": "⚠", - "wastebasket": "🗑", - "watch": "⌚", - "water_buffalo": "🐃", - "water_closet": "🚾", - "water_wave": "🌊", - "watermelon": "🍉", - "waving_hand": "👋", - "waving_hand_dark_skin_tone": "👋🏿", - "waving_hand_light_skin_tone": "👋🏻", - "waving_hand_medium-dark_skin_tone": "👋🏾", - "waving_hand_medium-light_skin_tone": "👋🏼", - "waving_hand_medium_skin_tone": "👋🏽", - "wavy_dash": "〰", - "waxing_crescent_moon": "🌒", - "waxing_gibbous_moon": "🌔", - "weary_cat_face": "🙀", - "weary_face": "😩", - "wedding": "💒", - "whale": "🐳", - "wheel_of_dharma": "☸", - "wheelchair_symbol": "♿", - "white_circle": "⚪", - "white_exclamation_mark": "❕", - "white_flag": "🏳", - "white_flower": "💮", - "white_hair": "🦳", - "white-haired_man": "👨\u200d🦳", - "white-haired_woman": "👩\u200d🦳", - "white_heart": "🤍", - "white_heavy_check_mark": "✅", - "white_large_square": "⬜", - "white_medium-small_square": "◽", - "white_medium_square": "◻", - "white_medium_star": "⭐", - "white_question_mark": "❔", - "white_small_square": "▫", - "white_square_button": "🔳", - "wilted_flower": "🥀", - "wind_chime": "🎐", - "wind_face": "🌬", - "wine_glass": "🍷", - "winking_face": "😉", - "winking_face_with_tongue": "😜", - "wolf_face": "🐺", - "woman": "👩", - "woman_artist": "👩\u200d🎨", - "woman_artist_dark_skin_tone": "👩🏿\u200d🎨", - "woman_artist_light_skin_tone": "👩🏻\u200d🎨", - "woman_artist_medium-dark_skin_tone": "👩🏾\u200d🎨", - "woman_artist_medium-light_skin_tone": "👩🏼\u200d🎨", - "woman_artist_medium_skin_tone": "👩🏽\u200d🎨", - "woman_astronaut": "👩\u200d🚀", - "woman_astronaut_dark_skin_tone": "👩🏿\u200d🚀", - "woman_astronaut_light_skin_tone": "👩🏻\u200d🚀", - "woman_astronaut_medium-dark_skin_tone": "👩🏾\u200d🚀", - "woman_astronaut_medium-light_skin_tone": "👩🏼\u200d🚀", - "woman_astronaut_medium_skin_tone": "👩🏽\u200d🚀", - "woman_biking": "🚴\u200d♀️", - "woman_biking_dark_skin_tone": "🚴🏿\u200d♀️", - "woman_biking_light_skin_tone": "🚴🏻\u200d♀️", - 
"woman_biking_medium-dark_skin_tone": "🚴🏾\u200d♀️", - "woman_biking_medium-light_skin_tone": "🚴🏼\u200d♀️", - "woman_biking_medium_skin_tone": "🚴🏽\u200d♀️", - "woman_bouncing_ball": "⛹️\u200d♀️", - "woman_bouncing_ball_dark_skin_tone": "⛹🏿\u200d♀️", - "woman_bouncing_ball_light_skin_tone": "⛹🏻\u200d♀️", - "woman_bouncing_ball_medium-dark_skin_tone": "⛹🏾\u200d♀️", - "woman_bouncing_ball_medium-light_skin_tone": "⛹🏼\u200d♀️", - "woman_bouncing_ball_medium_skin_tone": "⛹🏽\u200d♀️", - "woman_bowing": "🙇\u200d♀️", - "woman_bowing_dark_skin_tone": "🙇🏿\u200d♀️", - "woman_bowing_light_skin_tone": "🙇🏻\u200d♀️", - "woman_bowing_medium-dark_skin_tone": "🙇🏾\u200d♀️", - "woman_bowing_medium-light_skin_tone": "🙇🏼\u200d♀️", - "woman_bowing_medium_skin_tone": "🙇🏽\u200d♀️", - "woman_cartwheeling": "🤸\u200d♀️", - "woman_cartwheeling_dark_skin_tone": "🤸🏿\u200d♀️", - "woman_cartwheeling_light_skin_tone": "🤸🏻\u200d♀️", - "woman_cartwheeling_medium-dark_skin_tone": "🤸🏾\u200d♀️", - "woman_cartwheeling_medium-light_skin_tone": "🤸🏼\u200d♀️", - "woman_cartwheeling_medium_skin_tone": "🤸🏽\u200d♀️", - "woman_climbing": "🧗\u200d♀️", - "woman_climbing_dark_skin_tone": "🧗🏿\u200d♀️", - "woman_climbing_light_skin_tone": "🧗🏻\u200d♀️", - "woman_climbing_medium-dark_skin_tone": "🧗🏾\u200d♀️", - "woman_climbing_medium-light_skin_tone": "🧗🏼\u200d♀️", - "woman_climbing_medium_skin_tone": "🧗🏽\u200d♀️", - "woman_construction_worker": "👷\u200d♀️", - "woman_construction_worker_dark_skin_tone": "👷🏿\u200d♀️", - "woman_construction_worker_light_skin_tone": "👷🏻\u200d♀️", - "woman_construction_worker_medium-dark_skin_tone": "👷🏾\u200d♀️", - "woman_construction_worker_medium-light_skin_tone": "👷🏼\u200d♀️", - "woman_construction_worker_medium_skin_tone": "👷🏽\u200d♀️", - "woman_cook": "👩\u200d🍳", - "woman_cook_dark_skin_tone": "👩🏿\u200d🍳", - "woman_cook_light_skin_tone": "👩🏻\u200d🍳", - "woman_cook_medium-dark_skin_tone": "👩🏾\u200d🍳", - "woman_cook_medium-light_skin_tone": "👩🏼\u200d🍳", - "woman_cook_medium_skin_tone": "👩🏽\u200d🍳", - "woman_dancing": "💃", - "woman_dancing_dark_skin_tone": "💃🏿", - "woman_dancing_light_skin_tone": "💃🏻", - "woman_dancing_medium-dark_skin_tone": "💃🏾", - "woman_dancing_medium-light_skin_tone": "💃🏼", - "woman_dancing_medium_skin_tone": "💃🏽", - "woman_dark_skin_tone": "👩🏿", - "woman_detective": "🕵️\u200d♀️", - "woman_detective_dark_skin_tone": "🕵🏿\u200d♀️", - "woman_detective_light_skin_tone": "🕵🏻\u200d♀️", - "woman_detective_medium-dark_skin_tone": "🕵🏾\u200d♀️", - "woman_detective_medium-light_skin_tone": "🕵🏼\u200d♀️", - "woman_detective_medium_skin_tone": "🕵🏽\u200d♀️", - "woman_elf": "🧝\u200d♀️", - "woman_elf_dark_skin_tone": "🧝🏿\u200d♀️", - "woman_elf_light_skin_tone": "🧝🏻\u200d♀️", - "woman_elf_medium-dark_skin_tone": "🧝🏾\u200d♀️", - "woman_elf_medium-light_skin_tone": "🧝🏼\u200d♀️", - "woman_elf_medium_skin_tone": "🧝🏽\u200d♀️", - "woman_facepalming": "🤦\u200d♀️", - "woman_facepalming_dark_skin_tone": "🤦🏿\u200d♀️", - "woman_facepalming_light_skin_tone": "🤦🏻\u200d♀️", - "woman_facepalming_medium-dark_skin_tone": "🤦🏾\u200d♀️", - "woman_facepalming_medium-light_skin_tone": "🤦🏼\u200d♀️", - "woman_facepalming_medium_skin_tone": "🤦🏽\u200d♀️", - "woman_factory_worker": "👩\u200d🏭", - "woman_factory_worker_dark_skin_tone": "👩🏿\u200d🏭", - "woman_factory_worker_light_skin_tone": "👩🏻\u200d🏭", - "woman_factory_worker_medium-dark_skin_tone": "👩🏾\u200d🏭", - "woman_factory_worker_medium-light_skin_tone": "👩🏼\u200d🏭", - "woman_factory_worker_medium_skin_tone": "👩🏽\u200d🏭", - "woman_fairy": "🧚\u200d♀️", - 
"woman_fairy_dark_skin_tone": "🧚🏿\u200d♀️", - "woman_fairy_light_skin_tone": "🧚🏻\u200d♀️", - "woman_fairy_medium-dark_skin_tone": "🧚🏾\u200d♀️", - "woman_fairy_medium-light_skin_tone": "🧚🏼\u200d♀️", - "woman_fairy_medium_skin_tone": "🧚🏽\u200d♀️", - "woman_farmer": "👩\u200d🌾", - "woman_farmer_dark_skin_tone": "👩🏿\u200d🌾", - "woman_farmer_light_skin_tone": "👩🏻\u200d🌾", - "woman_farmer_medium-dark_skin_tone": "👩🏾\u200d🌾", - "woman_farmer_medium-light_skin_tone": "👩🏼\u200d🌾", - "woman_farmer_medium_skin_tone": "👩🏽\u200d🌾", - "woman_firefighter": "👩\u200d🚒", - "woman_firefighter_dark_skin_tone": "👩🏿\u200d🚒", - "woman_firefighter_light_skin_tone": "👩🏻\u200d🚒", - "woman_firefighter_medium-dark_skin_tone": "👩🏾\u200d🚒", - "woman_firefighter_medium-light_skin_tone": "👩🏼\u200d🚒", - "woman_firefighter_medium_skin_tone": "👩🏽\u200d🚒", - "woman_frowning": "🙍\u200d♀️", - "woman_frowning_dark_skin_tone": "🙍🏿\u200d♀️", - "woman_frowning_light_skin_tone": "🙍🏻\u200d♀️", - "woman_frowning_medium-dark_skin_tone": "🙍🏾\u200d♀️", - "woman_frowning_medium-light_skin_tone": "🙍🏼\u200d♀️", - "woman_frowning_medium_skin_tone": "🙍🏽\u200d♀️", - "woman_genie": "🧞\u200d♀️", - "woman_gesturing_no": "🙅\u200d♀️", - "woman_gesturing_no_dark_skin_tone": "🙅🏿\u200d♀️", - "woman_gesturing_no_light_skin_tone": "🙅🏻\u200d♀️", - "woman_gesturing_no_medium-dark_skin_tone": "🙅🏾\u200d♀️", - "woman_gesturing_no_medium-light_skin_tone": "🙅🏼\u200d♀️", - "woman_gesturing_no_medium_skin_tone": "🙅🏽\u200d♀️", - "woman_gesturing_ok": "🙆\u200d♀️", - "woman_gesturing_ok_dark_skin_tone": "🙆🏿\u200d♀️", - "woman_gesturing_ok_light_skin_tone": "🙆🏻\u200d♀️", - "woman_gesturing_ok_medium-dark_skin_tone": "🙆🏾\u200d♀️", - "woman_gesturing_ok_medium-light_skin_tone": "🙆🏼\u200d♀️", - "woman_gesturing_ok_medium_skin_tone": "🙆🏽\u200d♀️", - "woman_getting_haircut": "💇\u200d♀️", - "woman_getting_haircut_dark_skin_tone": "💇🏿\u200d♀️", - "woman_getting_haircut_light_skin_tone": "💇🏻\u200d♀️", - "woman_getting_haircut_medium-dark_skin_tone": "💇🏾\u200d♀️", - "woman_getting_haircut_medium-light_skin_tone": "💇🏼\u200d♀️", - "woman_getting_haircut_medium_skin_tone": "💇🏽\u200d♀️", - "woman_getting_massage": "💆\u200d♀️", - "woman_getting_massage_dark_skin_tone": "💆🏿\u200d♀️", - "woman_getting_massage_light_skin_tone": "💆🏻\u200d♀️", - "woman_getting_massage_medium-dark_skin_tone": "💆🏾\u200d♀️", - "woman_getting_massage_medium-light_skin_tone": "💆🏼\u200d♀️", - "woman_getting_massage_medium_skin_tone": "💆🏽\u200d♀️", - "woman_golfing": "🏌️\u200d♀️", - "woman_golfing_dark_skin_tone": "🏌🏿\u200d♀️", - "woman_golfing_light_skin_tone": "🏌🏻\u200d♀️", - "woman_golfing_medium-dark_skin_tone": "🏌🏾\u200d♀️", - "woman_golfing_medium-light_skin_tone": "🏌🏼\u200d♀️", - "woman_golfing_medium_skin_tone": "🏌🏽\u200d♀️", - "woman_guard": "💂\u200d♀️", - "woman_guard_dark_skin_tone": "💂🏿\u200d♀️", - "woman_guard_light_skin_tone": "💂🏻\u200d♀️", - "woman_guard_medium-dark_skin_tone": "💂🏾\u200d♀️", - "woman_guard_medium-light_skin_tone": "💂🏼\u200d♀️", - "woman_guard_medium_skin_tone": "💂🏽\u200d♀️", - "woman_health_worker": "👩\u200d⚕️", - "woman_health_worker_dark_skin_tone": "👩🏿\u200d⚕️", - "woman_health_worker_light_skin_tone": "👩🏻\u200d⚕️", - "woman_health_worker_medium-dark_skin_tone": "👩🏾\u200d⚕️", - "woman_health_worker_medium-light_skin_tone": "👩🏼\u200d⚕️", - "woman_health_worker_medium_skin_tone": "👩🏽\u200d⚕️", - "woman_in_lotus_position": "🧘\u200d♀️", - "woman_in_lotus_position_dark_skin_tone": "🧘🏿\u200d♀️", - "woman_in_lotus_position_light_skin_tone": "🧘🏻\u200d♀️", - 
"woman_in_lotus_position_medium-dark_skin_tone": "🧘🏾\u200d♀️", - "woman_in_lotus_position_medium-light_skin_tone": "🧘🏼\u200d♀️", - "woman_in_lotus_position_medium_skin_tone": "🧘🏽\u200d♀️", - "woman_in_manual_wheelchair": "👩\u200d🦽", - "woman_in_motorized_wheelchair": "👩\u200d🦼", - "woman_in_steamy_room": "🧖\u200d♀️", - "woman_in_steamy_room_dark_skin_tone": "🧖🏿\u200d♀️", - "woman_in_steamy_room_light_skin_tone": "🧖🏻\u200d♀️", - "woman_in_steamy_room_medium-dark_skin_tone": "🧖🏾\u200d♀️", - "woman_in_steamy_room_medium-light_skin_tone": "🧖🏼\u200d♀️", - "woman_in_steamy_room_medium_skin_tone": "🧖🏽\u200d♀️", - "woman_judge": "👩\u200d⚖️", - "woman_judge_dark_skin_tone": "👩🏿\u200d⚖️", - "woman_judge_light_skin_tone": "👩🏻\u200d⚖️", - "woman_judge_medium-dark_skin_tone": "👩🏾\u200d⚖️", - "woman_judge_medium-light_skin_tone": "👩🏼\u200d⚖️", - "woman_judge_medium_skin_tone": "👩🏽\u200d⚖️", - "woman_juggling": "🤹\u200d♀️", - "woman_juggling_dark_skin_tone": "🤹🏿\u200d♀️", - "woman_juggling_light_skin_tone": "🤹🏻\u200d♀️", - "woman_juggling_medium-dark_skin_tone": "🤹🏾\u200d♀️", - "woman_juggling_medium-light_skin_tone": "🤹🏼\u200d♀️", - "woman_juggling_medium_skin_tone": "🤹🏽\u200d♀️", - "woman_lifting_weights": "🏋️\u200d♀️", - "woman_lifting_weights_dark_skin_tone": "🏋🏿\u200d♀️", - "woman_lifting_weights_light_skin_tone": "🏋🏻\u200d♀️", - "woman_lifting_weights_medium-dark_skin_tone": "🏋🏾\u200d♀️", - "woman_lifting_weights_medium-light_skin_tone": "🏋🏼\u200d♀️", - "woman_lifting_weights_medium_skin_tone": "🏋🏽\u200d♀️", - "woman_light_skin_tone": "👩🏻", - "woman_mage": "🧙\u200d♀️", - "woman_mage_dark_skin_tone": "🧙🏿\u200d♀️", - "woman_mage_light_skin_tone": "🧙🏻\u200d♀️", - "woman_mage_medium-dark_skin_tone": "🧙🏾\u200d♀️", - "woman_mage_medium-light_skin_tone": "🧙🏼\u200d♀️", - "woman_mage_medium_skin_tone": "🧙🏽\u200d♀️", - "woman_mechanic": "👩\u200d🔧", - "woman_mechanic_dark_skin_tone": "👩🏿\u200d🔧", - "woman_mechanic_light_skin_tone": "👩🏻\u200d🔧", - "woman_mechanic_medium-dark_skin_tone": "👩🏾\u200d🔧", - "woman_mechanic_medium-light_skin_tone": "👩🏼\u200d🔧", - "woman_mechanic_medium_skin_tone": "👩🏽\u200d🔧", - "woman_medium-dark_skin_tone": "👩🏾", - "woman_medium-light_skin_tone": "👩🏼", - "woman_medium_skin_tone": "👩🏽", - "woman_mountain_biking": "🚵\u200d♀️", - "woman_mountain_biking_dark_skin_tone": "🚵🏿\u200d♀️", - "woman_mountain_biking_light_skin_tone": "🚵🏻\u200d♀️", - "woman_mountain_biking_medium-dark_skin_tone": "🚵🏾\u200d♀️", - "woman_mountain_biking_medium-light_skin_tone": "🚵🏼\u200d♀️", - "woman_mountain_biking_medium_skin_tone": "🚵🏽\u200d♀️", - "woman_office_worker": "👩\u200d💼", - "woman_office_worker_dark_skin_tone": "👩🏿\u200d💼", - "woman_office_worker_light_skin_tone": "👩🏻\u200d💼", - "woman_office_worker_medium-dark_skin_tone": "👩🏾\u200d💼", - "woman_office_worker_medium-light_skin_tone": "👩🏼\u200d💼", - "woman_office_worker_medium_skin_tone": "👩🏽\u200d💼", - "woman_pilot": "👩\u200d✈️", - "woman_pilot_dark_skin_tone": "👩🏿\u200d✈️", - "woman_pilot_light_skin_tone": "👩🏻\u200d✈️", - "woman_pilot_medium-dark_skin_tone": "👩🏾\u200d✈️", - "woman_pilot_medium-light_skin_tone": "👩🏼\u200d✈️", - "woman_pilot_medium_skin_tone": "👩🏽\u200d✈️", - "woman_playing_handball": "🤾\u200d♀️", - "woman_playing_handball_dark_skin_tone": "🤾🏿\u200d♀️", - "woman_playing_handball_light_skin_tone": "🤾🏻\u200d♀️", - "woman_playing_handball_medium-dark_skin_tone": "🤾🏾\u200d♀️", - "woman_playing_handball_medium-light_skin_tone": "🤾🏼\u200d♀️", - "woman_playing_handball_medium_skin_tone": "🤾🏽\u200d♀️", - "woman_playing_water_polo": 
"🤽\u200d♀️", - "woman_playing_water_polo_dark_skin_tone": "🤽🏿\u200d♀️", - "woman_playing_water_polo_light_skin_tone": "🤽🏻\u200d♀️", - "woman_playing_water_polo_medium-dark_skin_tone": "🤽🏾\u200d♀️", - "woman_playing_water_polo_medium-light_skin_tone": "🤽🏼\u200d♀️", - "woman_playing_water_polo_medium_skin_tone": "🤽🏽\u200d♀️", - "woman_police_officer": "👮\u200d♀️", - "woman_police_officer_dark_skin_tone": "👮🏿\u200d♀️", - "woman_police_officer_light_skin_tone": "👮🏻\u200d♀️", - "woman_police_officer_medium-dark_skin_tone": "👮🏾\u200d♀️", - "woman_police_officer_medium-light_skin_tone": "👮🏼\u200d♀️", - "woman_police_officer_medium_skin_tone": "👮🏽\u200d♀️", - "woman_pouting": "🙎\u200d♀️", - "woman_pouting_dark_skin_tone": "🙎🏿\u200d♀️", - "woman_pouting_light_skin_tone": "🙎🏻\u200d♀️", - "woman_pouting_medium-dark_skin_tone": "🙎🏾\u200d♀️", - "woman_pouting_medium-light_skin_tone": "🙎🏼\u200d♀️", - "woman_pouting_medium_skin_tone": "🙎🏽\u200d♀️", - "woman_raising_hand": "🙋\u200d♀️", - "woman_raising_hand_dark_skin_tone": "🙋🏿\u200d♀️", - "woman_raising_hand_light_skin_tone": "🙋🏻\u200d♀️", - "woman_raising_hand_medium-dark_skin_tone": "🙋🏾\u200d♀️", - "woman_raising_hand_medium-light_skin_tone": "🙋🏼\u200d♀️", - "woman_raising_hand_medium_skin_tone": "🙋🏽\u200d♀️", - "woman_rowing_boat": "🚣\u200d♀️", - "woman_rowing_boat_dark_skin_tone": "🚣🏿\u200d♀️", - "woman_rowing_boat_light_skin_tone": "🚣🏻\u200d♀️", - "woman_rowing_boat_medium-dark_skin_tone": "🚣🏾\u200d♀️", - "woman_rowing_boat_medium-light_skin_tone": "🚣🏼\u200d♀️", - "woman_rowing_boat_medium_skin_tone": "🚣🏽\u200d♀️", - "woman_running": "🏃\u200d♀️", - "woman_running_dark_skin_tone": "🏃🏿\u200d♀️", - "woman_running_light_skin_tone": "🏃🏻\u200d♀️", - "woman_running_medium-dark_skin_tone": "🏃🏾\u200d♀️", - "woman_running_medium-light_skin_tone": "🏃🏼\u200d♀️", - "woman_running_medium_skin_tone": "🏃🏽\u200d♀️", - "woman_scientist": "👩\u200d🔬", - "woman_scientist_dark_skin_tone": "👩🏿\u200d🔬", - "woman_scientist_light_skin_tone": "👩🏻\u200d🔬", - "woman_scientist_medium-dark_skin_tone": "👩🏾\u200d🔬", - "woman_scientist_medium-light_skin_tone": "👩🏼\u200d🔬", - "woman_scientist_medium_skin_tone": "👩🏽\u200d🔬", - "woman_shrugging": "🤷\u200d♀️", - "woman_shrugging_dark_skin_tone": "🤷🏿\u200d♀️", - "woman_shrugging_light_skin_tone": "🤷🏻\u200d♀️", - "woman_shrugging_medium-dark_skin_tone": "🤷🏾\u200d♀️", - "woman_shrugging_medium-light_skin_tone": "🤷🏼\u200d♀️", - "woman_shrugging_medium_skin_tone": "🤷🏽\u200d♀️", - "woman_singer": "👩\u200d🎤", - "woman_singer_dark_skin_tone": "👩🏿\u200d🎤", - "woman_singer_light_skin_tone": "👩🏻\u200d🎤", - "woman_singer_medium-dark_skin_tone": "👩🏾\u200d🎤", - "woman_singer_medium-light_skin_tone": "👩🏼\u200d🎤", - "woman_singer_medium_skin_tone": "👩🏽\u200d🎤", - "woman_student": "👩\u200d🎓", - "woman_student_dark_skin_tone": "👩🏿\u200d🎓", - "woman_student_light_skin_tone": "👩🏻\u200d🎓", - "woman_student_medium-dark_skin_tone": "👩🏾\u200d🎓", - "woman_student_medium-light_skin_tone": "👩🏼\u200d🎓", - "woman_student_medium_skin_tone": "👩🏽\u200d🎓", - "woman_surfing": "🏄\u200d♀️", - "woman_surfing_dark_skin_tone": "🏄🏿\u200d♀️", - "woman_surfing_light_skin_tone": "🏄🏻\u200d♀️", - "woman_surfing_medium-dark_skin_tone": "🏄🏾\u200d♀️", - "woman_surfing_medium-light_skin_tone": "🏄🏼\u200d♀️", - "woman_surfing_medium_skin_tone": "🏄🏽\u200d♀️", - "woman_swimming": "🏊\u200d♀️", - "woman_swimming_dark_skin_tone": "🏊🏿\u200d♀️", - "woman_swimming_light_skin_tone": "🏊🏻\u200d♀️", - "woman_swimming_medium-dark_skin_tone": "🏊🏾\u200d♀️", - "woman_swimming_medium-light_skin_tone": 
"🏊🏼\u200d♀️", - "woman_swimming_medium_skin_tone": "🏊🏽\u200d♀️", - "woman_teacher": "👩\u200d🏫", - "woman_teacher_dark_skin_tone": "👩🏿\u200d🏫", - "woman_teacher_light_skin_tone": "👩🏻\u200d🏫", - "woman_teacher_medium-dark_skin_tone": "👩🏾\u200d🏫", - "woman_teacher_medium-light_skin_tone": "👩🏼\u200d🏫", - "woman_teacher_medium_skin_tone": "👩🏽\u200d🏫", - "woman_technologist": "👩\u200d💻", - "woman_technologist_dark_skin_tone": "👩🏿\u200d💻", - "woman_technologist_light_skin_tone": "👩🏻\u200d💻", - "woman_technologist_medium-dark_skin_tone": "👩🏾\u200d💻", - "woman_technologist_medium-light_skin_tone": "👩🏼\u200d💻", - "woman_technologist_medium_skin_tone": "👩🏽\u200d💻", - "woman_tipping_hand": "💁\u200d♀️", - "woman_tipping_hand_dark_skin_tone": "💁🏿\u200d♀️", - "woman_tipping_hand_light_skin_tone": "💁🏻\u200d♀️", - "woman_tipping_hand_medium-dark_skin_tone": "💁🏾\u200d♀️", - "woman_tipping_hand_medium-light_skin_tone": "💁🏼\u200d♀️", - "woman_tipping_hand_medium_skin_tone": "💁🏽\u200d♀️", - "woman_vampire": "🧛\u200d♀️", - "woman_vampire_dark_skin_tone": "🧛🏿\u200d♀️", - "woman_vampire_light_skin_tone": "🧛🏻\u200d♀️", - "woman_vampire_medium-dark_skin_tone": "🧛🏾\u200d♀️", - "woman_vampire_medium-light_skin_tone": "🧛🏼\u200d♀️", - "woman_vampire_medium_skin_tone": "🧛🏽\u200d♀️", - "woman_walking": "🚶\u200d♀️", - "woman_walking_dark_skin_tone": "🚶🏿\u200d♀️", - "woman_walking_light_skin_tone": "🚶🏻\u200d♀️", - "woman_walking_medium-dark_skin_tone": "🚶🏾\u200d♀️", - "woman_walking_medium-light_skin_tone": "🚶🏼\u200d♀️", - "woman_walking_medium_skin_tone": "🚶🏽\u200d♀️", - "woman_wearing_turban": "👳\u200d♀️", - "woman_wearing_turban_dark_skin_tone": "👳🏿\u200d♀️", - "woman_wearing_turban_light_skin_tone": "👳🏻\u200d♀️", - "woman_wearing_turban_medium-dark_skin_tone": "👳🏾\u200d♀️", - "woman_wearing_turban_medium-light_skin_tone": "👳🏼\u200d♀️", - "woman_wearing_turban_medium_skin_tone": "👳🏽\u200d♀️", - "woman_with_headscarf": "🧕", - "woman_with_headscarf_dark_skin_tone": "🧕🏿", - "woman_with_headscarf_light_skin_tone": "🧕🏻", - "woman_with_headscarf_medium-dark_skin_tone": "🧕🏾", - "woman_with_headscarf_medium-light_skin_tone": "🧕🏼", - "woman_with_headscarf_medium_skin_tone": "🧕🏽", - "woman_with_probing_cane": "👩\u200d🦯", - "woman_zombie": "🧟\u200d♀️", - "woman’s_boot": "👢", - "woman’s_clothes": "👚", - "woman’s_hat": "👒", - "woman’s_sandal": "👡", - "women_with_bunny_ears": "👯\u200d♀️", - "women_wrestling": "🤼\u200d♀️", - "women’s_room": "🚺", - "woozy_face": "🥴", - "world_map": "🗺", - "worried_face": "😟", - "wrapped_gift": "🎁", - "wrench": "🔧", - "writing_hand": "✍", - "writing_hand_dark_skin_tone": "✍🏿", - "writing_hand_light_skin_tone": "✍🏻", - "writing_hand_medium-dark_skin_tone": "✍🏾", - "writing_hand_medium-light_skin_tone": "✍🏼", - "writing_hand_medium_skin_tone": "✍🏽", - "yarn": "🧶", - "yawning_face": "🥱", - "yellow_circle": "🟡", - "yellow_heart": "💛", - "yellow_square": "🟨", - "yen_banknote": "💴", - "yo-yo": "🪀", - "yin_yang": "☯", - "zany_face": "🤪", - "zebra": "🦓", - "zipper-mouth_face": "🤐", - "zombie": "🧟", - "zzz": "💤", - "åland_islands": "🇦🇽", - "keycap_asterisk": "*⃣", - "keycap_digit_eight": "8⃣", - "keycap_digit_five": "5⃣", - "keycap_digit_four": "4⃣", - "keycap_digit_nine": "9⃣", - "keycap_digit_one": "1⃣", - "keycap_digit_seven": "7⃣", - "keycap_digit_six": "6⃣", - "keycap_digit_three": "3⃣", - "keycap_digit_two": "2⃣", - "keycap_digit_zero": "0⃣", - "keycap_number_sign": "#⃣", - "light_skin_tone": "🏻", - "medium_light_skin_tone": "🏼", - "medium_skin_tone": "🏽", - "medium_dark_skin_tone": "🏾", - 
"dark_skin_tone": "🏿", - "regional_indicator_symbol_letter_a": "🇦", - "regional_indicator_symbol_letter_b": "🇧", - "regional_indicator_symbol_letter_c": "🇨", - "regional_indicator_symbol_letter_d": "🇩", - "regional_indicator_symbol_letter_e": "🇪", - "regional_indicator_symbol_letter_f": "🇫", - "regional_indicator_symbol_letter_g": "🇬", - "regional_indicator_symbol_letter_h": "🇭", - "regional_indicator_symbol_letter_i": "🇮", - "regional_indicator_symbol_letter_j": "🇯", - "regional_indicator_symbol_letter_k": "🇰", - "regional_indicator_symbol_letter_l": "🇱", - "regional_indicator_symbol_letter_m": "🇲", - "regional_indicator_symbol_letter_n": "🇳", - "regional_indicator_symbol_letter_o": "🇴", - "regional_indicator_symbol_letter_p": "🇵", - "regional_indicator_symbol_letter_q": "🇶", - "regional_indicator_symbol_letter_r": "🇷", - "regional_indicator_symbol_letter_s": "🇸", - "regional_indicator_symbol_letter_t": "🇹", - "regional_indicator_symbol_letter_u": "🇺", - "regional_indicator_symbol_letter_v": "🇻", - "regional_indicator_symbol_letter_w": "🇼", - "regional_indicator_symbol_letter_x": "🇽", - "regional_indicator_symbol_letter_y": "🇾", - "regional_indicator_symbol_letter_z": "🇿", - "airplane_arriving": "🛬", - "space_invader": "👾", - "football": "🏈", - "anger": "💢", - "angry": "😠", - "anguished": "😧", - "signal_strength": "📶", - "arrows_counterclockwise": "🔄", - "arrow_heading_down": "⤵", - "arrow_heading_up": "⤴", - "art": "🎨", - "astonished": "😲", - "athletic_shoe": "👟", - "atm": "🏧", - "car": "🚗", - "red_car": "🚗", - "angel": "👼", - "back": "🔙", - "badminton_racquet_and_shuttlecock": "🏸", - "dollar": "💵", - "euro": "💶", - "pound": "💷", - "yen": "💴", - "barber": "💈", - "bath": "🛀", - "bear": "🐻", - "heartbeat": "💓", - "beer": "🍺", - "no_bell": "🔕", - "bento": "🍱", - "bike": "🚲", - "bicyclist": "🚴", - "8ball": "🎱", - "biohazard_sign": "☣", - "birthday": "🎂", - "black_circle_for_record": "⏺", - "clubs": "♣", - "diamonds": "♦", - "arrow_double_down": "⏬", - "hearts": "♥", - "rewind": "⏪", - "black_left__pointing_double_triangle_with_vertical_bar": "⏮", - "arrow_backward": "◀", - "black_medium_small_square": "◾", - "question": "❓", - "fast_forward": "⏩", - "black_right__pointing_double_triangle_with_vertical_bar": "⏭", - "arrow_forward": "▶", - "black_right__pointing_triangle_with_double_vertical_bar": "⏯", - "arrow_right": "➡", - "spades": "♠", - "black_square_for_stop": "⏹", - "sunny": "☀", - "phone": "☎", - "recycle": "♻", - "arrow_double_up": "⏫", - "busstop": "🚏", - "date": "📅", - "flags": "🎏", - "cat2": "🐈", - "joy_cat": "😹", - "smirk_cat": "😼", - "chart_with_downwards_trend": "📉", - "chart_with_upwards_trend": "📈", - "chart": "💹", - "mega": "📣", - "checkered_flag": "🏁", - "accept": "🉑", - "ideograph_advantage": "🉐", - "congratulations": "㊗", - "secret": "㊙", - "m": "Ⓜ", - "city_sunset": "🌆", - "clapper": "🎬", - "clap": "👏", - "beers": "🍻", - "clock830": "🕣", - "clock8": "🕗", - "clock1130": "🕦", - "clock11": "🕚", - "clock530": "🕠", - "clock5": "🕔", - "clock430": "🕟", - "clock4": "🕓", - "clock930": "🕤", - "clock9": "🕘", - "clock130": "🕜", - "clock1": "🕐", - "clock730": "🕢", - "clock7": "🕖", - "clock630": "🕡", - "clock6": "🕕", - "clock1030": "🕥", - "clock10": "🕙", - "clock330": "🕞", - "clock3": "🕒", - "clock1230": "🕧", - "clock12": "🕛", - "clock230": "🕝", - "clock2": "🕑", - "arrows_clockwise": "🔃", - "repeat": "🔁", - "repeat_one": "🔂", - "closed_lock_with_key": "🔐", - "mailbox_closed": "📪", - "mailbox": "📫", - "cloud_with_tornado": "🌪", - "cocktail": "🍸", - "boom": "💥", - "compression": "🗜", - 
"confounded": "😖", - "confused": "😕", - "rice": "🍚", - "cow2": "🐄", - "cricket_bat_and_ball": "🏏", - "x": "❌", - "cry": "😢", - "curry": "🍛", - "dagger_knife": "🗡", - "dancer": "💃", - "dark_sunglasses": "🕶", - "dash": "💨", - "truck": "🚚", - "derelict_house_building": "🏚", - "diamond_shape_with_a_dot_inside": "💠", - "dart": "🎯", - "disappointed_relieved": "😥", - "disappointed": "😞", - "do_not_litter": "🚯", - "dog2": "🐕", - "flipper": "🐬", - "loop": "➿", - "bangbang": "‼", - "double_vertical_bar": "⏸", - "dove_of_peace": "🕊", - "small_red_triangle_down": "🔻", - "arrow_down_small": "🔽", - "arrow_down": "⬇", - "dromedary_camel": "🐪", - "e__mail": "📧", - "corn": "🌽", - "ear_of_rice": "🌾", - "earth_americas": "🌎", - "earth_asia": "🌏", - "earth_africa": "🌍", - "eight_pointed_black_star": "✴", - "eight_spoked_asterisk": "✳", - "eject_symbol": "⏏", - "bulb": "💡", - "emoji_modifier_fitzpatrick_type__1__2": "🏻", - "emoji_modifier_fitzpatrick_type__3": "🏼", - "emoji_modifier_fitzpatrick_type__4": "🏽", - "emoji_modifier_fitzpatrick_type__5": "🏾", - "emoji_modifier_fitzpatrick_type__6": "🏿", - "end": "🔚", - "email": "✉", - "european_castle": "🏰", - "european_post_office": "🏤", - "interrobang": "⁉", - "expressionless": "😑", - "eyeglasses": "👓", - "massage": "💆", - "yum": "😋", - "scream": "😱", - "kissing_heart": "😘", - "sweat": "😓", - "face_with_head__bandage": "🤕", - "triumph": "😤", - "mask": "😷", - "no_good": "🙅", - "ok_woman": "🙆", - "open_mouth": "😮", - "cold_sweat": "😰", - "stuck_out_tongue": "😛", - "stuck_out_tongue_closed_eyes": "😝", - "stuck_out_tongue_winking_eye": "😜", - "joy": "😂", - "no_mouth": "😶", - "santa": "🎅", - "fax": "📠", - "fearful": "😨", - "field_hockey_stick_and_ball": "🏑", - "first_quarter_moon_with_face": "🌛", - "fish_cake": "🍥", - "fishing_pole_and_fish": "🎣", - "facepunch": "👊", - "punch": "👊", - "flag_for_afghanistan": "🇦🇫", - "flag_for_albania": "🇦🇱", - "flag_for_algeria": "🇩🇿", - "flag_for_american_samoa": "🇦🇸", - "flag_for_andorra": "🇦🇩", - "flag_for_angola": "🇦🇴", - "flag_for_anguilla": "🇦🇮", - "flag_for_antarctica": "🇦🇶", - "flag_for_antigua_&_barbuda": "🇦🇬", - "flag_for_argentina": "🇦🇷", - "flag_for_armenia": "🇦🇲", - "flag_for_aruba": "🇦🇼", - "flag_for_ascension_island": "🇦🇨", - "flag_for_australia": "🇦🇺", - "flag_for_austria": "🇦🇹", - "flag_for_azerbaijan": "🇦🇿", - "flag_for_bahamas": "🇧🇸", - "flag_for_bahrain": "🇧🇭", - "flag_for_bangladesh": "🇧🇩", - "flag_for_barbados": "🇧🇧", - "flag_for_belarus": "🇧🇾", - "flag_for_belgium": "🇧🇪", - "flag_for_belize": "🇧🇿", - "flag_for_benin": "🇧🇯", - "flag_for_bermuda": "🇧🇲", - "flag_for_bhutan": "🇧🇹", - "flag_for_bolivia": "🇧🇴", - "flag_for_bosnia_&_herzegovina": "🇧🇦", - "flag_for_botswana": "🇧🇼", - "flag_for_bouvet_island": "🇧🇻", - "flag_for_brazil": "🇧🇷", - "flag_for_british_indian_ocean_territory": "🇮🇴", - "flag_for_british_virgin_islands": "🇻🇬", - "flag_for_brunei": "🇧🇳", - "flag_for_bulgaria": "🇧🇬", - "flag_for_burkina_faso": "🇧🇫", - "flag_for_burundi": "🇧🇮", - "flag_for_cambodia": "🇰🇭", - "flag_for_cameroon": "🇨🇲", - "flag_for_canada": "🇨🇦", - "flag_for_canary_islands": "🇮🇨", - "flag_for_cape_verde": "🇨🇻", - "flag_for_caribbean_netherlands": "🇧🇶", - "flag_for_cayman_islands": "🇰🇾", - "flag_for_central_african_republic": "🇨🇫", - "flag_for_ceuta_&_melilla": "🇪🇦", - "flag_for_chad": "🇹🇩", - "flag_for_chile": "🇨🇱", - "flag_for_china": "🇨🇳", - "flag_for_christmas_island": "🇨🇽", - "flag_for_clipperton_island": "🇨🇵", - "flag_for_cocos__islands": "🇨🇨", - "flag_for_colombia": "🇨🇴", - "flag_for_comoros": "🇰🇲", - 
"flag_for_congo____brazzaville": "🇨🇬", - "flag_for_congo____kinshasa": "🇨🇩", - "flag_for_cook_islands": "🇨🇰", - "flag_for_costa_rica": "🇨🇷", - "flag_for_croatia": "🇭🇷", - "flag_for_cuba": "🇨🇺", - "flag_for_curaçao": "🇨🇼", - "flag_for_cyprus": "🇨🇾", - "flag_for_czech_republic": "🇨🇿", - "flag_for_côte_d’ivoire": "🇨🇮", - "flag_for_denmark": "🇩🇰", - "flag_for_diego_garcia": "🇩🇬", - "flag_for_djibouti": "🇩🇯", - "flag_for_dominica": "🇩🇲", - "flag_for_dominican_republic": "🇩🇴", - "flag_for_ecuador": "🇪🇨", - "flag_for_egypt": "🇪🇬", - "flag_for_el_salvador": "🇸🇻", - "flag_for_equatorial_guinea": "🇬🇶", - "flag_for_eritrea": "🇪🇷", - "flag_for_estonia": "🇪🇪", - "flag_for_ethiopia": "🇪🇹", - "flag_for_european_union": "🇪🇺", - "flag_for_falkland_islands": "🇫🇰", - "flag_for_faroe_islands": "🇫🇴", - "flag_for_fiji": "🇫🇯", - "flag_for_finland": "🇫🇮", - "flag_for_france": "🇫🇷", - "flag_for_french_guiana": "🇬🇫", - "flag_for_french_polynesia": "🇵🇫", - "flag_for_french_southern_territories": "🇹🇫", - "flag_for_gabon": "🇬🇦", - "flag_for_gambia": "🇬🇲", - "flag_for_georgia": "🇬🇪", - "flag_for_germany": "🇩🇪", - "flag_for_ghana": "🇬🇭", - "flag_for_gibraltar": "🇬🇮", - "flag_for_greece": "🇬🇷", - "flag_for_greenland": "🇬🇱", - "flag_for_grenada": "🇬🇩", - "flag_for_guadeloupe": "🇬🇵", - "flag_for_guam": "🇬🇺", - "flag_for_guatemala": "🇬🇹", - "flag_for_guernsey": "🇬🇬", - "flag_for_guinea": "🇬🇳", - "flag_for_guinea__bissau": "🇬🇼", - "flag_for_guyana": "🇬🇾", - "flag_for_haiti": "🇭🇹", - "flag_for_heard_&_mcdonald_islands": "🇭🇲", - "flag_for_honduras": "🇭🇳", - "flag_for_hong_kong": "🇭🇰", - "flag_for_hungary": "🇭🇺", - "flag_for_iceland": "🇮🇸", - "flag_for_india": "🇮🇳", - "flag_for_indonesia": "🇮🇩", - "flag_for_iran": "🇮🇷", - "flag_for_iraq": "🇮🇶", - "flag_for_ireland": "🇮🇪", - "flag_for_isle_of_man": "🇮🇲", - "flag_for_israel": "🇮🇱", - "flag_for_italy": "🇮🇹", - "flag_for_jamaica": "🇯🇲", - "flag_for_japan": "🇯🇵", - "flag_for_jersey": "🇯🇪", - "flag_for_jordan": "🇯🇴", - "flag_for_kazakhstan": "🇰🇿", - "flag_for_kenya": "🇰🇪", - "flag_for_kiribati": "🇰🇮", - "flag_for_kosovo": "🇽🇰", - "flag_for_kuwait": "🇰🇼", - "flag_for_kyrgyzstan": "🇰🇬", - "flag_for_laos": "🇱🇦", - "flag_for_latvia": "🇱🇻", - "flag_for_lebanon": "🇱🇧", - "flag_for_lesotho": "🇱🇸", - "flag_for_liberia": "🇱🇷", - "flag_for_libya": "🇱🇾", - "flag_for_liechtenstein": "🇱🇮", - "flag_for_lithuania": "🇱🇹", - "flag_for_luxembourg": "🇱🇺", - "flag_for_macau": "🇲🇴", - "flag_for_macedonia": "🇲🇰", - "flag_for_madagascar": "🇲🇬", - "flag_for_malawi": "🇲🇼", - "flag_for_malaysia": "🇲🇾", - "flag_for_maldives": "🇲🇻", - "flag_for_mali": "🇲🇱", - "flag_for_malta": "🇲🇹", - "flag_for_marshall_islands": "🇲🇭", - "flag_for_martinique": "🇲🇶", - "flag_for_mauritania": "🇲🇷", - "flag_for_mauritius": "🇲🇺", - "flag_for_mayotte": "🇾🇹", - "flag_for_mexico": "🇲🇽", - "flag_for_micronesia": "🇫🇲", - "flag_for_moldova": "🇲🇩", - "flag_for_monaco": "🇲🇨", - "flag_for_mongolia": "🇲🇳", - "flag_for_montenegro": "🇲🇪", - "flag_for_montserrat": "🇲🇸", - "flag_for_morocco": "🇲🇦", - "flag_for_mozambique": "🇲🇿", - "flag_for_myanmar": "🇲🇲", - "flag_for_namibia": "🇳🇦", - "flag_for_nauru": "🇳🇷", - "flag_for_nepal": "🇳🇵", - "flag_for_netherlands": "🇳🇱", - "flag_for_new_caledonia": "🇳🇨", - "flag_for_new_zealand": "🇳🇿", - "flag_for_nicaragua": "🇳🇮", - "flag_for_niger": "🇳🇪", - "flag_for_nigeria": "🇳🇬", - "flag_for_niue": "🇳🇺", - "flag_for_norfolk_island": "🇳🇫", - "flag_for_north_korea": "🇰🇵", - "flag_for_northern_mariana_islands": "🇲🇵", - "flag_for_norway": "🇳🇴", - "flag_for_oman": "🇴🇲", - "flag_for_pakistan": "🇵🇰", - 
"flag_for_palau": "🇵🇼", - "flag_for_palestinian_territories": "🇵🇸", - "flag_for_panama": "🇵🇦", - "flag_for_papua_new_guinea": "🇵🇬", - "flag_for_paraguay": "🇵🇾", - "flag_for_peru": "🇵🇪", - "flag_for_philippines": "🇵🇭", - "flag_for_pitcairn_islands": "🇵🇳", - "flag_for_poland": "🇵🇱", - "flag_for_portugal": "🇵🇹", - "flag_for_puerto_rico": "🇵🇷", - "flag_for_qatar": "🇶🇦", - "flag_for_romania": "🇷🇴", - "flag_for_russia": "🇷🇺", - "flag_for_rwanda": "🇷🇼", - "flag_for_réunion": "🇷🇪", - "flag_for_samoa": "🇼🇸", - "flag_for_san_marino": "🇸🇲", - "flag_for_saudi_arabia": "🇸🇦", - "flag_for_senegal": "🇸🇳", - "flag_for_serbia": "🇷🇸", - "flag_for_seychelles": "🇸🇨", - "flag_for_sierra_leone": "🇸🇱", - "flag_for_singapore": "🇸🇬", - "flag_for_sint_maarten": "🇸🇽", - "flag_for_slovakia": "🇸🇰", - "flag_for_slovenia": "🇸🇮", - "flag_for_solomon_islands": "🇸🇧", - "flag_for_somalia": "🇸🇴", - "flag_for_south_africa": "🇿🇦", - "flag_for_south_georgia_&_south_sandwich_islands": "🇬🇸", - "flag_for_south_korea": "🇰🇷", - "flag_for_south_sudan": "🇸🇸", - "flag_for_spain": "🇪🇸", - "flag_for_sri_lanka": "🇱🇰", - "flag_for_st._barthélemy": "🇧🇱", - "flag_for_st._helena": "🇸🇭", - "flag_for_st._kitts_&_nevis": "🇰🇳", - "flag_for_st._lucia": "🇱🇨", - "flag_for_st._martin": "🇲🇫", - "flag_for_st._pierre_&_miquelon": "🇵🇲", - "flag_for_st._vincent_&_grenadines": "🇻🇨", - "flag_for_sudan": "🇸🇩", - "flag_for_suriname": "🇸🇷", - "flag_for_svalbard_&_jan_mayen": "🇸🇯", - "flag_for_swaziland": "🇸🇿", - "flag_for_sweden": "🇸🇪", - "flag_for_switzerland": "🇨🇭", - "flag_for_syria": "🇸🇾", - "flag_for_são_tomé_&_príncipe": "🇸🇹", - "flag_for_taiwan": "🇹🇼", - "flag_for_tajikistan": "🇹🇯", - "flag_for_tanzania": "🇹🇿", - "flag_for_thailand": "🇹🇭", - "flag_for_timor__leste": "🇹🇱", - "flag_for_togo": "🇹🇬", - "flag_for_tokelau": "🇹🇰", - "flag_for_tonga": "🇹🇴", - "flag_for_trinidad_&_tobago": "🇹🇹", - "flag_for_tristan_da_cunha": "🇹🇦", - "flag_for_tunisia": "🇹🇳", - "flag_for_turkey": "🇹🇷", - "flag_for_turkmenistan": "🇹🇲", - "flag_for_turks_&_caicos_islands": "🇹🇨", - "flag_for_tuvalu": "🇹🇻", - "flag_for_u.s._outlying_islands": "🇺🇲", - "flag_for_u.s._virgin_islands": "🇻🇮", - "flag_for_uganda": "🇺🇬", - "flag_for_ukraine": "🇺🇦", - "flag_for_united_arab_emirates": "🇦🇪", - "flag_for_united_kingdom": "🇬🇧", - "flag_for_united_states": "🇺🇸", - "flag_for_uruguay": "🇺🇾", - "flag_for_uzbekistan": "🇺🇿", - "flag_for_vanuatu": "🇻🇺", - "flag_for_vatican_city": "🇻🇦", - "flag_for_venezuela": "🇻🇪", - "flag_for_vietnam": "🇻🇳", - "flag_for_wallis_&_futuna": "🇼🇫", - "flag_for_western_sahara": "🇪🇭", - "flag_for_yemen": "🇾🇪", - "flag_for_zambia": "🇿🇲", - "flag_for_zimbabwe": "🇿🇼", - "flag_for_åland_islands": "🇦🇽", - "golf": "⛳", - "fleur__de__lis": "⚜", - "muscle": "💪", - "flushed": "😳", - "frame_with_picture": "🖼", - "fries": "🍟", - "frog": "🐸", - "hatched_chick": "🐥", - "frowning": "😦", - "fuelpump": "⛽", - "full_moon_with_face": "🌝", - "gem": "💎", - "star2": "🌟", - "golfer": "🏌", - "mortar_board": "🎓", - "grimacing": "😬", - "smile_cat": "😸", - "grinning": "😀", - "grin": "😁", - "heartpulse": "💗", - "guardsman": "💂", - "haircut": "💇", - "hamster": "🐹", - "raising_hand": "🙋", - "headphones": "🎧", - "hear_no_evil": "🙉", - "cupid": "💘", - "gift_heart": "💝", - "heart": "❤", - "exclamation": "❗", - "heavy_exclamation_mark": "❗", - "heavy_heart_exclamation_mark_ornament": "❣", - "o": "⭕", - "helm_symbol": "⎈", - "helmet_with_white_cross": "⛑", - "high_heel": "👠", - "bullettrain_side": "🚄", - "bullettrain_front": "🚅", - "high_brightness": "🔆", - "zap": "⚡", - "hocho": "🔪", - "knife": "🔪", - 
"bee": "🐝", - "traffic_light": "🚥", - "racehorse": "🐎", - "coffee": "☕", - "hotsprings": "♨", - "hourglass": "⌛", - "hourglass_flowing_sand": "⏳", - "house_buildings": "🏘", - "100": "💯", - "hushed": "😯", - "ice_hockey_stick_and_puck": "🏒", - "imp": "👿", - "information_desk_person": "💁", - "information_source": "ℹ", - "capital_abcd": "🔠", - "abc": "🔤", - "abcd": "🔡", - "1234": "🔢", - "symbols": "🔣", - "izakaya_lantern": "🏮", - "lantern": "🏮", - "jack_o_lantern": "🎃", - "dolls": "🎎", - "japanese_goblin": "👺", - "japanese_ogre": "👹", - "beginner": "🔰", - "zero": "0️⃣", - "one": "1️⃣", - "ten": "🔟", - "two": "2️⃣", - "three": "3️⃣", - "four": "4️⃣", - "five": "5️⃣", - "six": "6️⃣", - "seven": "7️⃣", - "eight": "8️⃣", - "nine": "9️⃣", - "couplekiss": "💏", - "kissing_cat": "😽", - "kissing": "😗", - "kissing_closed_eyes": "😚", - "kissing_smiling_eyes": "😙", - "beetle": "🐞", - "large_blue_circle": "🔵", - "last_quarter_moon_with_face": "🌜", - "leaves": "🍃", - "mag": "🔍", - "left_right_arrow": "↔", - "leftwards_arrow_with_hook": "↩", - "arrow_left": "⬅", - "lock": "🔒", - "lock_with_ink_pen": "🔏", - "sob": "😭", - "low_brightness": "🔅", - "lower_left_ballpoint_pen": "🖊", - "lower_left_crayon": "🖍", - "lower_left_fountain_pen": "🖋", - "lower_left_paintbrush": "🖌", - "mahjong": "🀄", - "couple": "👫", - "man_in_business_suit_levitating": "🕴", - "man_with_gua_pi_mao": "👲", - "man_with_turban": "👳", - "mans_shoe": "👞", - "shoe": "👞", - "menorah_with_nine_branches": "🕎", - "mens": "🚹", - "minidisc": "💽", - "iphone": "📱", - "calling": "📲", - "money__mouth_face": "🤑", - "moneybag": "💰", - "rice_scene": "🎑", - "mountain_bicyclist": "🚵", - "mouse2": "🐁", - "lips": "👄", - "moyai": "🗿", - "notes": "🎶", - "nail_care": "💅", - "ab": "🆎", - "negative_squared_cross_mark": "❎", - "a": "🅰", - "b": "🅱", - "o2": "🅾", - "parking": "🅿", - "new_moon_with_face": "🌚", - "no_entry_sign": "🚫", - "underage": "🔞", - "non__potable_water": "🚱", - "arrow_upper_right": "↗", - "arrow_upper_left": "↖", - "office": "🏢", - "older_man": "👴", - "older_woman": "👵", - "om_symbol": "🕉", - "on": "🔛", - "book": "📖", - "unlock": "🔓", - "mailbox_with_no_mail": "📭", - "mailbox_with_mail": "📬", - "cd": "💿", - "tada": "🎉", - "feet": "🐾", - "walking": "🚶", - "pencil2": "✏", - "pensive": "😔", - "persevere": "😣", - "bow": "🙇", - "raised_hands": "🙌", - "person_with_ball": "⛹", - "person_with_blond_hair": "👱", - "pray": "🙏", - "person_with_pouting_face": "🙎", - "computer": "💻", - "pig2": "🐖", - "hankey": "💩", - "poop": "💩", - "shit": "💩", - "bamboo": "🎍", - "gun": "🔫", - "black_joker": "🃏", - "rotating_light": "🚨", - "cop": "👮", - "stew": "🍲", - "pouch": "👝", - "pouting_cat": "😾", - "rage": "😡", - "put_litter_in_its_place": "🚮", - "rabbit2": "🐇", - "racing_motorcycle": "🏍", - "radioactive_sign": "☢", - "fist": "✊", - "hand": "✋", - "raised_hand_with_fingers_splayed": "🖐", - "raised_hand_with_part_between_middle_and_ring_fingers": "🖖", - "blue_car": "🚙", - "apple": "🍎", - "relieved": "😌", - "reversed_hand_with_middle_finger_extended": "🖕", - "mag_right": "🔎", - "arrow_right_hook": "↪", - "sweet_potato": "🍠", - "robot": "🤖", - "rolled__up_newspaper": "🗞", - "rowboat": "🚣", - "runner": "🏃", - "running": "🏃", - "running_shirt_with_sash": "🎽", - "boat": "⛵", - "scales": "⚖", - "school_satchel": "🎒", - "scorpius": "♏", - "see_no_evil": "🙈", - "sheep": "🐑", - "stars": "🌠", - "cake": "🍰", - "six_pointed_star": "🔯", - "ski": "🎿", - "sleeping_accommodation": "🛌", - "sleeping": "😴", - "sleepy": "😪", - "sleuth_or_spy": "🕵", - "heart_eyes_cat": "😻", - "smiley_cat": "😺", 
- "innocent": "😇", - "heart_eyes": "😍", - "smiling_imp": "😈", - "smiley": "😃", - "sweat_smile": "😅", - "smile": "😄", - "laughing": "😆", - "satisfied": "😆", - "blush": "😊", - "smirk": "😏", - "smoking": "🚬", - "snow_capped_mountain": "🏔", - "soccer": "⚽", - "icecream": "🍦", - "soon": "🔜", - "arrow_lower_right": "↘", - "arrow_lower_left": "↙", - "speak_no_evil": "🙊", - "speaker": "🔈", - "mute": "🔇", - "sound": "🔉", - "loud_sound": "🔊", - "speaking_head_in_silhouette": "🗣", - "spiral_calendar_pad": "🗓", - "spiral_note_pad": "🗒", - "shell": "🐚", - "sweat_drops": "💦", - "u5272": "🈹", - "u5408": "🈴", - "u55b6": "🈺", - "u6307": "🈯", - "u6708": "🈷", - "u6709": "🈶", - "u6e80": "🈵", - "u7121": "🈚", - "u7533": "🈸", - "u7981": "🈲", - "u7a7a": "🈳", - "cl": "🆑", - "cool": "🆒", - "free": "🆓", - "id": "🆔", - "koko": "🈁", - "sa": "🈂", - "new": "🆕", - "ng": "🆖", - "ok": "🆗", - "sos": "🆘", - "up": "🆙", - "vs": "🆚", - "steam_locomotive": "🚂", - "ramen": "🍜", - "partly_sunny": "⛅", - "city_sunrise": "🌇", - "surfer": "🏄", - "swimmer": "🏊", - "shirt": "👕", - "tshirt": "👕", - "table_tennis_paddle_and_ball": "🏓", - "tea": "🍵", - "tv": "📺", - "three_button_mouse": "🖱", - "+1": "👍", - "thumbsup": "👍", - "__1": "👎", - "-1": "👎", - "thumbsdown": "👎", - "thunder_cloud_and_rain": "⛈", - "tiger2": "🐅", - "tophat": "🎩", - "top": "🔝", - "tm": "™", - "train2": "🚆", - "triangular_flag_on_post": "🚩", - "trident": "🔱", - "twisted_rightwards_arrows": "🔀", - "unamused": "😒", - "small_red_triangle": "🔺", - "arrow_up_small": "🔼", - "arrow_up_down": "↕", - "upside__down_face": "🙃", - "arrow_up": "⬆", - "v": "✌", - "vhs": "📼", - "wc": "🚾", - "ocean": "🌊", - "waving_black_flag": "🏴", - "wave": "👋", - "waving_white_flag": "🏳", - "moon": "🌔", - "scream_cat": "🙀", - "weary": "😩", - "weight_lifter": "🏋", - "whale2": "🐋", - "wheelchair": "♿", - "point_down": "👇", - "grey_exclamation": "❕", - "white_frowning_face": "☹", - "white_check_mark": "✅", - "point_left": "👈", - "white_medium_small_square": "◽", - "star": "⭐", - "grey_question": "❔", - "point_right": "👉", - "relaxed": "☺", - "white_sun_behind_cloud": "🌥", - "white_sun_behind_cloud_with_rain": "🌦", - "white_sun_with_small_cloud": "🌤", - "point_up_2": "👆", - "point_up": "☝", - "wind_blowing_face": "🌬", - "wink": "😉", - "wolf": "🐺", - "dancers": "👯", - "boot": "👢", - "womans_clothes": "👚", - "womans_hat": "👒", - "sandal": "👡", - "womens": "🚺", - "worried": "😟", - "gift": "🎁", - "zipper__mouth_face": "🤐", - "regional_indicator_a": "🇦", - "regional_indicator_b": "🇧", - "regional_indicator_c": "🇨", - "regional_indicator_d": "🇩", - "regional_indicator_e": "🇪", - "regional_indicator_f": "🇫", - "regional_indicator_g": "🇬", - "regional_indicator_h": "🇭", - "regional_indicator_i": "🇮", - "regional_indicator_j": "🇯", - "regional_indicator_k": "🇰", - "regional_indicator_l": "🇱", - "regional_indicator_m": "🇲", - "regional_indicator_n": "🇳", - "regional_indicator_o": "🇴", - "regional_indicator_p": "🇵", - "regional_indicator_q": "🇶", - "regional_indicator_r": "🇷", - "regional_indicator_s": "🇸", - "regional_indicator_t": "🇹", - "regional_indicator_u": "🇺", - "regional_indicator_v": "🇻", - "regional_indicator_w": "🇼", - "regional_indicator_x": "🇽", - "regional_indicator_y": "🇾", - "regional_indicator_z": "🇿", -} diff --git a/spaces/algomuffin/jojo_fork/app.py b/spaces/algomuffin/jojo_fork/app.py deleted file mode 100644 index 6eac8d6c3e698ed3ab4da3342af23a7f71dee8f7..0000000000000000000000000000000000000000 --- a/spaces/algomuffin/jojo_fork/app.py +++ /dev/null @@ -1,218 +0,0 @@ -import os 
-os.system("pip install gradio==2.8.0b5") -os.system("pip install -r requirements.txt") -os.system("pip freeze") - -from PIL import Image -import torch -import gradio as gr -import torch -torch.backends.cudnn.benchmark = True -from torchvision import transforms, utils -from util import * -from PIL import Image -import math -import random -import numpy as np -from torch import nn, autograd, optim -from torch.nn import functional as F -from tqdm import tqdm -import lpips -from model import * - - -#from e4e_projection import projection as e4e_projection - -from copy import deepcopy -import imageio - -import os -import sys -import numpy as np -from PIL import Image -import torch -import torchvision.transforms as transforms -from argparse import Namespace -from e4e.models.psp import pSp -from util import * -from huggingface_hub import hf_hub_download - -device= 'cpu' -model_path_e = hf_hub_download(repo_id="akhaliq/JoJoGAN_e4e_ffhq_encode", filename="e4e_ffhq_encode.pt") -ckpt = torch.load(model_path_e, map_location='cpu') -opts = ckpt['opts'] -opts['checkpoint_path'] = model_path_e -opts= Namespace(**opts) -net = pSp(opts, device).eval().to(device) - -@ torch.no_grad() -def projection(img, name, device='cuda'): - - - transform = transforms.Compose( - [ - transforms.Resize(256), - transforms.CenterCrop(256), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), - ] - ) - img = transform(img).unsqueeze(0).to(device) - images, w_plus = net(img, randomize_noise=False, return_latents=True) - result_file = {} - result_file['latent'] = w_plus[0] - torch.save(result_file, name) - return w_plus[0] - - - - -device = 'cpu' - - -latent_dim = 512 - -model_path_s = hf_hub_download(repo_id="akhaliq/jojogan-stylegan2-ffhq-config-f", filename="stylegan2-ffhq-config-f.pt") -original_generator = Generator(1024, latent_dim, 8, 2).to(device) -ckpt = torch.load(model_path_s, map_location=lambda storage, loc: storage) -original_generator.load_state_dict(ckpt["g_ema"], strict=False) -mean_latent = original_generator.mean_latent(10000) - -generatorjojo = deepcopy(original_generator) - -generatordisney = deepcopy(original_generator) - -generatorjinx = deepcopy(original_generator) - -generatorcaitlyn = deepcopy(original_generator) - -generatoryasuho = deepcopy(original_generator) - -generatorarcanemulti = deepcopy(original_generator) - -generatorart = deepcopy(original_generator) - -generatorspider = deepcopy(original_generator) - -generatorsketch = deepcopy(original_generator) - -generatordisneynoc = deepcopy(original_generator) - -transform = transforms.Compose( - [ - transforms.Resize((1024, 1024)), - transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), - ] -) - - - - -modeljojo = hf_hub_download(repo_id="akhaliq/JoJoGAN-jojo", filename="jojo_preserve_color.pt") - - -ckptjojo = torch.load(modeljojo, map_location=lambda storage, loc: storage) -generatorjojo.load_state_dict(ckptjojo["g"], strict=False) - - -modeldisney = hf_hub_download(repo_id="akhaliq/jojogan-disney", filename="disney_preserve_color.pt") - -ckptdisney = torch.load(modeldisney, map_location=lambda storage, loc: storage) -generatordisney.load_state_dict(ckptdisney["g"], strict=False) - - -modeljinx = hf_hub_download(repo_id="akhaliq/jojo-gan-jinx", filename="arcane_jinx_preserve_color.pt") - -ckptjinx = torch.load(modeljinx, map_location=lambda storage, loc: storage) -generatorjinx.load_state_dict(ckptjinx["g"], strict=False) - - -modelcaitlyn = hf_hub_download(repo_id="akhaliq/jojogan-arcane", 
filename="arcane_caitlyn_preserve_color.pt") - -ckptcaitlyn = torch.load(modelcaitlyn, map_location=lambda storage, loc: storage) -generatorcaitlyn.load_state_dict(ckptcaitlyn["g"], strict=False) - - -modelyasuho = hf_hub_download(repo_id="akhaliq/JoJoGAN-jojo", filename="jojo_yasuho_preserve_color.pt") - -ckptyasuho = torch.load(modelyasuho, map_location=lambda storage, loc: storage) -generatoryasuho.load_state_dict(ckptyasuho["g"], strict=False) - - -model_arcane_multi = hf_hub_download(repo_id="akhaliq/jojogan-arcane", filename="arcane_multi_preserve_color.pt") - -ckptarcanemulti = torch.load(model_arcane_multi, map_location=lambda storage, loc: storage) -generatorarcanemulti.load_state_dict(ckptarcanemulti["g"], strict=False) - - -modelart = hf_hub_download(repo_id="akhaliq/jojo-gan-art", filename="art.pt") - -ckptart = torch.load(modelart, map_location=lambda storage, loc: storage) -generatorart.load_state_dict(ckptart["g"], strict=False) - - -modelSpiderverse = hf_hub_download(repo_id="akhaliq/jojo-gan-spiderverse", filename="Spiderverse-face-500iters-8face.pt") - -ckptspider = torch.load(modelSpiderverse, map_location=lambda storage, loc: storage) -generatorspider.load_state_dict(ckptspider["g"], strict=False) - -modelSketch = hf_hub_download(repo_id="akhaliq/jojogan-sketch", filename="sketch_multi.pt") - -ckptsketch = torch.load(modelSketch, map_location=lambda storage, loc: storage) -generatorsketch.load_state_dict(ckptsketch["g"], strict=False) - - -modeldisneynoc = hf_hub_download(repo_id="algomuffin/disney", filename="disney.pt") - -ckptMy = torch.load(modeldisneynoc, map_location=lambda storage, loc: storage) -generatordisneynoc.load_state_dict(ckptMy["g"], strict=False) - - - - -def inference(img, model): - img.save('out.jpg') - aligned_face = align_face('out.jpg') - - my_w = projection(aligned_face, "test.pt", device).unsqueeze(0) - if model == 'JoJo': - with torch.no_grad(): - my_sample = generatorjojo(my_w, input_is_latent=True) - elif model == 'Disney': - with torch.no_grad(): - my_sample = generatordisneynoc(my_w, input_is_latent=True) - elif model == 'Jinx': - with torch.no_grad(): - my_sample = generatorjinx(my_w, input_is_latent=True) - elif model == 'Caitlyn': - with torch.no_grad(): - my_sample = generatorcaitlyn(my_w, input_is_latent=True) - elif model == 'Yasuho': - with torch.no_grad(): - my_sample = generatoryasuho(my_w, input_is_latent=True) - elif model == 'Arcane Multi': - with torch.no_grad(): - my_sample = generatorarcanemulti(my_w, input_is_latent=True) - elif model == 'Art': - with torch.no_grad(): - my_sample = generatorart(my_w, input_is_latent=True) - elif model == 'Spider-Verse': - with torch.no_grad(): - my_sample = generatorspider(my_w, input_is_latent=True) - else: - with torch.no_grad(): - my_sample = generatorsketch(my_w, input_is_latent=True) - - - npimage = my_sample[0].permute(1, 2, 0).detach().numpy() - imageio.imwrite('filename.jpeg', npimage) - return 'filename.jpeg' - -title = "JoJoGAN_fork" -description = "Gradio Demo for JoJoGAN: This is a fork made by Alessandro Miragliotta in order to test the network with the Disney no-preserve color model. To use it, simply upload your image, or click one of the examples to load them." - -article = "

        JoJoGAN: One Shot Face Stylization | Github Repo Pytorch

        " - -examples=[['mona.png','Disney'],['man.jpg','Disney'],['iu.jpeg','Disney'],['baby-face.jpg','Disney']] -gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Dropdown(choices=['Disney'], type="value", default='Disney', label="Model")], gr.outputs.Image(type="file"),title=title,description=description,article=article,allow_flagging=False,examples=examples,allow_screenshot=False).launch(enable_queue=True, cache_examples=True) diff --git a/spaces/aliabd/SummerTime/evaluation/bleu_metric.py b/spaces/aliabd/SummerTime/evaluation/bleu_metric.py deleted file mode 100644 index ea6c0b5730d647aacca797ff5303c74b8e7517fb..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/evaluation/bleu_metric.py +++ /dev/null @@ -1,20 +0,0 @@ -from summ_eval.bleu_metric import BleuMetric -from evaluation.summeval_metric import SummEvalMetric -from typing import List, Dict - - -class Bleu(SummEvalMetric): - metric_name = "bleu" - range = (0, 100) - higher_is_better = True - requires_heavy_compute = False - - def __init__(self): - se_metric = BleuMetric() - super(Bleu, self).__init__(se_metric) - - def evaluate( - self, inputs: List[str], targets: List[str], keys: List[str] = ["bleu"] - ) -> Dict[str, float]: - # TODO zhangir: potentially update when dataset api is merged. - return super(Bleu, self).evaluate(inputs, targets, keys) diff --git a/spaces/alibaba-pai/pai-diffusion-artist-large-zh/app.py b/spaces/alibaba-pai/pai-diffusion-artist-large-zh/app.py deleted file mode 100644 index f780681517d4a67ccbd81fe69f5126000f184323..0000000000000000000000000000000000000000 --- a/spaces/alibaba-pai/pai-diffusion-artist-large-zh/app.py +++ /dev/null @@ -1,33 +0,0 @@ -import gradio as gr -from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler -import torch -from PIL import Image - -model_id = "alibaba-pai/pai-diffusion-artist-large-zh" -pipe = StableDiffusionPipeline.from_pretrained(model_id) -pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) -pipe = pipe.to("cpu") - -def infer_text2img(prompt, guide, steps): - image = pipe([prompt], guidance_scale=guide, num_inference_steps=steps).images[0] - return image - -with gr.Blocks() as demo: - examples = [ - ["草地上的帐篷,背景是山脉"], - ["卧室里有一张床和一张桌子"], - ["雾蒙蒙的日出在湖面上"], - ] - with gr.Row(): - with gr.Column(scale=1, ): - image_out = gr.Image(label = '输出(output)') - with gr.Column(scale=1, ): - prompt = gr.Textbox(label = '提示词(prompt)') - submit_btn = gr.Button("生成图像(Generate)") - with gr.Row(scale=0.5 ): - guide = gr.Slider(2, 15, value = 7, label = '文本引导强度(guidance scale)') - steps = gr.Slider(10, 50, value = 20, step = 1, label = '迭代次数(inference steps)') - ex = gr.Examples(examples, fn=infer_text2img, inputs=[prompt, guide, steps], outputs=image_out) - submit_btn.click(fn = infer_text2img, inputs = [prompt, guide, steps], outputs = image_out) - -demo.queue(concurrency_count=1, max_size=8).launch() diff --git a/spaces/allandclive/Uganda_MMS/tts.py b/spaces/allandclive/Uganda_MMS/tts.py deleted file mode 100644 index 10b3ac1dd19fda660fb6f6dd4fed894afe81f46e..0000000000000000000000000000000000000000 --- a/spaces/allandclive/Uganda_MMS/tts.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import os -import re -import tempfile -import torch -import sys -import gradio as gr - -from huggingface_hub import hf_hub_download - -# Setup TTS env -if "vits" not in sys.path: - sys.path.append("vits") - -from vits import commons, utils -from vits.models import SynthesizerTrn - - -class TextMapper(object): - def __init__(self, vocab_file): - self.symbols = [ - x.replace("\n", "") for x in open(vocab_file, encoding="utf-8").readlines() - ] - self.SPACE_ID = self.symbols.index(" ") - self._symbol_to_id = {s: i for i, s in enumerate(self.symbols)} - self._id_to_symbol = {i: s for i, s in enumerate(self.symbols)} - - def text_to_sequence(self, text, cleaner_names): - """Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - """ - sequence = [] - clean_text = text.strip() - for symbol in clean_text: - symbol_id = self._symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - def uromanize(self, text, uroman_pl): - iso = "xxx" - with tempfile.NamedTemporaryFile() as tf, tempfile.NamedTemporaryFile() as tf2: - with open(tf.name, "w") as f: - f.write("\n".join([text])) - cmd = f"perl " + uroman_pl - cmd += f" -l {iso} " - cmd += f" < {tf.name} > {tf2.name}" - os.system(cmd) - outtexts = [] - with open(tf2.name) as f: - for line in f: - line = re.sub(r"\s+", " ", line).strip() - outtexts.append(line) - outtext = outtexts[0] - return outtext - - def get_text(self, text, hps): - text_norm = self.text_to_sequence(text, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - def filter_oov(self, text, lang=None): - text = self.preprocess_char(text, lang=lang) - val_chars = self._symbol_to_id - txt_filt = "".join(list(filter(lambda x: x in val_chars, text))) - return txt_filt - - def preprocess_char(self, text, lang=None): - """ - Special treatement of characters in certain languages - """ - if lang == "ron": - text = text.replace("ț", "ţ") - print(f"{lang} (ț -> ţ): {text}") - return text - - -def synthesize(text, lang, speed): - - if speed is None: - speed = 1.0 - - lang_code = lang.split(":")[0].strip() - - vocab_file = hf_hub_download( - repo_id="facebook/mms-tts", - filename="vocab.txt", - subfolder=f"models/{lang_code}", - ) - config_file = hf_hub_download( - repo_id="facebook/mms-tts", - filename="config.json", - subfolder=f"models/{lang_code}", - ) - g_pth = hf_hub_download( - repo_id="facebook/mms-tts", - filename="G_100000.pth", - subfolder=f"models/{lang_code}", - ) - - if torch.cuda.is_available(): - device = torch.device("cuda") - elif ( - hasattr(torch.backends, "mps") - and torch.backends.mps.is_available() - and torch.backends.mps.is_built() - ): - device = torch.device("mps") - else: - device = torch.device("cpu") - - print(f"Run inference with {device}") - - assert os.path.isfile(config_file), f"{config_file} doesn't exist" - hps = utils.get_hparams_from_file(config_file) - text_mapper = TextMapper(vocab_file) - net_g = SynthesizerTrn( - len(text_mapper.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model, - ) - net_g.to(device) - _ = net_g.eval() - - _ = utils.load_checkpoint(g_pth, net_g, None) - - is_uroman = hps.data.training_files.split(".")[-1] == "uroman" - - if is_uroman: - 
uroman_dir = "uroman" - assert os.path.exists(uroman_dir) - uroman_pl = os.path.join(uroman_dir, "bin", "uroman.pl") - text = text_mapper.uromanize(text, uroman_pl) - - text = text.lower() - text = text_mapper.filter_oov(text, lang=lang) - stn_tst = text_mapper.get_text(text, hps) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0).to(device) - x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).to(device) - hyp = ( - net_g.infer( - x_tst, - x_tst_lengths, - noise_scale=0.667, - noise_scale_w=0.8, - length_scale=1.0 / speed, - )[0][0, 0] - .cpu() - .float() - .numpy() - ) - - return gr.Audio.update(value=(hps.data.sampling_rate, hyp)), text - - -TTS_EXAMPLES = [ - ["Ngenda mu dduuka.", "lug: Ganda"], - ["Nituza kurebaana", "nyn: Nyankore"], - ["Nkwenda inno", "xog: Soga"], - ["Ayia nduri ozuku fi ni", "lgg: Lugbara"], - ["Iya yom me neni", "ach: Acholi"] -] diff --git a/spaces/allknowingroger/Image-Models-Test149/README.md b/spaces/allknowingroger/Image-Models-Test149/README.md deleted file mode 100644 index a3a43bf672ca727d8113068aed4ea790c9de9309..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test149/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -duplicated_from: allknowingroger/Image-Models-Test142 ---- - - \ No newline at end of file diff --git a/spaces/alsrbdni/pdf-chat/README.md b/spaces/alsrbdni/pdf-chat/README.md deleted file mode 100644 index 05fdcbcb74348434fdc766ca3799dd2f70dcd2a3..0000000000000000000000000000000000000000 --- a/spaces/alsrbdni/pdf-chat/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Pdf Chat -emoji: 🏃 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ankur2402/ISRO/app.py b/spaces/ankur2402/ISRO/app.py deleted file mode 100644 index 31fd5cc7987dfdcdafdbe98289f9e91c90c69b80..0000000000000000000000000000000000000000 --- a/spaces/ankur2402/ISRO/app.py +++ /dev/null @@ -1,379 +0,0 @@ -import streamlit as st -import numpy as np -import cv2 -import tensorflow as tf -from PIL import Image -from keras.models import load_model -from sklearn.preprocessing import LabelEncoder -import pickle -from keras_preprocessing.sequence import pad_sequences -from keras.preprocessing.text import Tokenizer -from sklearn.preprocessing import LabelEncoder -from PIL import Image -# from google.colab.patches import cv2_imshow - -def label_smoothing(y_true,y_pred): - - return tf.keras.losses.binary_crossentropy(y_true,y_pred,label_smoothing=0.1) -def sparse_cross_entropy(y_true, y_pred): - loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, - logits=y_pred) - loss_mean = tf.reduce_mean(loss) - return loss_mean -model1 = load_model('densenet.h5',custom_objects={'label_smoothing': label_smoothing}) -image_model_transfer=load_model("image_model_transfer.h5") -decoder_model=load_model("Final_ISRO_DenseNet201_Epoch50.h5",custom_objects={'sparse_cross_entropy': sparse_cross_entropy}) - -class TokenizerWrap(Tokenizer): - """Wrap the Tokenizer-class from Keras with more functionality.""" - - def _init_(self, texts, num_words=None): - """ - :param texts: List of strings with the data-set. - :param num_words: Max number of words to use. - """ - - Tokenizer._init_(self, num_words=num_words) - - # Create the vocabulary from the texts. 
- self.fit_on_texts(texts) - - # Create inverse lookup from integer-tokens to words. - # word_index is a dictionary. its values are tokens and the keys are words - # opposite to index_to_word - self.index_to_word = dict(zip(self.word_index.values(), - self.word_index.keys())) - - def token_to_word(self, token): - """Lookup a single word from an integer-token.""" - word = " " if token == 0 else self.index_to_word[token] - return word - - def tokens_to_string(self, tokens): - """Convert a list of integer-tokens to a string.""" - # Create a list of the individual words. - words = [self.index_to_word[token] - for token in tokens - if token != 0] - - # Concatenate the words to a single string - # with space between all the words. - text = " ".join(words) - - return text - - def captions_to_tokens(self, captions_listlist): - """ - Convert a list-of-list with text-captions to - a list-of-list of integer-tokens. - """ - - # Note that text_to_sequences() takes a list of texts. - tokens = [self.texts_to_sequences(captions_list) - for captions_list in captions_listlist] - - return tokens -with open('Train_Label.pickle', 'rb') as efile: - labels=pickle.load(efile) -with open('tokenizer.pkl', 'rb') as efile: - tokenizer=pickle.load(efile) - -le=LabelEncoder() -labels=le.fit_transform(labels) - -def framing(video):#defining a small function named"framing" with a parameter "i" that's supposed to be provided for reading the video - fr = []#creating an empty list named fr - fr_pre=[]#creating an empty list named fr_pre - cap = cv2.VideoCapture(video)#reading the video file - while (cap.isOpened()):#This command builds a loop to check if the data is still being read from the video - ret,frame = cap.read()#reading the data tunnel,gives two output where one tells about presence of frames(here it's ret) & the other speaks frame data(here it's frame) - if ret == True:#checking for presence of frames - # cv2_imshow(frame)#displaying the frames - grayed = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)#Converting the frames to Grayscale from BGR - canned = cv2.Canny(grayed,320,320)#For extrating edges we use Canny Edge detection method - fr.append(frame)#Appending the read frame - fr_pre.append(canned)#Appending the edge extracted frames - # cv2_imshow(grayed)#Displaying the original frames - # cv2_imshow(canned)#Displaying the edge detected frames - k = cv2.waitKey(10) & 0XFF#this is an arrangement for displaying the video where the secs for which each frame needs to be displayed in given in the paranthesis - if k == ord('q'):#pressing 'q' key will close the video - break - else: - break - cap.release()#Here we release the resoures - cv2.destroyAllWindows()#Here we delete all the windows that were created during the program - return fr_pre,fr - -def difference_of_frames(frames): - diff = []#creatin a list variable - for i in range(0,len(frames)-1):#defining the range - diff.append(cv2.absdiff(frames[i],frames[i+1]))#appending the diff between frames to the list variable so we're supposed to get only the difference between frames - return diff - -def cal_threshold(diff): - mn = np.mean(diff)#This gives mean - st_d = np.std(diff)#This gives standard deviation - a = 4#Setting a random value we can modify it to any value - ts = mn + (a * st_d)#defining the standard threshold value for the project/global threshold value - return ts - -def imp_frames(diff, ts, ogframes): - a_fr = []#Creating an empty list - for i in range(len(diff)):#Defining the for loop to be looped over all the frames obtained after finding the frames 
resulted from subtracting - mn = np.mean(diff[i])#Calculating the mean for each frame - st_d = np.std(diff[i])#Calculating the standard deviation for each frame - fr_ts = mn + (4*st_d)#Finding the threshold values for each frame/image - a_fr.append([i,fr_ts])#Appending the frame number & the threshold values - imp_fr = []#Creating an empty list - for i,ac_tr in(a_fr):#Defining the loop on the list obtained from above code - if ac_tr >= ts:#Comapring the threshold values to the standard threshold/global threshold values - imp_fr.append([i,ac_tr])#Appending the list with the imp frames based on their index & the values - key_fr = []#Creating an empty list - for i,_ in imp_fr:#Defining the loop over the list obtained from above code - key_fr.append(ogframes[i])#This extracts the frames based on the index of frames - return key_fr - -def final_image(video): - frames,ogframes = framing(video)#calling function framing & then extracting the images - diff=difference_of_frames(frames) - ts=cal_threshold(diff) - key_fr=imp_frames(diff, ts, ogframes) - frame_no=key_fr[int(len(key_fr)/2)] #this is a frame - cv2.imwrite("Testing1.jpg",frame_no) - return "Testing1.jpg" - cv2.destroyAllWindows() - -def image_test(image_path): - image=Image.open(image_path) - image = image.resize((224,224)) - image = np.array(image) - image= np.expand_dims(image, axis=0) - return image - -def largest_indices(ary, n): - flat = ary.flatten() - indices = np.argpartition(flat, -n)[-n:] - indices = indices[np.argsort(-flat[indices])] - return indices - -mark_start = 'ssss' -mark_end = ' eeee' - -token_start = tokenizer.word_index[mark_start.strip()] -token_end = tokenizer.word_index[mark_end.strip()] - -def load_image(path, size=None): - """ - Load the image from the given file-path and resize it - to the given size if not None. - """ - - # Load the image using PIL. - img = Image.open(path) - - # Resize image if desired. - if not size is None: - img = img.resize(size=size, resample=Image.LANCZOS) - - img = np.array(img) - img = img / 255.0 - - # Convert 2-dim gray-scale array to 3-dim RGB array. - if (len(img.shape) == 2): - img = np.repeat(img[:, :, np.newaxis], 3, axis=2) - return img - -def greedy_search(image_path, max_tokens=30): - """ - Generate a caption for the image in the given path. - The caption is limited to the given number of tokens (words). - """ - # ---------------------------ENCODE IMAGE-------------------------------- - # Load and resize the image. - image = load_image(image_path, size=(224,224)) - - # Expand the 3-dim numpy array to 4-dim - # because the image-model expects a whole batch as input, - # so we give it a batch with just one image. - image_batch = np.expand_dims(image, axis=0) - - # Process the image with the pre-trained image-model - # to get the transfer-values. - transfer_values = image_model_transfer.predict(image_batch) - - # ------------------------------------------------------------------- - - - # Pre-allocate the 2-dim array used as input to the decoder. - # This holds just a single sequence of integer-tokens, - # but the decoder-model expects a batch of sequences. - shape = (1, max_tokens) - decoder_input_data = np.zeros(shape=shape, dtype=int) - - # The first input-token is the special start-token for 'ssss '. - token_int = token_start #1 - - # Initialize an empty output-text. - output_text = '' - - # Initialize the number of tokens we have processed. 
- count_tokens = 0 - - # While we haven't sampled the special end-token for ' eeee' - # and we haven't processed the max number of tokens. - while token_int != token_end and count_tokens < max_tokens: - # Update the input-sequence to the decoder - # with the last token that was sampled. - # In the first iteration this will set the - # first element to the start-token. - decoder_input_data[0, count_tokens] = token_int - - # Wrap the input-data in a dict for clarity and safety, - # so we are sure we input the data in the right order. - x_data = \ - { - 'transfer_values_input': transfer_values, - 'decoder_input': decoder_input_data - } - - # Note that we input the entire sequence of tokens - # to the decoder. This wastes a lot of computation - # because we are only interested in the last input - # and output. We could modify the code to return - # the GRU-states when calling predict() and then - # feeding these GRU-states as well the next time - # we call predict(), but it would make the code - # much more complicated. - - # Input this data to the decoder and get the predicted output. - decoder_output = decoder_model.predict(x_data) -# print(decoder_output.shape) (1,30,15000) for every iteration - - # Get the last predicted token as a one-hot encoded array. - # Note that this is not limited by softmax, but we just - # need the index of the largest element so it doesn't matter. - token_onehot = decoder_output[0, count_tokens, :] -# print(token_onehot.shape) (15000, ) for every iteration - # Convert to an integer-token. - token_int = np.argmax(token_onehot) -# print(token_int) #the token of a word with the highest score - - # Lookup the word corresponding to this integer-token. - sampled_word = tokenizer.token_to_word(token_int) -# print(sampled_word) - - # Append the word to the output-text. - output_text += " " + sampled_word - - # Increment the token-counter. - count_tokens += 1 - - # This is the sequence of tokens output by the decoder. - output_tokens = decoder_input_data[0] -# print(output_tokens) - # Plot the image. - # plt.imshow(image) - # plt.show() - - predicted_caption=output_text.split() - del (predicted_caption[-1]) - output_text = " " - output_text = output_text.join(predicted_caption) - - # Print the predicted caption. - # print("Predicted caption:") - # print(output_text) - # print() - return predicted_caption - -def beam_search(beam_index, image_path, max_tokens=30): - image = load_image(image_path, size=(224,224)) - - # Expand the 3-dim numpy array to 4-dim - # because the image-model expects a whole batch as input, - # so we give it a batch with just one image. - image_batch = np.expand_dims(image, axis=0) - - # Process the image with the pre-trained image-model - # to get the transfer-values. - transfer_values = image_model_transfer.predict(image_batch) - - token_int = [token_start] - start_word = [[token_int, 0.0]] - count_tokens = 0 - while len(start_word[0][0])

      -

      new hindi movie joker song download


      Download ⇒⇒⇒ https://tinurli.com/2uwiBu



      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiohttp/client_exceptions.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiohttp/client_exceptions.py deleted file mode 100644 index c640e1e7fbdf8c56a9e744492d99f8ca32988142..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiohttp/client_exceptions.py +++ /dev/null @@ -1,342 +0,0 @@ -"""HTTP related errors.""" - -import asyncio -import warnings -from typing import TYPE_CHECKING, Any, Optional, Tuple, Union - -from .http_parser import RawResponseMessage -from .typedefs import LooseHeaders - -try: - import ssl - - SSLContext = ssl.SSLContext -except ImportError: # pragma: no cover - ssl = SSLContext = None # type: ignore[assignment] - - -if TYPE_CHECKING: # pragma: no cover - from .client_reqrep import ClientResponse, ConnectionKey, Fingerprint, RequestInfo -else: - RequestInfo = ClientResponse = ConnectionKey = None - -__all__ = ( - "ClientError", - "ClientConnectionError", - "ClientOSError", - "ClientConnectorError", - "ClientProxyConnectionError", - "ClientSSLError", - "ClientConnectorSSLError", - "ClientConnectorCertificateError", - "ServerConnectionError", - "ServerTimeoutError", - "ServerDisconnectedError", - "ServerFingerprintMismatch", - "ClientResponseError", - "ClientHttpProxyError", - "WSServerHandshakeError", - "ContentTypeError", - "ClientPayloadError", - "InvalidURL", -) - - -class ClientError(Exception): - """Base class for client connection errors.""" - - -class ClientResponseError(ClientError): - """Connection error during reading response. - - request_info: instance of RequestInfo - """ - - def __init__( - self, - request_info: RequestInfo, - history: Tuple[ClientResponse, ...], - *, - code: Optional[int] = None, - status: Optional[int] = None, - message: str = "", - headers: Optional[LooseHeaders] = None, - ) -> None: - self.request_info = request_info - if code is not None: - if status is not None: - raise ValueError( - "Both code and status arguments are provided; " - "code is deprecated, use status instead" - ) - warnings.warn( - "code argument is deprecated, use status instead", - DeprecationWarning, - stacklevel=2, - ) - if status is not None: - self.status = status - elif code is not None: - self.status = code - else: - self.status = 0 - self.message = message - self.headers = headers - self.history = history - self.args = (request_info, history) - - def __str__(self) -> str: - return "{}, message={!r}, url={!r}".format( - self.status, - self.message, - self.request_info.real_url, - ) - - def __repr__(self) -> str: - args = f"{self.request_info!r}, {self.history!r}" - if self.status != 0: - args += f", status={self.status!r}" - if self.message != "": - args += f", message={self.message!r}" - if self.headers is not None: - args += f", headers={self.headers!r}" - return f"{type(self).__name__}({args})" - - @property - def code(self) -> int: - warnings.warn( - "code property is deprecated, use status instead", - DeprecationWarning, - stacklevel=2, - ) - return self.status - - @code.setter - def code(self, value: int) -> None: - warnings.warn( - "code property is deprecated, use status instead", - DeprecationWarning, - stacklevel=2, - ) - self.status = value - - -class ContentTypeError(ClientResponseError): - """ContentType found is not valid.""" - - -class WSServerHandshakeError(ClientResponseError): - """websocket server handshake error.""" - - -class 
ClientHttpProxyError(ClientResponseError): - """HTTP proxy error. - - Raised in :class:`aiohttp.connector.TCPConnector` if - proxy responds with status other than ``200 OK`` - on ``CONNECT`` request. - """ - - -class TooManyRedirects(ClientResponseError): - """Client was redirected too many times.""" - - -class ClientConnectionError(ClientError): - """Base class for client socket errors.""" - - -class ClientOSError(ClientConnectionError, OSError): - """OSError error.""" - - -class ClientConnectorError(ClientOSError): - """Client connector error. - - Raised in :class:`aiohttp.connector.TCPConnector` if - a connection can not be established. - """ - - def __init__(self, connection_key: ConnectionKey, os_error: OSError) -> None: - self._conn_key = connection_key - self._os_error = os_error - super().__init__(os_error.errno, os_error.strerror) - self.args = (connection_key, os_error) - - @property - def os_error(self) -> OSError: - return self._os_error - - @property - def host(self) -> str: - return self._conn_key.host - - @property - def port(self) -> Optional[int]: - return self._conn_key.port - - @property - def ssl(self) -> Union[SSLContext, None, bool, "Fingerprint"]: - return self._conn_key.ssl - - def __str__(self) -> str: - return "Cannot connect to host {0.host}:{0.port} ssl:{1} [{2}]".format( - self, self.ssl if self.ssl is not None else "default", self.strerror - ) - - # OSError.__reduce__ does too much black magick - __reduce__ = BaseException.__reduce__ - - -class ClientProxyConnectionError(ClientConnectorError): - """Proxy connection error. - - Raised in :class:`aiohttp.connector.TCPConnector` if - connection to proxy can not be established. - """ - - -class UnixClientConnectorError(ClientConnectorError): - """Unix connector error. - - Raised in :py:class:`aiohttp.connector.UnixConnector` - if connection to unix socket can not be established. - """ - - def __init__( - self, path: str, connection_key: ConnectionKey, os_error: OSError - ) -> None: - self._path = path - super().__init__(connection_key, os_error) - - @property - def path(self) -> str: - return self._path - - def __str__(self) -> str: - return "Cannot connect to unix socket {0.path} ssl:{1} [{2}]".format( - self, self.ssl if self.ssl is not None else "default", self.strerror - ) - - -class ServerConnectionError(ClientConnectionError): - """Server connection errors.""" - - -class ServerDisconnectedError(ServerConnectionError): - """Server disconnected.""" - - def __init__(self, message: Union[RawResponseMessage, str, None] = None) -> None: - if message is None: - message = "Server disconnected" - - self.args = (message,) - self.message = message - - -class ServerTimeoutError(ServerConnectionError, asyncio.TimeoutError): - """Server timeout error.""" - - -class ServerFingerprintMismatch(ServerConnectionError): - """SSL certificate does not match expected fingerprint.""" - - def __init__(self, expected: bytes, got: bytes, host: str, port: int) -> None: - self.expected = expected - self.got = got - self.host = host - self.port = port - self.args = (expected, got, host, port) - - def __repr__(self) -> str: - return "<{} expected={!r} got={!r} host={!r} port={!r}>".format( - self.__class__.__name__, self.expected, self.got, self.host, self.port - ) - - -class ClientPayloadError(ClientError): - """Response payload error.""" - - -class InvalidURL(ClientError, ValueError): - """Invalid URL. - - URL used for fetching is malformed, e.g. it doesn't contains host - part. 
- """ - - # Derive from ValueError for backward compatibility - - def __init__(self, url: Any) -> None: - # The type of url is not yarl.URL because the exception can be raised - # on URL(url) call - super().__init__(url) - - @property - def url(self) -> Any: - return self.args[0] - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} {self.url}>" - - -class ClientSSLError(ClientConnectorError): - """Base error for ssl.*Errors.""" - - -if ssl is not None: - cert_errors = (ssl.CertificateError,) - cert_errors_bases = ( - ClientSSLError, - ssl.CertificateError, - ) - - ssl_errors = (ssl.SSLError,) - ssl_error_bases = (ClientSSLError, ssl.SSLError) -else: # pragma: no cover - cert_errors = tuple() - cert_errors_bases = ( - ClientSSLError, - ValueError, - ) - - ssl_errors = tuple() - ssl_error_bases = (ClientSSLError,) - - -class ClientConnectorSSLError(*ssl_error_bases): # type: ignore[misc] - """Response ssl error.""" - - -class ClientConnectorCertificateError(*cert_errors_bases): # type: ignore[misc] - """Response certificate error.""" - - def __init__( - self, connection_key: ConnectionKey, certificate_error: Exception - ) -> None: - self._conn_key = connection_key - self._certificate_error = certificate_error - self.args = (connection_key, certificate_error) - - @property - def certificate_error(self) -> Exception: - return self._certificate_error - - @property - def host(self) -> str: - return self._conn_key.host - - @property - def port(self) -> Optional[int]: - return self._conn_key.port - - @property - def ssl(self) -> bool: - return self._conn_key.is_ssl - - def __str__(self) -> str: - return ( - "Cannot connect to host {0.host}:{0.port} ssl:{0.ssl} " - "[{0.certificate_error.__class__.__name__}: " - "{0.certificate_error.args}]".format(self) - ) diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/anyio/_core/_streams.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/anyio/_core/_streams.py deleted file mode 100644 index 54ea2b2bafd321a4f88dfa6fd19993213eec8105..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/anyio/_core/_streams.py +++ /dev/null @@ -1,47 +0,0 @@ -from __future__ import annotations - -import math -from typing import Any, TypeVar, overload - -from ..streams.memory import ( - MemoryObjectReceiveStream, - MemoryObjectSendStream, - MemoryObjectStreamState, -) - -T_Item = TypeVar("T_Item") - - -@overload -def create_memory_object_stream( - max_buffer_size: float = ..., -) -> tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]: - ... - - -@overload -def create_memory_object_stream( - max_buffer_size: float = ..., item_type: type[T_Item] = ... -) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]: - ... - - -def create_memory_object_stream( - max_buffer_size: float = 0, item_type: type[T_Item] | None = None -) -> tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]: - """ - Create a memory object stream. 
- - :param max_buffer_size: number of items held in the buffer until ``send()`` starts blocking - :param item_type: type of item, for marking the streams with the right generic type for - static typing (not used at run time) - :return: a tuple of (send stream, receive stream) - - """ - if max_buffer_size != math.inf and not isinstance(max_buffer_size, int): - raise ValueError("max_buffer_size must be either an integer or math.inf") - if max_buffer_size < 0: - raise ValueError("max_buffer_size cannot be negative") - - state: MemoryObjectStreamState = MemoryObjectStreamState(max_buffer_size) - return MemoryObjectSendStream(state), MemoryObjectReceiveStream(state) diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/click/_compat.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/click/_compat.py deleted file mode 100644 index 9153d150ce67a708f920fcf9c606970fc061f816..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/click/_compat.py +++ /dev/null @@ -1,623 +0,0 @@ -import codecs -import io -import os -import re -import sys -import typing as t -from weakref import WeakKeyDictionary - -CYGWIN = sys.platform.startswith("cygwin") -WIN = sys.platform.startswith("win") -auto_wrap_for_ansi: t.Optional[t.Callable[[t.TextIO], t.TextIO]] = None -_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]") - - -def _make_text_stream( - stream: t.BinaryIO, - encoding: t.Optional[str], - errors: t.Optional[str], - force_readable: bool = False, - force_writable: bool = False, -) -> t.TextIO: - if encoding is None: - encoding = get_best_encoding(stream) - if errors is None: - errors = "replace" - return _NonClosingTextIOWrapper( - stream, - encoding, - errors, - line_buffering=True, - force_readable=force_readable, - force_writable=force_writable, - ) - - -def is_ascii_encoding(encoding: str) -> bool: - """Checks if a given encoding is ascii.""" - try: - return codecs.lookup(encoding).name == "ascii" - except LookupError: - return False - - -def get_best_encoding(stream: t.IO[t.Any]) -> str: - """Returns the default stream encoding if not found.""" - rv = getattr(stream, "encoding", None) or sys.getdefaultencoding() - if is_ascii_encoding(rv): - return "utf-8" - return rv - - -class _NonClosingTextIOWrapper(io.TextIOWrapper): - def __init__( - self, - stream: t.BinaryIO, - encoding: t.Optional[str], - errors: t.Optional[str], - force_readable: bool = False, - force_writable: bool = False, - **extra: t.Any, - ) -> None: - self._stream = stream = t.cast( - t.BinaryIO, _FixupStream(stream, force_readable, force_writable) - ) - super().__init__(stream, encoding, errors, **extra) - - def __del__(self) -> None: - try: - self.detach() - except Exception: - pass - - def isatty(self) -> bool: - # https://bitbucket.org/pypy/pypy/issue/1803 - return self._stream.isatty() - - -class _FixupStream: - """The new io interface needs more from streams than streams - traditionally implement. As such, this fix-up code is necessary in - some circumstances. - - The forcing of readable and writable flags are there because some tools - put badly patched objects on sys (one such offender are certain version - of jupyter notebook). 
- """ - - def __init__( - self, - stream: t.BinaryIO, - force_readable: bool = False, - force_writable: bool = False, - ): - self._stream = stream - self._force_readable = force_readable - self._force_writable = force_writable - - def __getattr__(self, name: str) -> t.Any: - return getattr(self._stream, name) - - def read1(self, size: int) -> bytes: - f = getattr(self._stream, "read1", None) - - if f is not None: - return t.cast(bytes, f(size)) - - return self._stream.read(size) - - def readable(self) -> bool: - if self._force_readable: - return True - x = getattr(self._stream, "readable", None) - if x is not None: - return t.cast(bool, x()) - try: - self._stream.read(0) - except Exception: - return False - return True - - def writable(self) -> bool: - if self._force_writable: - return True - x = getattr(self._stream, "writable", None) - if x is not None: - return t.cast(bool, x()) - try: - self._stream.write("") # type: ignore - except Exception: - try: - self._stream.write(b"") - except Exception: - return False - return True - - def seekable(self) -> bool: - x = getattr(self._stream, "seekable", None) - if x is not None: - return t.cast(bool, x()) - try: - self._stream.seek(self._stream.tell()) - except Exception: - return False - return True - - -def _is_binary_reader(stream: t.IO[t.Any], default: bool = False) -> bool: - try: - return isinstance(stream.read(0), bytes) - except Exception: - return default - # This happens in some cases where the stream was already - # closed. In this case, we assume the default. - - -def _is_binary_writer(stream: t.IO[t.Any], default: bool = False) -> bool: - try: - stream.write(b"") - except Exception: - try: - stream.write("") - return False - except Exception: - pass - return default - return True - - -def _find_binary_reader(stream: t.IO[t.Any]) -> t.Optional[t.BinaryIO]: - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detaching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_reader(stream, False): - return t.cast(t.BinaryIO, stream) - - buf = getattr(stream, "buffer", None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. - if buf is not None and _is_binary_reader(buf, True): - return t.cast(t.BinaryIO, buf) - - return None - - -def _find_binary_writer(stream: t.IO[t.Any]) -> t.Optional[t.BinaryIO]: - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detaching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_writer(stream, False): - return t.cast(t.BinaryIO, stream) - - buf = getattr(stream, "buffer", None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. - if buf is not None and _is_binary_writer(buf, True): - return t.cast(t.BinaryIO, buf) - - return None - - -def _stream_is_misconfigured(stream: t.TextIO) -> bool: - """A stream is misconfigured if its encoding is ASCII.""" - # If the stream does not have an encoding set, we assume it's set - # to ASCII. This appears to happen in certain unittest - # environments. It's not quite clear what the correct behavior is - # but this at least will force Click to recover somehow. 
- return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii") - - -def _is_compat_stream_attr(stream: t.TextIO, attr: str, value: t.Optional[str]) -> bool: - """A stream attribute is compatible if it is equal to the - desired value or the desired value is unset and the attribute - has a value. - """ - stream_value = getattr(stream, attr, None) - return stream_value == value or (value is None and stream_value is not None) - - -def _is_compatible_text_stream( - stream: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str] -) -> bool: - """Check if a stream's encoding and errors attributes are - compatible with the desired values. - """ - return _is_compat_stream_attr( - stream, "encoding", encoding - ) and _is_compat_stream_attr(stream, "errors", errors) - - -def _force_correct_text_stream( - text_stream: t.IO[t.Any], - encoding: t.Optional[str], - errors: t.Optional[str], - is_binary: t.Callable[[t.IO[t.Any], bool], bool], - find_binary: t.Callable[[t.IO[t.Any]], t.Optional[t.BinaryIO]], - force_readable: bool = False, - force_writable: bool = False, -) -> t.TextIO: - if is_binary(text_stream, False): - binary_reader = t.cast(t.BinaryIO, text_stream) - else: - text_stream = t.cast(t.TextIO, text_stream) - # If the stream looks compatible, and won't default to a - # misconfigured ascii encoding, return it as-is. - if _is_compatible_text_stream(text_stream, encoding, errors) and not ( - encoding is None and _stream_is_misconfigured(text_stream) - ): - return text_stream - - # Otherwise, get the underlying binary reader. - possible_binary_reader = find_binary(text_stream) - - # If that's not possible, silently use the original reader - # and get mojibake instead of exceptions. - if possible_binary_reader is None: - return text_stream - - binary_reader = possible_binary_reader - - # Default errors to replace instead of strict in order to get - # something that works. - if errors is None: - errors = "replace" - - # Wrap the binary stream in a text stream with the correct - # encoding parameters. 
- return _make_text_stream( - binary_reader, - encoding, - errors, - force_readable=force_readable, - force_writable=force_writable, - ) - - -def _force_correct_text_reader( - text_reader: t.IO[t.Any], - encoding: t.Optional[str], - errors: t.Optional[str], - force_readable: bool = False, -) -> t.TextIO: - return _force_correct_text_stream( - text_reader, - encoding, - errors, - _is_binary_reader, - _find_binary_reader, - force_readable=force_readable, - ) - - -def _force_correct_text_writer( - text_writer: t.IO[t.Any], - encoding: t.Optional[str], - errors: t.Optional[str], - force_writable: bool = False, -) -> t.TextIO: - return _force_correct_text_stream( - text_writer, - encoding, - errors, - _is_binary_writer, - _find_binary_writer, - force_writable=force_writable, - ) - - -def get_binary_stdin() -> t.BinaryIO: - reader = _find_binary_reader(sys.stdin) - if reader is None: - raise RuntimeError("Was not able to determine binary stream for sys.stdin.") - return reader - - -def get_binary_stdout() -> t.BinaryIO: - writer = _find_binary_writer(sys.stdout) - if writer is None: - raise RuntimeError("Was not able to determine binary stream for sys.stdout.") - return writer - - -def get_binary_stderr() -> t.BinaryIO: - writer = _find_binary_writer(sys.stderr) - if writer is None: - raise RuntimeError("Was not able to determine binary stream for sys.stderr.") - return writer - - -def get_text_stdin( - encoding: t.Optional[str] = None, errors: t.Optional[str] = None -) -> t.TextIO: - rv = _get_windows_console_stream(sys.stdin, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_reader(sys.stdin, encoding, errors, force_readable=True) - - -def get_text_stdout( - encoding: t.Optional[str] = None, errors: t.Optional[str] = None -) -> t.TextIO: - rv = _get_windows_console_stream(sys.stdout, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer(sys.stdout, encoding, errors, force_writable=True) - - -def get_text_stderr( - encoding: t.Optional[str] = None, errors: t.Optional[str] = None -) -> t.TextIO: - rv = _get_windows_console_stream(sys.stderr, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer(sys.stderr, encoding, errors, force_writable=True) - - -def _wrap_io_open( - file: t.Union[str, "os.PathLike[str]", int], - mode: str, - encoding: t.Optional[str], - errors: t.Optional[str], -) -> t.IO[t.Any]: - """Handles not passing ``encoding`` and ``errors`` in binary mode.""" - if "b" in mode: - return open(file, mode) - - return open(file, mode, encoding=encoding, errors=errors) - - -def open_stream( - filename: "t.Union[str, os.PathLike[str]]", - mode: str = "r", - encoding: t.Optional[str] = None, - errors: t.Optional[str] = "strict", - atomic: bool = False, -) -> t.Tuple[t.IO[t.Any], bool]: - binary = "b" in mode - filename = os.fspath(filename) - - # Standard streams first. These are simple because they ignore the - # atomic flag. Use fsdecode to handle Path("-"). - if os.fsdecode(filename) == "-": - if any(m in mode for m in ["w", "a", "x"]): - if binary: - return get_binary_stdout(), False - return get_text_stdout(encoding=encoding, errors=errors), False - if binary: - return get_binary_stdin(), False - return get_text_stdin(encoding=encoding, errors=errors), False - - # Non-atomic writes directly go out through the regular open functions. 
- if not atomic: - return _wrap_io_open(filename, mode, encoding, errors), True - - # Some usability stuff for atomic writes - if "a" in mode: - raise ValueError( - "Appending to an existing file is not supported, because that" - " would involve an expensive `copy`-operation to a temporary" - " file. Open the file in normal `w`-mode and copy explicitly" - " if that's what you're after." - ) - if "x" in mode: - raise ValueError("Use the `overwrite`-parameter instead.") - if "w" not in mode: - raise ValueError("Atomic writes only make sense with `w`-mode.") - - # Atomic writes are more complicated. They work by opening a file - # as a proxy in the same folder and then using the fdopen - # functionality to wrap it in a Python file. Then we wrap it in an - # atomic file that moves the file over on close. - import errno - import random - - try: - perm: t.Optional[int] = os.stat(filename).st_mode - except OSError: - perm = None - - flags = os.O_RDWR | os.O_CREAT | os.O_EXCL - - if binary: - flags |= getattr(os, "O_BINARY", 0) - - while True: - tmp_filename = os.path.join( - os.path.dirname(filename), - f".__atomic-write{random.randrange(1 << 32):08x}", - ) - try: - fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm) - break - except OSError as e: - if e.errno == errno.EEXIST or ( - os.name == "nt" - and e.errno == errno.EACCES - and os.path.isdir(e.filename) - and os.access(e.filename, os.W_OK) - ): - continue - raise - - if perm is not None: - os.chmod(tmp_filename, perm) # in case perm includes bits in umask - - f = _wrap_io_open(fd, mode, encoding, errors) - af = _AtomicFile(f, tmp_filename, os.path.realpath(filename)) - return t.cast(t.IO[t.Any], af), True - - -class _AtomicFile: - def __init__(self, f: t.IO[t.Any], tmp_filename: str, real_filename: str) -> None: - self._f = f - self._tmp_filename = tmp_filename - self._real_filename = real_filename - self.closed = False - - @property - def name(self) -> str: - return self._real_filename - - def close(self, delete: bool = False) -> None: - if self.closed: - return - self._f.close() - os.replace(self._tmp_filename, self._real_filename) - self.closed = True - - def __getattr__(self, name: str) -> t.Any: - return getattr(self._f, name) - - def __enter__(self) -> "_AtomicFile": - return self - - def __exit__(self, exc_type: t.Optional[t.Type[BaseException]], *_: t.Any) -> None: - self.close(delete=exc_type is not None) - - def __repr__(self) -> str: - return repr(self._f) - - -def strip_ansi(value: str) -> str: - return _ansi_re.sub("", value) - - -def _is_jupyter_kernel_output(stream: t.IO[t.Any]) -> bool: - while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)): - stream = stream._stream - - return stream.__class__.__module__.startswith("ipykernel.") - - -def should_strip_ansi( - stream: t.Optional[t.IO[t.Any]] = None, color: t.Optional[bool] = None -) -> bool: - if color is None: - if stream is None: - stream = sys.stdin - return not isatty(stream) and not _is_jupyter_kernel_output(stream) - return not color - - -# On Windows, wrap the output streams with colorama to support ANSI -# color codes. 
-# NOTE: double check is needed so mypy does not analyze this on Linux -if sys.platform.startswith("win") and WIN: - from ._winconsole import _get_windows_console_stream - - def _get_argv_encoding() -> str: - import locale - - return locale.getpreferredencoding() - - _ansi_stream_wrappers: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() - - def auto_wrap_for_ansi( - stream: t.TextIO, color: t.Optional[bool] = None - ) -> t.TextIO: - """Support ANSI color and style codes on Windows by wrapping a - stream with colorama. - """ - try: - cached = _ansi_stream_wrappers.get(stream) - except Exception: - cached = None - - if cached is not None: - return cached - - import colorama - - strip = should_strip_ansi(stream, color) - ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) - rv = t.cast(t.TextIO, ansi_wrapper.stream) - _write = rv.write - - def _safe_write(s): - try: - return _write(s) - except BaseException: - ansi_wrapper.reset_all() - raise - - rv.write = _safe_write - - try: - _ansi_stream_wrappers[stream] = rv - except Exception: - pass - - return rv - -else: - - def _get_argv_encoding() -> str: - return getattr(sys.stdin, "encoding", None) or sys.getfilesystemencoding() - - def _get_windows_console_stream( - f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str] - ) -> t.Optional[t.TextIO]: - return None - - -def term_len(x: str) -> int: - return len(strip_ansi(x)) - - -def isatty(stream: t.IO[t.Any]) -> bool: - try: - return stream.isatty() - except Exception: - return False - - -def _make_cached_stream_func( - src_func: t.Callable[[], t.Optional[t.TextIO]], - wrapper_func: t.Callable[[], t.TextIO], -) -> t.Callable[[], t.Optional[t.TextIO]]: - cache: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() - - def func() -> t.Optional[t.TextIO]: - stream = src_func() - - if stream is None: - return None - - try: - rv = cache.get(stream) - except Exception: - rv = None - if rv is not None: - return rv - rv = wrapper_func() - try: - cache[stream] = rv - except Exception: - pass - return rv - - return func - - -_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin) -_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout) -_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr) - - -binary_streams: t.Mapping[str, t.Callable[[], t.BinaryIO]] = { - "stdin": get_binary_stdin, - "stdout": get_binary_stdout, - "stderr": get_binary_stderr, -} - -text_streams: t.Mapping[ - str, t.Callable[[t.Optional[str], t.Optional[str]], t.TextIO] -] = { - "stdin": get_text_stdin, - "stdout": get_text_stdout, - "stderr": get_text_stderr, -} diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/colorLib/unbuilder.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/colorLib/unbuilder.py deleted file mode 100644 index ac243550b8908aef120b395e740b9974559d65b5..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/colorLib/unbuilder.py +++ /dev/null @@ -1,81 +0,0 @@ -from fontTools.ttLib.tables import otTables as ot -from .table_builder import TableUnbuilder - - -def unbuildColrV1(layerList, baseGlyphList): - layers = [] - if layerList: - layers = layerList.Paint - unbuilder = LayerListUnbuilder(layers) - return { - rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint) - for rec in baseGlyphList.BaseGlyphPaintRecord - } - - -def _flatten_layers(lst): - for paint in lst: 
- if paint["Format"] == ot.PaintFormat.PaintColrLayers: - yield from _flatten_layers(paint["Layers"]) - else: - yield paint - - -class LayerListUnbuilder: - def __init__(self, layers): - self.layers = layers - - callbacks = { - ( - ot.Paint, - ot.PaintFormat.PaintColrLayers, - ): self._unbuildPaintColrLayers, - } - self.tableUnbuilder = TableUnbuilder(callbacks) - - def unbuildPaint(self, paint): - assert isinstance(paint, ot.Paint) - return self.tableUnbuilder.unbuild(paint) - - def _unbuildPaintColrLayers(self, source): - assert source["Format"] == ot.PaintFormat.PaintColrLayers - - layers = list( - _flatten_layers( - [ - self.unbuildPaint(childPaint) - for childPaint in self.layers[ - source["FirstLayerIndex"] : source["FirstLayerIndex"] - + source["NumLayers"] - ] - ] - ) - ) - - if len(layers) == 1: - return layers[0] - - return {"Format": source["Format"], "Layers": layers} - - -if __name__ == "__main__": - from pprint import pprint - import sys - from fontTools.ttLib import TTFont - - try: - fontfile = sys.argv[1] - except IndexError: - sys.exit("usage: fonttools colorLib.unbuilder FONTFILE") - - font = TTFont(fontfile) - colr = font["COLR"] - if colr.version < 1: - sys.exit(f"error: No COLR table version=1 found in {fontfile}") - - colorGlyphs = unbuildColrV1( - colr.table.LayerList, - colr.table.BaseGlyphList, - ) - - pprint(colorGlyphs) diff --git a/spaces/colakin/video-generater/public/ffmpeg/fftools/thread_queue.c b/spaces/colakin/video-generater/public/ffmpeg/fftools/thread_queue.c deleted file mode 100644 index a1ab4ce92e4ceb2d38427dc578c1166e7ed5ddb3..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/fftools/thread_queue.c +++ /dev/null @@ -1,245 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include -#include - -#include "libavutil/avassert.h" -#include "libavutil/error.h" -#include "libavutil/fifo.h" -#include "libavutil/intreadwrite.h" -#include "libavutil/mem.h" -#include "libavutil/thread.h" - -#include "objpool.h" -#include "thread_queue.h" - -enum { - FINISHED_SEND = (1 << 0), - FINISHED_RECV = (1 << 1), -}; - -typedef struct FifoElem { - void *obj; - unsigned int stream_idx; -} FifoElem; - -struct ThreadQueue { - int *finished; - unsigned int nb_streams; - - AVFifo *fifo; - - ObjPool *obj_pool; - void (*obj_move)(void *dst, void *src); - - pthread_mutex_t lock; - pthread_cond_t cond; -}; - -void tq_free(ThreadQueue **ptq) -{ - ThreadQueue *tq = *ptq; - - if (!tq) - return; - - if (tq->fifo) { - FifoElem elem; - while (av_fifo_read(tq->fifo, &elem, 1) >= 0) - objpool_release(tq->obj_pool, &elem.obj); - } - av_fifo_freep2(&tq->fifo); - - objpool_free(&tq->obj_pool); - - av_freep(&tq->finished); - - pthread_cond_destroy(&tq->cond); - pthread_mutex_destroy(&tq->lock); - - av_freep(ptq); -} - -ThreadQueue *tq_alloc(unsigned int nb_streams, size_t queue_size, - ObjPool *obj_pool, void (*obj_move)(void *dst, void *src)) -{ - ThreadQueue *tq; - int ret; - - tq = av_mallocz(sizeof(*tq)); - if (!tq) - return NULL; - - ret = pthread_cond_init(&tq->cond, NULL); - if (ret) { - av_freep(&tq); - return NULL; - } - - ret = pthread_mutex_init(&tq->lock, NULL); - if (ret) { - pthread_cond_destroy(&tq->cond); - av_freep(&tq); - return NULL; - } - - tq->finished = av_calloc(nb_streams, sizeof(*tq->finished)); - if (!tq->finished) - goto fail; - tq->nb_streams = nb_streams; - - tq->fifo = av_fifo_alloc2(queue_size, sizeof(FifoElem), 0); - if (!tq->fifo) - goto fail; - - tq->obj_pool = obj_pool; - tq->obj_move = obj_move; - - return tq; -fail: - tq_free(&tq); - return NULL; -} - -int tq_send(ThreadQueue *tq, unsigned int stream_idx, void *data) -{ - int *finished; - int ret; - - av_assert0(stream_idx < tq->nb_streams); - finished = &tq->finished[stream_idx]; - - pthread_mutex_lock(&tq->lock); - - if (*finished & FINISHED_SEND) { - ret = AVERROR(EINVAL); - goto finish; - } - - while (!(*finished & FINISHED_RECV) && !av_fifo_can_write(tq->fifo)) - pthread_cond_wait(&tq->cond, &tq->lock); - - if (*finished & FINISHED_RECV) { - ret = AVERROR_EOF; - *finished |= FINISHED_SEND; - } else { - FifoElem elem = { .stream_idx = stream_idx }; - - ret = objpool_get(tq->obj_pool, &elem.obj); - if (ret < 0) - goto finish; - - tq->obj_move(elem.obj, data); - - ret = av_fifo_write(tq->fifo, &elem, 1); - av_assert0(ret >= 0); - pthread_cond_broadcast(&tq->cond); - } - -finish: - pthread_mutex_unlock(&tq->lock); - - return ret; -} - -static int receive_locked(ThreadQueue *tq, int *stream_idx, - void *data) -{ - FifoElem elem; - unsigned int nb_finished = 0; - - if (av_fifo_read(tq->fifo, &elem, 1) >= 0) { - tq->obj_move(data, elem.obj); - objpool_release(tq->obj_pool, &elem.obj); - *stream_idx = elem.stream_idx; - return 0; - } - - for (unsigned int i = 0; i < tq->nb_streams; i++) { - if (!(tq->finished[i] & FINISHED_SEND)) - continue; - - /* return EOF to the consumer at most once for each stream */ - if (!(tq->finished[i] & FINISHED_RECV)) { - tq->finished[i] |= FINISHED_RECV; - *stream_idx = i; - return AVERROR_EOF; - } - - nb_finished++; - } - - return nb_finished == tq->nb_streams 
? AVERROR_EOF : AVERROR(EAGAIN); -} - -int tq_receive(ThreadQueue *tq, int *stream_idx, void *data) -{ - int ret; - - *stream_idx = -1; - - pthread_mutex_lock(&tq->lock); - - while (1) { - ret = receive_locked(tq, stream_idx, data); - if (ret == AVERROR(EAGAIN)) { - pthread_cond_wait(&tq->cond, &tq->lock); - continue; - } - - break; - } - - if (ret == 0) - pthread_cond_broadcast(&tq->cond); - - pthread_mutex_unlock(&tq->lock); - - return ret; -} - -void tq_send_finish(ThreadQueue *tq, unsigned int stream_idx) -{ - av_assert0(stream_idx < tq->nb_streams); - - pthread_mutex_lock(&tq->lock); - - /* mark the stream as send-finished; - * next time the consumer thread tries to read this stream it will get - * an EOF and recv-finished flag will be set */ - tq->finished[stream_idx] |= FINISHED_SEND; - pthread_cond_broadcast(&tq->cond); - - pthread_mutex_unlock(&tq->lock); -} - -void tq_receive_finish(ThreadQueue *tq, unsigned int stream_idx) -{ - av_assert0(stream_idx < tq->nb_streams); - - pthread_mutex_lock(&tq->lock); - - /* mark the stream as recv-finished; - * next time the producer thread tries to send for this stream, it will - * get an EOF and send-finished flag will be set */ - tq->finished[stream_idx] |= FINISHED_RECV; - pthread_cond_broadcast(&tq->cond); - - pthread_mutex_unlock(&tq->lock); -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/indeo5.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/indeo5.c deleted file mode 100644 index df95064e3fc0939514044283230456f82329693c..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/indeo5.c +++ /dev/null @@ -1,697 +0,0 @@ -/* - * Indeo Video Interactive v5 compatible decoder - * Copyright (c) 2009 Maxim Poliakovski - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Indeo Video Interactive version 5 decoder - * - * Indeo5 data is usually transported within .avi or .mov files. - * Known FOURCCs: 'IV50' - */ - -#define BITSTREAM_READER_LE -#include "avcodec.h" -#include "codec_internal.h" -#include "get_bits.h" -#include "ivi.h" -#include "ivi_dsp.h" -#include "indeo5data.h" - -/** - * Indeo5 frame types. - */ -enum { - FRAMETYPE_INTRA = 0, - FRAMETYPE_INTER = 1, ///< non-droppable P-frame - FRAMETYPE_INTER_SCAL = 2, ///< droppable P-frame used in the scalability mode - FRAMETYPE_INTER_NOREF = 3, ///< droppable P-frame - FRAMETYPE_NULL = 4 ///< empty frame with no data -}; - -#define IVI5_PIC_SIZE_ESC 15 - -/** - * Decode Indeo5 GOP (Group of pictures) header. - * This header is present in key frames only. - * It defines parameters for all frames in a GOP. 
- * - * @param[in,out] ctx ptr to the decoder context - * @param[in] avctx ptr to the AVCodecContext - * @return result code: 0 = OK, -1 = error - */ -static int decode_gop_header(IVI45DecContext *ctx, AVCodecContext *avctx) -{ - int result, i, p, tile_size, pic_size_indx, mb_size, blk_size, is_scalable; - int quant_mat, blk_size_changed = 0; - IVIBandDesc *band, *band1, *band2; - IVIPicConfig pic_conf; - - ctx->gop_flags = get_bits(&ctx->gb, 8); - - ctx->gop_hdr_size = (ctx->gop_flags & 1) ? get_bits(&ctx->gb, 16) : 0; - - if (ctx->gop_flags & IVI5_IS_PROTECTED) - ctx->lock_word = get_bits_long(&ctx->gb, 32); - - tile_size = (ctx->gop_flags & 0x40) ? 64 << get_bits(&ctx->gb, 2) : 0; - if (tile_size > 256) { - av_log(avctx, AV_LOG_ERROR, "Invalid tile size: %d\n", tile_size); - return AVERROR_INVALIDDATA; - } - - /* decode number of wavelet bands */ - /* num_levels * 3 + 1 */ - pic_conf.luma_bands = get_bits(&ctx->gb, 2) * 3 + 1; - pic_conf.chroma_bands = get_bits1(&ctx->gb) * 3 + 1; - is_scalable = pic_conf.luma_bands != 1 || pic_conf.chroma_bands != 1; - if (is_scalable && (pic_conf.luma_bands != 4 || pic_conf.chroma_bands != 1)) { - av_log(avctx, AV_LOG_ERROR, "Scalability: unsupported subdivision! Luma bands: %d, chroma bands: %d\n", - pic_conf.luma_bands, pic_conf.chroma_bands); - return AVERROR_INVALIDDATA; - } - - pic_size_indx = get_bits(&ctx->gb, 4); - if (pic_size_indx == IVI5_PIC_SIZE_ESC) { - pic_conf.pic_height = get_bits(&ctx->gb, 13); - pic_conf.pic_width = get_bits(&ctx->gb, 13); - } else { - pic_conf.pic_height = ivi5_common_pic_sizes[pic_size_indx * 2 + 1] << 2; - pic_conf.pic_width = ivi5_common_pic_sizes[pic_size_indx * 2 ] << 2; - } - - if (ctx->gop_flags & 2) { - avpriv_report_missing_feature(avctx, "YV12 picture format"); - return AVERROR_PATCHWELCOME; - } - - pic_conf.chroma_height = (pic_conf.pic_height + 3) >> 2; - pic_conf.chroma_width = (pic_conf.pic_width + 3) >> 2; - - if (!tile_size) { - pic_conf.tile_height = pic_conf.pic_height; - pic_conf.tile_width = pic_conf.pic_width; - } else { - pic_conf.tile_height = pic_conf.tile_width = tile_size; - } - - /* check if picture layout was changed and reallocate buffers */ - if (ivi_pic_config_cmp(&pic_conf, &ctx->pic_conf) || ctx->gop_invalid) { - result = ff_ivi_init_planes(avctx, ctx->planes, &pic_conf, 0); - if (result < 0) { - av_log(avctx, AV_LOG_ERROR, "Couldn't reallocate color planes!\n"); - return result; - } - ctx->pic_conf = pic_conf; - ctx->is_scalable = is_scalable; - blk_size_changed = 1; /* force reallocation of the internal structures */ - } - - for (p = 0; p <= 1; p++) { - for (i = 0; i < (!p ? 
pic_conf.luma_bands : pic_conf.chroma_bands); i++) { - band = &ctx->planes[p].bands[i]; - - band->is_halfpel = get_bits1(&ctx->gb); - - mb_size = get_bits1(&ctx->gb); - blk_size = 8 >> get_bits1(&ctx->gb); - mb_size = blk_size << !mb_size; - - if (p==0 && blk_size==4) { - av_log(avctx, AV_LOG_ERROR, "4x4 luma blocks are unsupported!\n"); - return AVERROR_PATCHWELCOME; - } - - blk_size_changed = mb_size != band->mb_size || blk_size != band->blk_size; - if (blk_size_changed) { - band->mb_size = mb_size; - band->blk_size = blk_size; - } - - if (get_bits1(&ctx->gb)) { - avpriv_report_missing_feature(avctx, "Extended transform info"); - return AVERROR_PATCHWELCOME; - } - - /* select transform function and scan pattern according to plane and band number */ - switch ((p << 2) + i) { - case 0: - band->inv_transform = ff_ivi_inverse_slant_8x8; - band->dc_transform = ff_ivi_dc_slant_2d; - band->scan = ff_zigzag_direct; - band->transform_size = 8; - break; - - case 1: - band->inv_transform = ff_ivi_row_slant8; - band->dc_transform = ff_ivi_dc_row_slant; - band->scan = ff_ivi_vertical_scan_8x8; - band->transform_size = 8; - break; - - case 2: - band->inv_transform = ff_ivi_col_slant8; - band->dc_transform = ff_ivi_dc_col_slant; - band->scan = ff_ivi_horizontal_scan_8x8; - band->transform_size = 8; - break; - - case 3: - band->inv_transform = ff_ivi_put_pixels_8x8; - band->dc_transform = ff_ivi_put_dc_pixel_8x8; - band->scan = ff_ivi_horizontal_scan_8x8; - band->transform_size = 8; - break; - - case 4: - band->inv_transform = ff_ivi_inverse_slant_4x4; - band->dc_transform = ff_ivi_dc_slant_2d; - band->scan = ff_ivi_direct_scan_4x4; - band->transform_size = 4; - break; - } - - band->is_2d_trans = band->inv_transform == ff_ivi_inverse_slant_8x8 || - band->inv_transform == ff_ivi_inverse_slant_4x4; - - if (band->transform_size != band->blk_size) { - av_log(avctx, AV_LOG_ERROR, "transform and block size mismatch (%d != %d)\n", band->transform_size, band->blk_size); - return AVERROR_INVALIDDATA; - } - - /* select dequant matrix according to plane and band number */ - if (!p) { - quant_mat = (pic_conf.luma_bands > 1) ? 
i+1 : 0; - } else { - quant_mat = 5; - } - - if (band->blk_size == 8) { - if(quant_mat >= 5){ - av_log(avctx, AV_LOG_ERROR, "quant_mat %d too large!\n", quant_mat); - return -1; - } - band->intra_base = &ivi5_base_quant_8x8_intra[quant_mat][0]; - band->inter_base = &ivi5_base_quant_8x8_inter[quant_mat][0]; - band->intra_scale = &ivi5_scale_quant_8x8_intra[quant_mat][0]; - band->inter_scale = &ivi5_scale_quant_8x8_inter[quant_mat][0]; - } else { - band->intra_base = ivi5_base_quant_4x4_intra; - band->inter_base = ivi5_base_quant_4x4_inter; - band->intra_scale = ivi5_scale_quant_4x4_intra; - band->inter_scale = ivi5_scale_quant_4x4_inter; - } - - if (get_bits(&ctx->gb, 2)) { - av_log(avctx, AV_LOG_ERROR, "End marker missing!\n"); - return AVERROR_INVALIDDATA; - } - } - } - - /* copy chroma parameters into the 2nd chroma plane */ - for (i = 0; i < pic_conf.chroma_bands; i++) { - band1 = &ctx->planes[1].bands[i]; - band2 = &ctx->planes[2].bands[i]; - - band2->width = band1->width; - band2->height = band1->height; - band2->mb_size = band1->mb_size; - band2->blk_size = band1->blk_size; - band2->is_halfpel = band1->is_halfpel; - band2->intra_base = band1->intra_base; - band2->inter_base = band1->inter_base; - band2->intra_scale = band1->intra_scale; - band2->inter_scale = band1->inter_scale; - band2->scan = band1->scan; - band2->inv_transform = band1->inv_transform; - band2->dc_transform = band1->dc_transform; - band2->is_2d_trans = band1->is_2d_trans; - band2->transform_size= band1->transform_size; - } - - /* reallocate internal structures if needed */ - if (blk_size_changed) { - result = ff_ivi_init_tiles(ctx->planes, pic_conf.tile_width, - pic_conf.tile_height); - if (result < 0) { - av_log(avctx, AV_LOG_ERROR, - "Couldn't reallocate internal structures!\n"); - return result; - } - } - - if (ctx->gop_flags & 8) { - if (get_bits(&ctx->gb, 3)) { - av_log(avctx, AV_LOG_ERROR, "Alignment bits are not zero!\n"); - return AVERROR_INVALIDDATA; - } - - if (get_bits1(&ctx->gb)) - skip_bits(&ctx->gb, 24); /* skip transparency fill color */ - } - - align_get_bits(&ctx->gb); - - skip_bits(&ctx->gb, 23); /* FIXME: unknown meaning */ - - /* skip GOP extension if any */ - if (get_bits1(&ctx->gb)) { - do { - i = get_bits(&ctx->gb, 16); - } while (i & 0x8000); - } - - align_get_bits(&ctx->gb); - - return 0; -} - - -/** - * Skip a header extension. - * - * @param[in,out] gb the GetBit context - */ -static inline int skip_hdr_extension(GetBitContext *gb) -{ - int i, len; - - do { - len = get_bits(gb, 8); - if (8*len > get_bits_left(gb)) - return AVERROR_INVALIDDATA; - for (i = 0; i < len; i++) skip_bits(gb, 8); - } while(len); - - return 0; -} - - -/** - * Decode Indeo5 picture header. 
- * - * @param[in,out] ctx ptr to the decoder context - * @param[in] avctx ptr to the AVCodecContext - * @return result code: 0 = OK, -1 = error - */ -static int decode_pic_hdr(IVI45DecContext *ctx, AVCodecContext *avctx) -{ - int ret; - - if (get_bits(&ctx->gb, 5) != 0x1F) { - av_log(avctx, AV_LOG_ERROR, "Invalid picture start code!\n"); - return AVERROR_INVALIDDATA; - } - - ctx->prev_frame_type = ctx->frame_type; - ctx->frame_type = get_bits(&ctx->gb, 3); - if (ctx->frame_type >= 5) { - av_log(avctx, AV_LOG_ERROR, "Invalid frame type: %d \n", ctx->frame_type); - ctx->frame_type = FRAMETYPE_INTRA; - return AVERROR_INVALIDDATA; - } - - ctx->frame_num = get_bits(&ctx->gb, 8); - - if (ctx->frame_type == FRAMETYPE_INTRA) { - if ((ret = decode_gop_header(ctx, avctx)) < 0) { - av_log(avctx, AV_LOG_ERROR, "Invalid GOP header, skipping frames.\n"); - ctx->gop_invalid = 1; - return ret; - } - ctx->gop_invalid = 0; - } - - if (ctx->frame_type == FRAMETYPE_INTER_SCAL && !ctx->is_scalable) { - av_log(avctx, AV_LOG_ERROR, "Scalable inter frame in non scalable stream\n"); - ctx->frame_type = FRAMETYPE_INTER; - return AVERROR_INVALIDDATA; - } - - if (ctx->frame_type != FRAMETYPE_NULL) { - ctx->frame_flags = get_bits(&ctx->gb, 8); - - ctx->pic_hdr_size = (ctx->frame_flags & 1) ? get_bits(&ctx->gb, 24) : 0; - - ctx->checksum = (ctx->frame_flags & 0x10) ? get_bits(&ctx->gb, 16) : 0; - - /* skip unknown extension if any */ - if (ctx->frame_flags & 0x20) - skip_hdr_extension(&ctx->gb); /* XXX: untested */ - - /* decode macroblock huffman codebook */ - ret = ff_ivi_dec_huff_desc(&ctx->gb, ctx->frame_flags & 0x40, - IVI_MB_HUFF, &ctx->mb_vlc, avctx); - if (ret < 0) - return ret; - - skip_bits(&ctx->gb, 3); /* FIXME: unknown meaning! */ - } - - align_get_bits(&ctx->gb); - - return 0; -} - - -/** - * Decode Indeo5 band header. - * - * @param[in,out] ctx ptr to the decoder context - * @param[in,out] band ptr to the band descriptor - * @param[in] avctx ptr to the AVCodecContext - * @return result code: 0 = OK, -1 = error - */ -static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band, - AVCodecContext *avctx) -{ - int i, ret; - uint8_t band_flags; - - band_flags = get_bits(&ctx->gb, 8); - - if (band_flags & 1) { - band->is_empty = 1; - return 0; - } - - band->data_size = (ctx->frame_flags & 0x80) ? get_bits(&ctx->gb, 24) : 0; - - band->inherit_mv = band_flags & 2; - band->inherit_qdelta = band_flags & 8; - band->qdelta_present = band_flags & 4; - if (!band->qdelta_present) band->inherit_qdelta = 1; - - /* decode rvmap probability corrections if any */ - band->num_corr = 0; /* there are no corrections */ - if (band_flags & 0x10) { - band->num_corr = get_bits(&ctx->gb, 8); /* get number of correction pairs */ - if (band->num_corr > 61) { - av_log(avctx, AV_LOG_ERROR, "Too many corrections: %d\n", - band->num_corr); - return AVERROR_INVALIDDATA; - } - - /* read correction pairs */ - for (i = 0; i < band->num_corr * 2; i++) - band->corr[i] = get_bits(&ctx->gb, 8); - } - - /* select appropriate rvmap table for this band */ - band->rvmap_sel = (band_flags & 0x40) ? 
get_bits(&ctx->gb, 3) : 8; - - /* decode block huffman codebook */ - ret = ff_ivi_dec_huff_desc(&ctx->gb, band_flags & 0x80, IVI_BLK_HUFF, - &band->blk_vlc, avctx); - if (ret < 0) - return ret; - - band->checksum_present = get_bits1(&ctx->gb); - if (band->checksum_present) - band->checksum = get_bits(&ctx->gb, 16); - - band->glob_quant = get_bits(&ctx->gb, 5); - - /* skip unknown extension if any */ - if (band_flags & 0x20) { /* XXX: untested */ - align_get_bits(&ctx->gb); - skip_hdr_extension(&ctx->gb); - } - - align_get_bits(&ctx->gb); - - return 0; -} - - -/** - * Decode info (block type, cbp, quant delta, motion vector) - * for all macroblocks in the current tile. - * - * @param[in,out] ctx ptr to the decoder context - * @param[in,out] band ptr to the band descriptor - * @param[in,out] tile ptr to the tile descriptor - * @param[in] avctx ptr to the AVCodecContext - * @return result code: 0 = OK, -1 = error - */ -static int decode_mb_info(IVI45DecContext *ctx, IVIBandDesc *band, - IVITile *tile, AVCodecContext *avctx) -{ - int x, y, mv_x, mv_y, mv_delta, offs, mb_offset, - mv_scale, blks_per_mb, s; - IVIMbInfo *mb, *ref_mb; - int row_offset = band->mb_size * band->pitch; - - mb = tile->mbs; - ref_mb = tile->ref_mbs; - offs = tile->ypos * band->pitch + tile->xpos; - - if (!ref_mb && - ((band->qdelta_present && band->inherit_qdelta) || band->inherit_mv)) - return AVERROR_INVALIDDATA; - - if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) { - av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches parameters %d\n", - tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)); - return AVERROR_INVALIDDATA; - } - - /* scale factor for motion vectors */ - mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3); - mv_x = mv_y = 0; - - for (y = tile->ypos; y < (tile->ypos + tile->height); y += band->mb_size) { - mb_offset = offs; - - for (x = tile->xpos; x < (tile->xpos + tile->width); x += band->mb_size) { - mb->xpos = x; - mb->ypos = y; - mb->buf_offs = mb_offset; - - if (get_bits1(&ctx->gb)) { - if (ctx->frame_type == FRAMETYPE_INTRA) { - av_log(avctx, AV_LOG_ERROR, "Empty macroblock in an INTRA picture!\n"); - return AVERROR_INVALIDDATA; - } - mb->type = 1; /* empty macroblocks are always INTER */ - mb->cbp = 0; /* all blocks are empty */ - - mb->q_delta = 0; - if (!band->plane && !band->band_num && (ctx->frame_flags & 8)) { - mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, - IVI_VLC_BITS, 1); - mb->q_delta = IVI_TOSIGNED(mb->q_delta); - } - - mb->mv_x = mb->mv_y = 0; /* no motion vector coded */ - if (band->inherit_mv && ref_mb){ - /* motion vector inheritance */ - if (mv_scale) { - mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale); - mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale); - } else { - mb->mv_x = ref_mb->mv_x; - mb->mv_y = ref_mb->mv_y; - } - } - } else { - if (band->inherit_mv && ref_mb) { - mb->type = ref_mb->type; /* copy mb_type from corresponding reference mb */ - } else if (ctx->frame_type == FRAMETYPE_INTRA) { - mb->type = 0; /* mb_type is always INTRA for intra-frames */ - } else { - mb->type = get_bits1(&ctx->gb); - } - - blks_per_mb = band->mb_size != band->blk_size ? 
4 : 1; - mb->cbp = get_bits(&ctx->gb, blks_per_mb); - - mb->q_delta = 0; - if (band->qdelta_present) { - if (band->inherit_qdelta) { - if (ref_mb) mb->q_delta = ref_mb->q_delta; - } else if (mb->cbp || (!band->plane && !band->band_num && - (ctx->frame_flags & 8))) { - mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, - IVI_VLC_BITS, 1); - mb->q_delta = IVI_TOSIGNED(mb->q_delta); - } - } - - if (!mb->type) { - mb->mv_x = mb->mv_y = 0; /* there is no motion vector in intra-macroblocks */ - } else { - if (band->inherit_mv && ref_mb){ - /* motion vector inheritance */ - if (mv_scale) { - mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale); - mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale); - } else { - mb->mv_x = ref_mb->mv_x; - mb->mv_y = ref_mb->mv_y; - } - } else { - /* decode motion vector deltas */ - mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, - IVI_VLC_BITS, 1); - mv_y += IVI_TOSIGNED(mv_delta); - mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, - IVI_VLC_BITS, 1); - mv_x += IVI_TOSIGNED(mv_delta); - mb->mv_x = mv_x; - mb->mv_y = mv_y; - } - } - } - - s= band->is_halfpel; - if (mb->type) - if ( x + (mb->mv_x >>s) + (y+ (mb->mv_y >>s))*band->pitch < 0 || - x + ((mb->mv_x+s)>>s) + band->mb_size - 1 - + (y+band->mb_size - 1 +((mb->mv_y+s)>>s))*band->pitch > band->bufsize - 1) { - av_log(avctx, AV_LOG_ERROR, "motion vector %d %d outside reference\n", x*s + mb->mv_x, y*s + mb->mv_y); - return AVERROR_INVALIDDATA; - } - - mb++; - if (ref_mb) - ref_mb++; - mb_offset += band->mb_size; - } - - offs += row_offset; - } - - align_get_bits(&ctx->gb); - - return 0; -} - - -/** - * Switch buffers. - * - * @param[in,out] ctx ptr to the decoder context - */ -static void switch_buffers(IVI45DecContext *ctx) -{ - switch (ctx->prev_frame_type) { - case FRAMETYPE_INTRA: - case FRAMETYPE_INTER: - ctx->buf_switch ^= 1; - ctx->dst_buf = ctx->buf_switch; - ctx->ref_buf = ctx->buf_switch ^ 1; - break; - case FRAMETYPE_INTER_SCAL: - if (!ctx->inter_scal) { - ctx->ref2_buf = 2; - ctx->inter_scal = 1; - } - FFSWAP(int, ctx->dst_buf, ctx->ref2_buf); - ctx->ref_buf = ctx->ref2_buf; - break; - case FRAMETYPE_INTER_NOREF: - break; - } - - switch (ctx->frame_type) { - case FRAMETYPE_INTRA: - ctx->buf_switch = 0; - /* FALLTHROUGH */ - case FRAMETYPE_INTER: - ctx->inter_scal = 0; - ctx->dst_buf = ctx->buf_switch; - ctx->ref_buf = ctx->buf_switch ^ 1; - break; - case FRAMETYPE_INTER_SCAL: - case FRAMETYPE_INTER_NOREF: - case FRAMETYPE_NULL: - break; - } -} - - -static int is_nonnull_frame(IVI45DecContext *ctx) -{ - return ctx->frame_type != FRAMETYPE_NULL; -} - - -/** - * Initialize Indeo5 decoder. 
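 *
 * Illustrative aside on switch_buffers() above, a plain-C sketch rather
 * than text from the deleted file: for INTRA and INTER frames the
 * destination/reference picture buffers are chosen by a one-bit
 * ping-pong, roughly
 *
 *     buf_switch ^= 1;              // alternate 0 -> 1 -> 0 -> ...
 *     dst_buf    = buf_switch;      // decode the new frame here
 *     ref_buf    = buf_switch ^ 1;  // previous frame is the reference
 *
 * FRAMETYPE_INTER_SCAL additionally maintains a third buffer (ref2_buf)
 * for the scalable reference, while FRAMETYPE_INTER_NOREF and
 * FRAMETYPE_NULL leave the buffer assignment unchanged.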
- */ -static av_cold int decode_init(AVCodecContext *avctx) -{ - IVI45DecContext *ctx = avctx->priv_data; - int result; - - ctx->gop_invalid = 1; - - ff_ivi_init_static_vlc(); - - /* copy rvmap tables in our context so we can apply changes to them */ - memcpy(ctx->rvmap_tabs, ff_ivi_rvmap_tabs, sizeof(ff_ivi_rvmap_tabs)); - - /* set the initial picture layout according to the basic profile: - there is only one band per plane (no scalability), only one tile (no local decoding) - and picture format = YVU9 */ - ctx->pic_conf.pic_width = avctx->width; - ctx->pic_conf.pic_height = avctx->height; - ctx->pic_conf.chroma_width = (avctx->width + 3) >> 2; - ctx->pic_conf.chroma_height = (avctx->height + 3) >> 2; - ctx->pic_conf.tile_width = avctx->width; - ctx->pic_conf.tile_height = avctx->height; - ctx->pic_conf.luma_bands = ctx->pic_conf.chroma_bands = 1; - - result = ff_ivi_init_planes(avctx, ctx->planes, &ctx->pic_conf, 0); - if (result) { - av_log(avctx, AV_LOG_ERROR, "Couldn't allocate color planes!\n"); - return AVERROR_INVALIDDATA; - } - - ctx->buf_switch = 0; - ctx->inter_scal = 0; - - ctx->decode_pic_hdr = decode_pic_hdr; - ctx->decode_band_hdr = decode_band_hdr; - ctx->decode_mb_info = decode_mb_info; - ctx->switch_buffers = switch_buffers; - ctx->is_nonnull_frame = is_nonnull_frame; - - ctx->is_indeo4 = 0; - - avctx->pix_fmt = AV_PIX_FMT_YUV410P; - - return 0; -} - -const FFCodec ff_indeo5_decoder = { - .p.name = "indeo5", - CODEC_LONG_NAME("Intel Indeo Video Interactive 5"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_INDEO5, - .priv_data_size = sizeof(IVI45DecContext), - .init = decode_init, - .close = ff_ivi_decode_close, - FF_CODEC_DECODE_CB(ff_ivi_decode_frame), - .p.capabilities = AV_CODEC_CAP_DR1, - .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/hevc_lpf_sao_lsx.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/hevc_lpf_sao_lsx.c deleted file mode 100644 index b5822afd9439550735c95c2e16fe8d567b98614d..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/hevc_lpf_sao_lsx.c +++ /dev/null @@ -1,2487 +0,0 @@ -/* - * Copyright (c) 2022 Loongson Technology Corporation Limited - * Contributed by Lu Wang - * Hao Chen - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/loongarch/loongson_intrinsics.h" -#include "hevcdsp_lsx.h" - -void ff_hevc_loop_filter_luma_h_8_lsx(uint8_t *src, ptrdiff_t stride, - int32_t beta, const int32_t *tc, - const uint8_t *p_is_pcm, const uint8_t *q_is_pcm) -{ - ptrdiff_t stride_2x = (stride << 1); - ptrdiff_t stride_4x = (stride << 2); - ptrdiff_t stride_3x = stride_2x + stride; - uint8_t *p3 = src - stride_4x; - uint8_t *p2 = src - stride_3x; - uint8_t *p1 = src - stride_2x; - uint8_t *p0 = src - stride; - uint8_t *q0 = src; - uint8_t *q1 = src + stride; - uint8_t *q2 = src + stride_2x; - uint8_t *q3 = src + stride_3x; - uint8_t flag0, flag1; - int32_t dp00, dq00, dp30, dq30, d00, d30, d0030, d0434; - int32_t dp04, dq04, dp34, dq34, d04, d34; - int32_t tc0, p_is_pcm0, q_is_pcm0, beta30, beta20, tc250; - int32_t tc4, p_is_pcm4, q_is_pcm4, tc254, tmp; - - __m128i dst0, dst1, dst2, dst3, dst4, dst5; - __m128i cmp0, cmp1, cmp2, cmp3, p_is_pcm_vec, q_is_pcm_vec; - __m128i temp0, temp1; - __m128i temp2, tc_pos, tc_neg; - __m128i diff0, diff1, delta0, delta1, delta2, abs_delta0; - __m128i zero = {0}; - __m128i p3_src, p2_src, p1_src, p0_src, q0_src, q1_src, q2_src, q3_src; - - dp00 = abs(p2[0] - (p1[0] << 1) + p0[0]); - dq00 = abs(q2[0] - (q1[0] << 1) + q0[0]); - dp30 = abs(p2[3] - (p1[3] << 1) + p0[3]); - dq30 = abs(q2[3] - (q1[3] << 1) + q0[3]); - d00 = dp00 + dq00; - d30 = dp30 + dq30; - dp04 = abs(p2[4] - (p1[4] << 1) + p0[4]); - dq04 = abs(q2[4] - (q1[4] << 1) + q0[4]); - dp34 = abs(p2[7] - (p1[7] << 1) + p0[7]); - dq34 = abs(q2[7] - (q1[7] << 1) + q0[7]); - d04 = dp04 + dq04; - d34 = dp34 + dq34; - - p_is_pcm0 = p_is_pcm[0]; - p_is_pcm4 = p_is_pcm[1]; - q_is_pcm0 = q_is_pcm[0]; - q_is_pcm4 = q_is_pcm[1]; - - DUP2_ARG1(__lsx_vreplgr2vr_d, p_is_pcm0, p_is_pcm4, cmp0, cmp1); - p_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - p_is_pcm_vec = __lsx_vseqi_d(p_is_pcm_vec, 0); - d0030 = (d00 + d30) >= beta; - d0434 = (d04 + d34) >= beta; - DUP2_ARG1(__lsx_vreplgr2vr_w, d0030, d0434, cmp0, cmp1); - cmp3 = __lsx_vpackev_w(cmp1, cmp0); - cmp3 = __lsx_vseqi_w(cmp3, 0); - - if ((!p_is_pcm0 || !p_is_pcm4 || !q_is_pcm0 || !q_is_pcm4) && - (!d0030 || !d0434)) { - DUP4_ARG2(__lsx_vld, p3, 0, p2, 0, p1, 0, p0, 0, - p3_src, p2_src, p1_src, p0_src); - DUP2_ARG1(__lsx_vreplgr2vr_d, q_is_pcm0, q_is_pcm4, cmp0, cmp1); - q_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - q_is_pcm_vec = __lsx_vseqi_d(q_is_pcm_vec, 0); - - tc0 = tc[0]; - beta30 = beta >> 3; - beta20 = beta >> 2; - tc250 = (((tc0 << 2) + tc0 + 1) >> 1); - tc4 = tc[1]; - tc254 = (((tc4 << 2) + tc4 + 1) >> 1); - - DUP2_ARG1(__lsx_vreplgr2vr_h, tc0, tc4, cmp0, cmp1); - DUP4_ARG2(__lsx_vilvl_b, zero, p3_src, zero, p2_src, zero, p1_src, zero, - p0_src, p3_src, p2_src, p1_src, p0_src); - DUP4_ARG2(__lsx_vld, q0, 0, q1, 0, q2, 0, q3, 0, - q0_src, q1_src, q2_src, q3_src); - flag0 = abs(p3[0] - p0[0]) + abs(q3[0] - q0[0]) < beta30 && - abs(p0[0] - q0[0]) < tc250; - flag0 = flag0 && (abs(p3[3] - p0[3]) + abs(q3[3] - q0[3]) < beta30 && - abs(p0[3] - q0[3]) < tc250 && (d00 << 1) < beta20 && - (d30 << 1) < beta20); - tc_pos = __lsx_vpackev_d(cmp1, cmp0); - DUP4_ARG2(__lsx_vilvl_b, zero, q0_src, zero, q1_src, zero, q2_src, - zero, q3_src, q0_src, q1_src, q2_src, q3_src); - - flag1 = abs(p3[4] - p0[4]) + abs(q3[4] - q0[4]) < beta30 && - abs(p0[4] - q0[4]) < tc254; - 
flag1 = flag1 && (abs(p3[7] - p0[7]) + abs(q3[7] - q0[7]) < beta30 && - abs(p0[7] - q0[7]) < tc254 && (d04 << 1) < beta20 && - (d34 << 1) < beta20); - DUP2_ARG1(__lsx_vreplgr2vr_w, flag0, flag1, cmp0, cmp1); - cmp2 = __lsx_vpackev_w(cmp1, cmp0); - cmp2 = __lsx_vseqi_w(cmp2, 0); - - if (flag0 && flag1) { /* strong only */ - /* strong filter */ - tc_pos = __lsx_vslli_h(tc_pos, 1); - tc_neg = __lsx_vneg_h(tc_pos); - - /* p part */ - DUP2_ARG2(__lsx_vadd_h, p1_src, p0_src, temp0, q0_src, - temp0, temp0); - temp1 = __lsx_vadd_h(p3_src, p2_src); - temp1 = __lsx_vslli_h(temp1, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, p2_src, temp1, temp0, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, p2_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst0 = __lsx_vadd_h(temp2, p2_src); - - temp1 = __lsx_vadd_h(temp0, p2_src); - temp1 = __lsx_vsrari_h(temp1, 2); - temp2 = __lsx_vsub_h(temp1, p1_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst1 = __lsx_vadd_h(temp2, p1_src); - - temp1 = __lsx_vslli_h(temp0, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, p2_src, temp1, q1_src, - temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, p0_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst2 = __lsx_vadd_h(temp2, p0_src); - - p_is_pcm_vec = __lsx_vnor_v(p_is_pcm_vec, p_is_pcm_vec); - DUP2_ARG3(__lsx_vbitsel_v, dst0, p2_src, p_is_pcm_vec, dst1, - p1_src, p_is_pcm_vec, dst0, dst1); - dst2 = __lsx_vbitsel_v(dst2, p0_src, p_is_pcm_vec); - - /* q part */ - DUP2_ARG2(__lsx_vadd_h, q1_src, p0_src, temp0, q0_src, - temp0, temp0); - temp1 = __lsx_vadd_h(q3_src, q2_src); - temp1 = __lsx_vslli_h(temp1, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, q2_src, temp1, temp0, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, q2_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst5 = __lsx_vadd_h(temp2, q2_src); - - temp1 = __lsx_vadd_h(temp0, q2_src); - temp1 = __lsx_vsrari_h(temp1, 2); - temp2 = __lsx_vsub_h(temp1, q1_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst4 = __lsx_vadd_h(temp2, q1_src); - - temp0 = __lsx_vslli_h(temp0, 1); - DUP2_ARG2(__lsx_vadd_h, temp0, p1_src, temp1, q2_src, - temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, q0_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst3 = __lsx_vadd_h(temp2, q0_src); - - q_is_pcm_vec = __lsx_vnor_v(q_is_pcm_vec, q_is_pcm_vec); - DUP2_ARG3(__lsx_vbitsel_v, dst3, q0_src, q_is_pcm_vec, dst4, - q1_src, q_is_pcm_vec, dst3, dst4); - dst5 = __lsx_vbitsel_v(dst5, q2_src, q_is_pcm_vec); - - /* pack results to 8 bit */ - DUP2_ARG2(__lsx_vpickev_b, dst1, dst0, dst3, dst2, dst0, dst1); - dst2 = __lsx_vpickev_b(dst5, dst4); - - /* pack src to 8 bit */ - DUP2_ARG2(__lsx_vpickev_b, p1_src, p2_src, q0_src, p0_src, - dst3, dst4); - dst5 = __lsx_vpickev_b(q2_src, q1_src); - - cmp3 = __lsx_vnor_v(cmp3, cmp3); - DUP2_ARG3(__lsx_vbitsel_v, dst0, dst3, cmp3, dst1, dst4, cmp3, - dst0, dst1); - dst2 = __lsx_vbitsel_v(dst2, dst5, cmp3); - - __lsx_vstelm_d(dst0, p2, 0, 0); - __lsx_vstelm_d(dst0, p2 + stride, 0, 1); - __lsx_vstelm_d(dst1, p2 + stride_2x, 0, 0); - __lsx_vstelm_d(dst1, p2 + stride_3x, 0, 1); - __lsx_vstelm_d(dst2, p2 + stride_4x, 0, 0); - __lsx_vstelm_d(dst2, p2 + stride_4x + stride, 0, 1); - /* strong filter ends */ - } else if (flag0 == flag1) { /* weak only */ - /* weak filter */ - tc_neg = __lsx_vneg_h(tc_pos); - DUP2_ARG2(__lsx_vsub_h, q0_src, p0_src, q1_src, p1_src, - diff0, diff1); - DUP2_ARG2(__lsx_vadd_h, __lsx_vslli_h(diff0, 3), 
diff0, - __lsx_vslli_h(diff1, 1), diff1, diff0, diff1); - delta0 = __lsx_vsub_h(diff0, diff1); - delta0 = __lsx_vsrari_h(delta0, 4); - temp1 = __lsx_vadd_h(__lsx_vslli_h(tc_pos, 3), - __lsx_vslli_h(tc_pos, 1)); - abs_delta0 = __lsx_vadda_h(delta0, zero); - abs_delta0 = __lsx_vsle_hu(temp1, abs_delta0); - abs_delta0 = __lsx_vnor_v(abs_delta0, abs_delta0); - - delta0 = __lsx_vclip_h(delta0, tc_neg, tc_pos); - temp2 = __lsx_vadd_h(delta0, p0_src); - temp2 = __lsx_vclip255_h(temp2); - temp0 = __lsx_vbitsel_v(temp2, p0_src, - __lsx_vnor_v(p_is_pcm_vec, p_is_pcm_vec)); - temp2 = __lsx_vsub_h(q0_src, delta0); - temp2 = __lsx_vclip255_h(temp2); - temp2 = __lsx_vbitsel_v(temp2, q0_src, __lsx_vnor_v(q_is_pcm_vec, - q_is_pcm_vec)); - DUP2_ARG2(__lsx_vnor_v, p_is_pcm_vec, p_is_pcm_vec, q_is_pcm_vec, - q_is_pcm_vec, p_is_pcm_vec, q_is_pcm_vec); - - tmp = (beta + (beta >> 1)) >> 3; - DUP2_ARG1(__lsx_vreplgr2vr_d, dp00 + dp30 < tmp, dp04 + dp34 < tmp, - cmp0, cmp1); - cmp0 = __lsx_vpackev_d(cmp1, cmp0); - cmp0 = __lsx_vseqi_d(cmp0, 0); - p_is_pcm_vec = __lsx_vor_v(p_is_pcm_vec, cmp0); - - DUP2_ARG1(__lsx_vreplgr2vr_d, dq00 + dq30 < tmp, dq04 + dq34 < tmp, - cmp0, cmp1); - cmp0 = __lsx_vpackev_d(cmp1, cmp0); - cmp0 = __lsx_vseqi_d(cmp0, 0); - q_is_pcm_vec = __lsx_vor_v(q_is_pcm_vec, cmp0); - tc_pos = __lsx_vsrai_h(tc_pos, 1); - tc_neg = __lsx_vneg_h(tc_pos); - - DUP2_ARG2(__lsx_vavgr_hu, p2_src, p0_src, q0_src, q2_src, - delta1, delta2); - DUP2_ARG2(__lsx_vsub_h, delta1, p1_src, delta2, q1_src, - delta1, delta2); - delta1 = __lsx_vadd_h(delta1, delta0); - delta2 = __lsx_vsub_h(delta2, delta0); - DUP2_ARG2(__lsx_vsrai_h, delta1, 1, delta2, 1, delta1, delta2); - DUP2_ARG3(__lsx_vclip_h, delta1, tc_neg, tc_pos, delta2, - tc_neg, tc_pos, delta1, delta2); - DUP2_ARG2(__lsx_vadd_h, p1_src, delta1, q1_src, delta2, - delta1, delta2); - DUP2_ARG1(__lsx_vclip255_h, delta1, delta2, delta1, delta2); - DUP2_ARG3(__lsx_vbitsel_v, delta1, p1_src, p_is_pcm_vec, delta2, - q1_src, q_is_pcm_vec, delta1, delta2); - - abs_delta0 = __lsx_vnor_v(abs_delta0, abs_delta0); - DUP4_ARG3(__lsx_vbitsel_v, delta1, p1_src, abs_delta0, temp0, - p0_src, abs_delta0, temp2, q0_src, abs_delta0, delta2, - q1_src, abs_delta0, dst1, dst2, dst3, dst4); - /* pack results to 8 bit */ - DUP2_ARG2(__lsx_vpickev_b, dst2, dst1, dst4, dst3, dst0, dst1); - /* pack src to 8 bit */ - DUP2_ARG2(__lsx_vpickev_b, p0_src, p1_src, q1_src, q0_src, - dst2, dst3); - cmp3 = __lsx_vnor_v(cmp3, cmp3); - DUP2_ARG3(__lsx_vbitsel_v, dst0, dst2, cmp3, dst1, dst3, cmp3, - dst0, dst1); - - p2 += stride; - __lsx_vstelm_d(dst0, p2, 0, 0); - __lsx_vstelm_d(dst0, p2 + stride, 0, 1); - __lsx_vstelm_d(dst1, p2 + stride_2x, 0, 0); - __lsx_vstelm_d(dst1, p2 + stride_3x, 0, 1); - /* weak filter ends */ - } else { /* strong + weak */ - /* strong filter */ - tc_pos = __lsx_vslli_h(tc_pos, 1); - tc_neg = __lsx_vneg_h(tc_pos); - - /* p part */ - DUP2_ARG2(__lsx_vadd_h, p1_src, p0_src, temp0, q0_src, - temp0, temp0); - temp1 = __lsx_vadd_h(p3_src, p2_src); - temp1 = __lsx_vslli_h(temp1, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, p2_src, temp1, temp0, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, p2_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst0 = __lsx_vadd_h(temp2, p2_src); - - temp1 = __lsx_vadd_h(temp0, p2_src); - temp1 = __lsx_vsrari_h(temp1, 2); - temp2 = __lsx_vsub_h(temp1, p1_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst1 = __lsx_vadd_h(temp2, p1_src); - - temp1 = __lsx_vslli_h(temp0, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, 
p2_src, temp1, q1_src, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, p0_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst2 = __lsx_vadd_h(temp2, p0_src); - - p_is_pcm_vec = __lsx_vnor_v(p_is_pcm_vec, p_is_pcm_vec); - DUP2_ARG3(__lsx_vbitsel_v, dst0, p2_src, p_is_pcm_vec, dst1, - p1_src, p_is_pcm_vec, dst0, dst1); - dst2 = __lsx_vbitsel_v(dst2, p0_src, p_is_pcm_vec); - - /* q part */ - DUP2_ARG2(__lsx_vadd_h, q1_src, p0_src, temp0, q0_src, - temp0, temp0); - temp1 = __lsx_vadd_h(q3_src, q2_src); - temp1 = __lsx_vslli_h(temp1, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, q2_src, temp1, temp0, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, q2_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst5 = __lsx_vadd_h(temp2, q2_src); - - temp1 = __lsx_vadd_h(temp0, q2_src); - temp1 = __lsx_vsrari_h(temp1, 2); - temp2 = __lsx_vsub_h(temp1, q1_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst4 = __lsx_vadd_h(temp2, q1_src); - - temp1 = __lsx_vslli_h(temp0, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, p1_src, temp1, q2_src, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, q0_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst3 = __lsx_vadd_h(temp2, q0_src); - - q_is_pcm_vec = __lsx_vnor_v(q_is_pcm_vec, q_is_pcm_vec); - DUP2_ARG3(__lsx_vbitsel_v, dst3, q0_src, q_is_pcm_vec, dst4, - q1_src, q_is_pcm_vec, dst3, dst4); - dst5 = __lsx_vbitsel_v(dst5, q2_src, q_is_pcm_vec); - - /* pack strong results to 8 bit */ - DUP2_ARG2(__lsx_vpickev_b, dst1, dst0, dst3, dst2, dst0, dst1); - dst2 = __lsx_vpickev_b(dst5, dst4); - /* strong filter ends */ - - /* weak filter */ - tc_pos = __lsx_vsrai_h(tc_pos, 1); - tc_neg = __lsx_vneg_h(tc_pos); - - DUP2_ARG2(__lsx_vsub_h, q0_src, p0_src, q1_src, p1_src, - diff0, diff1); - DUP2_ARG2(__lsx_vadd_h, __lsx_vslli_h(diff0, 3), diff0, - __lsx_vslli_h(diff1, 1), diff1, diff0, diff1); - delta0 = __lsx_vsub_h(diff0, diff1); - delta0 = __lsx_vsrari_h(delta0, 4); - temp1 = __lsx_vadd_h(__lsx_vslli_h(tc_pos, 3), - __lsx_vslli_h(tc_pos, 1)); - abs_delta0 = __lsx_vadda_h(delta0, zero); - abs_delta0 = __lsx_vsle_hu(temp1, abs_delta0); - abs_delta0 = __lsx_vnor_v(abs_delta0, abs_delta0); - - delta0 = __lsx_vclip_h(delta0, tc_neg, tc_pos); - temp2 = __lsx_vadd_h(delta0, p0_src); - temp2 = __lsx_vclip255_h(temp2); - temp0 = __lsx_vbitsel_v(temp2, p0_src, p_is_pcm_vec); - - temp2 = __lsx_vsub_h(q0_src, delta0); - temp2 = __lsx_vclip255_h(temp2); - temp2 = __lsx_vbitsel_v(temp2, q0_src, q_is_pcm_vec); - - tmp = (beta + (beta >> 1)) >> 3; - DUP2_ARG1(__lsx_vreplgr2vr_d, dp00 + dp30 < tmp, dp04 + dp34 < tmp, - cmp0, cmp1); - cmp0 = __lsx_vpackev_d(cmp1, cmp0); - p_is_pcm_vec = __lsx_vor_v(p_is_pcm_vec, __lsx_vseqi_d(cmp0, 0)); - DUP2_ARG1(__lsx_vreplgr2vr_d, dq00 + dq30 < tmp, dq04 + dq34 < tmp, - cmp0, cmp1); - cmp0 = __lsx_vpackev_d(cmp1, cmp0); - q_is_pcm_vec = __lsx_vor_v(q_is_pcm_vec, __lsx_vseqi_d(cmp0, 0)); - - tc_pos = __lsx_vsrai_h(tc_pos, 1); - tc_neg = __lsx_vneg_h(tc_pos); - - DUP2_ARG2(__lsx_vavgr_hu, p2_src, p0_src, q0_src, q2_src, - delta1, delta2); - DUP2_ARG2(__lsx_vsub_h, delta1, p1_src, delta2, q1_src, - delta1, delta2); - delta1 = __lsx_vadd_h(delta1, delta0); - delta2 = __lsx_vsub_h(delta2, delta0); - DUP2_ARG2(__lsx_vsrai_h, delta1, 1, delta2, 1, delta1, delta2); - DUP2_ARG3(__lsx_vclip_h, delta1, tc_neg, tc_pos, delta2, tc_neg, - tc_pos, delta1, delta2); - DUP2_ARG2(__lsx_vadd_h, p1_src, delta1, q1_src, delta2, - delta1, delta2); - 
DUP2_ARG1(__lsx_vclip255_h, delta1, delta2, delta1, delta2); - DUP2_ARG3(__lsx_vbitsel_v, delta1, p1_src, p_is_pcm_vec, delta2, - q1_src, q_is_pcm_vec, delta1, delta2); - abs_delta0 = __lsx_vnor_v(abs_delta0, abs_delta0); - DUP4_ARG3(__lsx_vbitsel_v, delta1, p1_src, abs_delta0, delta2, - q1_src, abs_delta0, temp0, p0_src, abs_delta0, temp2, - q0_src, abs_delta0, delta1, delta2, temp0, temp2); - /* weak filter ends */ - - /* pack weak results to 8 bit */ - DUP2_ARG2(__lsx_vpickev_b, delta1, p2_src, temp2, temp0, - dst3, dst4); - dst5 = __lsx_vpickev_b(q2_src, delta2); - - /* select between weak or strong */ - DUP2_ARG3(__lsx_vbitsel_v, dst0, dst3, cmp2, dst1, dst4, cmp2, - dst0, dst1); - dst2 = __lsx_vbitsel_v(dst2, dst5, cmp2); - - /* pack src to 8 bit */ - DUP2_ARG2(__lsx_vpickev_b, p1_src, p2_src, q0_src, p0_src, - dst3, dst4); - dst5 = __lsx_vpickev_b(q2_src, q1_src); - - cmp3 = __lsx_vnor_v(cmp3, cmp3); - DUP2_ARG3(__lsx_vbitsel_v, dst0, dst3, cmp3, dst1, dst4, cmp3, - dst0, dst1); - dst2 = __lsx_vbitsel_v(dst2, dst5, cmp3); - - __lsx_vstelm_d(dst0, p2, 0, 0); - __lsx_vstelm_d(dst0, p2 + stride, 0, 1); - __lsx_vstelm_d(dst1, p2 + stride_2x, 0, 0); - __lsx_vstelm_d(dst1, p2 + stride_3x, 0, 1); - __lsx_vstelm_d(dst2, p2 + stride_4x, 0, 0); - __lsx_vstelm_d(dst2, p2 + stride_4x + stride, 0, 1); - } - } -} - -void ff_hevc_loop_filter_luma_v_8_lsx(uint8_t *src, ptrdiff_t stride, - int32_t beta, const int32_t *tc, - const uint8_t *p_is_pcm, const uint8_t *q_is_pcm) -{ - ptrdiff_t stride_2x = (stride << 1); - ptrdiff_t stride_4x = (stride << 2); - ptrdiff_t stride_3x = stride_2x + stride; - uint8_t *p3 = src; - uint8_t *p2 = src + stride_3x; - uint8_t *p1 = src + stride_4x; - uint8_t *p0 = src + stride_4x + stride_3x; - uint8_t flag0, flag1; - int32_t dp00, dq00, dp30, dq30, d00, d30; - int32_t d0030, d0434; - int32_t dp04, dq04, dp34, dq34, d04, d34; - int32_t tc0, p_is_pcm0, q_is_pcm0, beta30, beta20, tc250; - int32_t tc4, p_is_pcm4, q_is_pcm4, tc254, tmp; - - __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; - __m128i cmp0, cmp1, cmp2, p_is_pcm_vec, q_is_pcm_vec; - __m128i cmp3; - __m128i temp0, temp1; - __m128i temp2; - __m128i tc_pos, tc_neg; - __m128i diff0, diff1, delta0, delta1, delta2, abs_delta0; - __m128i zero = {0}; - __m128i p3_src, p2_src, p1_src, p0_src, q0_src, q1_src, q2_src, q3_src; - - dp00 = abs(p3[-3] - (p3[-2] << 1) + p3[-1]); - dq00 = abs(p3[2] - (p3[1] << 1) + p3[0]); - dp30 = abs(p2[-3] - (p2[-2] << 1) + p2[-1]); - dq30 = abs(p2[2] - (p2[1] << 1) + p2[0]); - d00 = dp00 + dq00; - d30 = dp30 + dq30; - p_is_pcm0 = p_is_pcm[0]; - q_is_pcm0 = q_is_pcm[0]; - - dp04 = abs(p1[-3] - (p1[-2] << 1) + p1[-1]); - dq04 = abs(p1[2] - (p1[1] << 1) + p1[0]); - dp34 = abs(p0[-3] - (p0[-2] << 1) + p0[-1]); - dq34 = abs(p0[2] - (p0[1] << 1) + p0[0]); - d04 = dp04 + dq04; - d34 = dp34 + dq34; - p_is_pcm4 = p_is_pcm[1]; - q_is_pcm4 = q_is_pcm[1]; - - DUP2_ARG1(__lsx_vreplgr2vr_d, p_is_pcm0, p_is_pcm4, cmp0, cmp1); - p_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - p_is_pcm_vec = __lsx_vseqi_d(p_is_pcm_vec, 0); - - d0030 = (d00 + d30) >= beta; - d0434 = (d04 + d34) >= beta; - - DUP2_ARG1(__lsx_vreplgr2vr_d, d0030, d0434, cmp0, cmp1); - cmp3 = __lsx_vpackev_d(cmp1, cmp0); - cmp3 = __lsx_vseqi_d(cmp3, 0); - - if ((!p_is_pcm0 || !p_is_pcm4 || !q_is_pcm0 || !q_is_pcm4) && - (!d0030 || !d0434)) { - src -= 4; - DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride_2x, 0, - src + stride_3x, 0, p3_src, p2_src, p1_src, p0_src); - src += stride_4x; - DUP4_ARG2(__lsx_vld, src, 0, src + stride, 
0, src + stride_2x, 0, - src + stride_3x, 0, q0_src, q1_src, q2_src, q3_src); - src -= stride_4x; - - DUP2_ARG1(__lsx_vreplgr2vr_d, q_is_pcm0, q_is_pcm4, cmp0, cmp1); - q_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - q_is_pcm_vec = __lsx_vseqi_d(q_is_pcm_vec, 0); - - tc0 = tc[0]; - beta30 = beta >> 3; - beta20 = beta >> 2; - tc250 = (((tc0 << 2) + tc0 + 1) >> 1); - tc4 = tc[1]; - tc254 = (((tc4 << 2) + tc4 + 1) >> 1); - DUP2_ARG1( __lsx_vreplgr2vr_h, tc0 << 1, tc4 << 1, cmp0, cmp1); - tc_pos = __lsx_vpackev_d(cmp1, cmp0); - LSX_TRANSPOSE8x8_B(p3_src, p2_src, p1_src, p0_src, q0_src, q1_src, - q2_src, q3_src, p3_src, p2_src, p1_src, p0_src, - q0_src, q1_src, q2_src, q3_src); - - flag0 = abs(p3[-4] - p3[-1]) + abs(p3[3] - p3[0]) < beta30 && - abs(p3[-1] - p3[0]) < tc250; - flag0 = flag0 && (abs(p2[-4] - p2[-1]) + abs(p2[3] - p2[0]) < beta30 && - abs(p2[-1] - p2[0]) < tc250 && (d00 << 1) < beta20 && - (d30 << 1) < beta20); - cmp0 = __lsx_vreplgr2vr_d(flag0); - DUP4_ARG2(__lsx_vilvl_b, zero, p3_src, zero, p2_src, zero, p1_src, zero, - p0_src, p3_src, p2_src, p1_src, p0_src); - - flag1 = abs(p1[-4] - p1[-1]) + abs(p1[3] - p1[0]) < beta30 && - abs(p1[-1] - p1[0]) < tc254; - flag1 = flag1 && (abs(p0[-4] - p0[-1]) + abs(p0[3] - p0[0]) < beta30 && - abs(p0[-1] - p0[0]) < tc254 && (d04 << 1) < beta20 && - (d34 << 1) < beta20); - DUP4_ARG2(__lsx_vilvl_b, zero, q0_src, zero, q1_src, zero, q2_src, zero, - q3_src, q0_src, q1_src, q2_src, q3_src); - - cmp1 = __lsx_vreplgr2vr_d(flag1); - cmp2 = __lsx_vpackev_d(cmp1, cmp0); - cmp2 = __lsx_vseqi_d(cmp2, 0); - - if (flag0 && flag1) { /* strong only */ - /* strong filter */ - tc_neg = __lsx_vneg_h(tc_pos); - /* p part */ - DUP2_ARG2(__lsx_vadd_h, p1_src, p0_src, temp0, q0_src, - temp0, temp0); - temp1 = __lsx_vadd_h(p3_src, p2_src); - temp1 = __lsx_vslli_h(temp1, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, p2_src, temp1, temp0, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, p2_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst0 = __lsx_vadd_h(temp2, p2_src); - - temp1 = __lsx_vadd_h(temp0, p2_src); - temp1 = __lsx_vsrari_h(temp1, 2); - temp2 = __lsx_vsub_h(temp1, p1_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst1 = __lsx_vadd_h(temp2, p1_src); - - temp1 = __lsx_vslli_h(temp0, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, p2_src, temp1, q1_src, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, p0_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst2 = __lsx_vadd_h(temp2, p0_src); - - p_is_pcm_vec = __lsx_vnor_v(p_is_pcm_vec, p_is_pcm_vec); - DUP2_ARG3(__lsx_vbitsel_v, dst0, p2_src, p_is_pcm_vec, dst1, p1_src, - p_is_pcm_vec, dst0, dst1); - dst2 = __lsx_vbitsel_v(dst2, p0_src, p_is_pcm_vec); - - /* q part */ - DUP2_ARG2(__lsx_vadd_h, q1_src, p0_src, temp0, q0_src, - temp0, temp0); - temp1 = __lsx_vadd_h(q3_src, q2_src); - temp1 = __lsx_vslli_h(temp1, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, q2_src, temp1, temp0, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, q2_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst5 = __lsx_vadd_h(temp2, q2_src); - - temp1 = __lsx_vadd_h(temp0, q2_src); - temp1 = __lsx_vsrari_h(temp1, 2); - temp2 = __lsx_vsub_h(temp1, q1_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst4 = __lsx_vadd_h(temp2, q1_src); - - temp1 = __lsx_vslli_h(temp0, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, p1_src, temp1, q2_src, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, q0_src); - temp2 = 
__lsx_vclip_h(temp2, tc_neg, tc_pos); - dst3 = __lsx_vadd_h(temp2, q0_src); - - q_is_pcm_vec = __lsx_vnor_v(q_is_pcm_vec, q_is_pcm_vec); - DUP2_ARG3(__lsx_vbitsel_v, dst3, q0_src, q_is_pcm_vec, dst4, q1_src, - q_is_pcm_vec, dst3, dst4); - dst5 = __lsx_vbitsel_v(dst5, q2_src, q_is_pcm_vec); - /* strong filter ends */ - } else if (flag0 == flag1) { /* weak only */ - /* weak filter */ - tc_pos = __lsx_vsrai_h(tc_pos, 1); - tc_neg = __lsx_vneg_h(tc_pos); - - DUP2_ARG2(__lsx_vsub_h, q0_src, p0_src, q1_src, p1_src, - diff0, diff1); - DUP2_ARG2(__lsx_vadd_h, __lsx_vslli_h(diff0, 3), diff0, - __lsx_vslli_h(diff1, 1), diff1, diff0, diff1); - delta0 = __lsx_vsub_h(diff0, diff1); - delta0 = __lsx_vsrari_h(delta0, 4); - temp1 = __lsx_vadd_h(__lsx_vslli_h(tc_pos, 3), - __lsx_vslli_h(tc_pos, 1)); - abs_delta0 = __lsx_vadda_h(delta0, zero); - abs_delta0 = __lsx_vsle_hu(temp1, abs_delta0); - abs_delta0 = __lsx_vnor_v(abs_delta0, abs_delta0); - - delta0 = __lsx_vclip_h(delta0, tc_neg, tc_pos); - temp2 = __lsx_vadd_h(delta0, p0_src); - temp2 = __lsx_vclip255_h(temp2); - p_is_pcm_vec = __lsx_vnor_v(p_is_pcm_vec, p_is_pcm_vec); - temp0 = __lsx_vbitsel_v(temp2, p0_src, p_is_pcm_vec); - - temp2 = __lsx_vsub_h(q0_src, delta0); - temp2 = __lsx_vclip255_h(temp2); - q_is_pcm_vec = __lsx_vnor_v(q_is_pcm_vec, q_is_pcm_vec); - temp2 = __lsx_vbitsel_v(temp2, q0_src, q_is_pcm_vec); - - tmp = ((beta + (beta >> 1)) >> 3); - DUP2_ARG1(__lsx_vreplgr2vr_d, !p_is_pcm0 && ((dp00 + dp30) < tmp), - !p_is_pcm4 && ((dp04 + dp34) < tmp), cmp0, cmp1); - p_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - p_is_pcm_vec = __lsx_vseqi_d(p_is_pcm_vec, 0); - - DUP2_ARG1(__lsx_vreplgr2vr_h, (!q_is_pcm0) && (dq00 + dq30 < tmp), - (!q_is_pcm4) && (dq04 + dq34 < tmp), cmp0, cmp1); - q_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - q_is_pcm_vec = __lsx_vseqi_d(q_is_pcm_vec, 0); - tc_pos = __lsx_vsrai_h(tc_pos, 1); - tc_neg = __lsx_vneg_h(tc_pos); - - DUP2_ARG2(__lsx_vavgr_hu, p2_src, p0_src, q0_src, q2_src, - delta1, delta2); - DUP2_ARG2(__lsx_vsub_h, delta1, p1_src, delta2, q1_src, - delta1, delta2); - delta1 = __lsx_vadd_h(delta1, delta0); - delta2 = __lsx_vsub_h(delta2, delta0); - DUP2_ARG2(__lsx_vsrai_h, delta1, 1, delta2, 1, delta1, delta2); - DUP2_ARG3(__lsx_vclip_h, delta1, tc_neg, tc_pos, delta2, tc_neg, - tc_pos, delta1, delta2); - DUP2_ARG2(__lsx_vadd_h, p1_src, delta1, q1_src, delta2, - delta1, delta2); - DUP2_ARG1(__lsx_vclip255_h, delta1, delta2, delta1, delta2); - DUP2_ARG3(__lsx_vbitsel_v, delta1, p1_src, p_is_pcm_vec, delta2, - q1_src, q_is_pcm_vec, delta1, delta2); - - abs_delta0 = __lsx_vnor_v(abs_delta0, abs_delta0); - DUP4_ARG3(__lsx_vbitsel_v, delta1, p1_src, abs_delta0, temp0, - p0_src, abs_delta0, temp2, q0_src, abs_delta0, delta2, - q1_src, abs_delta0, dst0, dst1, dst2, dst3); - /* weak filter ends */ - - cmp3 = __lsx_vnor_v(cmp3, cmp3); - DUP4_ARG3(__lsx_vbitsel_v, dst0, p1_src, cmp3, dst1, p0_src, - cmp3, dst2, q0_src, cmp3, dst3, q1_src, cmp3, - dst0, dst1, dst2, dst3); - DUP2_ARG2(__lsx_vpickev_b, dst2, dst0, dst3, dst1, dst0, dst1); - - /* transpose */ - dst4 = __lsx_vilvl_b(dst1, dst0); - dst5 = __lsx_vilvh_b(dst1, dst0); - dst0 = __lsx_vilvl_h(dst5, dst4); - dst1 = __lsx_vilvh_h(dst5, dst4); - - src += 2; - __lsx_vstelm_w(dst0, src, 0, 0); - __lsx_vstelm_w(dst0, src + stride, 0, 1); - __lsx_vstelm_w(dst0, src + stride_2x, 0, 2); - __lsx_vstelm_w(dst0, src + stride_3x, 0, 3); - src += stride_4x; - __lsx_vstelm_w(dst1, src, 0, 0); - __lsx_vstelm_w(dst1, src + stride, 0, 1); - __lsx_vstelm_w(dst1, src + stride_2x, 0, 2); - 
__lsx_vstelm_w(dst1, src + stride_3x, 0, 3); - return; - } else { /* strong + weak */ - /* strong filter */ - tc_neg = __lsx_vneg_h(tc_pos); - - /* p part */ - DUP2_ARG2(__lsx_vadd_h, p1_src, p0_src, temp0, q0_src, - temp0, temp0); - - temp1 = __lsx_vadd_h(p3_src, p2_src); - temp1 = __lsx_vslli_h(temp1, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, p2_src, temp1, temp0, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, p2_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst0 = __lsx_vadd_h(temp2, p2_src); - - temp1 = __lsx_vadd_h(temp0, p2_src); - temp1 = __lsx_vsrari_h(temp1, 2); - temp2 = __lsx_vsub_h(temp1, p1_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst1 = __lsx_vadd_h(temp2, p1_src); - - temp1 = __lsx_vslli_h(temp0, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, p2_src, temp1, q1_src, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, p0_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst2 = __lsx_vadd_h(temp2, p0_src); - - p_is_pcm_vec = __lsx_vnor_v(p_is_pcm_vec, p_is_pcm_vec); - DUP2_ARG3(__lsx_vbitsel_v, dst0, p2_src, p_is_pcm_vec, dst1, p1_src, - p_is_pcm_vec, dst0, dst1); - dst2 = __lsx_vbitsel_v(dst2, p0_src, p_is_pcm_vec); - - /* q part */ - DUP2_ARG2(__lsx_vadd_h, q1_src, p0_src, temp0, q0_src, temp0, temp0); - temp1 = __lsx_vadd_h(q3_src, q2_src); - temp1 = __lsx_vslli_h(temp1, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, q2_src, temp1, temp0, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, q2_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst5 = __lsx_vadd_h(temp2, q2_src); - - temp1 = __lsx_vadd_h(temp0, q2_src); - temp1 = __lsx_vsrari_h(temp1, 2); - temp2 = __lsx_vsub_h(temp1, q1_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst4 = __lsx_vadd_h(temp2, q1_src); - - temp1 = __lsx_vslli_h(temp0, 1); - DUP2_ARG2(__lsx_vadd_h, temp1, p1_src, temp1, q2_src, temp1, temp1); - temp1 = __lsx_vsrari_h(temp1, 3); - temp2 = __lsx_vsub_h(temp1, q0_src); - temp2 = __lsx_vclip_h(temp2, tc_neg, tc_pos); - dst3 = __lsx_vadd_h(temp2, q0_src); - - q_is_pcm_vec = __lsx_vnor_v(q_is_pcm_vec, q_is_pcm_vec); - DUP2_ARG3(__lsx_vbitsel_v, dst3, q0_src, q_is_pcm_vec, dst4, q1_src, - q_is_pcm_vec, dst3, dst4); - dst5 = __lsx_vbitsel_v(dst5, q2_src, q_is_pcm_vec); - /* strong filter ends */ - - /* weak filter */ - tc_pos = __lsx_vsrai_h(tc_pos, 1); - tc_neg = __lsx_vneg_h(tc_pos); - - DUP2_ARG2(__lsx_vsub_h, q0_src, p0_src, q1_src, p1_src, - diff0, diff1); - DUP2_ARG2(__lsx_vadd_h, __lsx_vslli_h(diff0, 3), diff0, - __lsx_vslli_h(diff1, 1), diff1, diff0, diff1); - delta0 = __lsx_vsub_h(diff0, diff1); - delta0 = __lsx_vsrari_h(delta0, 4); - - temp1 = __lsx_vadd_h(__lsx_vslli_h(tc_pos, 3), - __lsx_vslli_h(tc_pos, 1)); - abs_delta0 = __lsx_vadda_h(delta0, zero); - abs_delta0 = __lsx_vsle_hu(temp1, abs_delta0); - abs_delta0 = __lsx_vnor_v(abs_delta0, abs_delta0); - delta0 = __lsx_vclip_h(delta0, tc_neg, tc_pos); - temp2 = __lsx_vadd_h(delta0, p0_src); - temp2 = __lsx_vclip255_h(temp2); - temp0 = __lsx_vbitsel_v(temp2, p0_src, p_is_pcm_vec); - temp2 = __lsx_vsub_h(q0_src, delta0); - temp2 = __lsx_vclip255_h(temp2); - temp2 = __lsx_vbitsel_v(temp2, q0_src, q_is_pcm_vec); - - tmp = (beta + (beta >> 1)) >> 3; - DUP2_ARG1(__lsx_vreplgr2vr_d, !p_is_pcm0 && ((dp00 + dp30) < tmp), - !p_is_pcm4 && ((dp04 + dp34) < tmp), cmp0, cmp1); - p_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - p_is_pcm_vec = __lsx_vseqi_d(p_is_pcm_vec, 0); - - DUP2_ARG1(__lsx_vreplgr2vr_h, (!q_is_pcm0) && (dq00 + dq30 < tmp), - 
(!q_is_pcm4) && (dq04 + dq34 < tmp), cmp0, cmp1); - q_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - q_is_pcm_vec = __lsx_vseqi_d(q_is_pcm_vec, 0); - tc_pos = __lsx_vsrai_h(tc_pos, 1); - tc_neg = __lsx_vneg_h(tc_pos); - - DUP2_ARG2(__lsx_vavgr_hu, p2_src, p0_src, q0_src, q2_src, - delta1, delta2); - DUP2_ARG2(__lsx_vsub_h, delta1, p1_src, delta2, q1_src, - delta1, delta2); - delta1 = __lsx_vadd_h(delta1, delta0); - delta2 = __lsx_vsub_h(delta2, delta0); - DUP2_ARG2(__lsx_vsrai_h, delta1, 1, delta2, 1, delta1, delta2); - DUP2_ARG3(__lsx_vclip_h, delta1, tc_neg, tc_pos, delta2, tc_neg, - tc_pos, delta1, delta2); - DUP2_ARG2(__lsx_vadd_h, p1_src, delta1, q1_src, delta2, - delta1, delta2); - DUP2_ARG1(__lsx_vclip255_h, delta1, delta2, delta1, delta2); - DUP2_ARG3(__lsx_vbitsel_v, delta1, p1_src, p_is_pcm_vec, delta2, - q1_src, q_is_pcm_vec, delta1, delta2); - - abs_delta0 = __lsx_vnor_v(abs_delta0, abs_delta0); - DUP4_ARG3(__lsx_vbitsel_v, delta1, p1_src, abs_delta0, delta2, - q1_src, abs_delta0, temp0, p0_src, abs_delta0, temp2, - q0_src, abs_delta0, delta1, delta2, temp0, temp2); - /* weak filter ends*/ - - /* select between weak or strong */ - DUP4_ARG3(__lsx_vbitsel_v, dst0, p2_src, cmp2, dst1, delta1, - cmp2, dst2, temp0, cmp2, dst3, temp2, cmp2, - dst0, dst1, dst2, dst3); - DUP2_ARG3(__lsx_vbitsel_v, dst4, delta2, cmp2, dst5, q2_src, cmp2, - dst4, dst5); - } - - cmp3 = __lsx_vnor_v(cmp3, cmp3); - DUP4_ARG3(__lsx_vbitsel_v, dst0, p2_src, cmp3, dst1, p1_src, cmp3, dst2, - p0_src, cmp3, dst3, q0_src, cmp3, dst0, dst1, dst2, dst3); - DUP2_ARG3(__lsx_vbitsel_v, dst4, q1_src, cmp3, dst5, q2_src, cmp3, - dst4, dst5); - - /* pack results to 8 bit */ - DUP4_ARG2(__lsx_vpickev_b, dst2, dst0, dst3, dst1, dst4, dst4, dst5, - dst5, dst0, dst1, dst2, dst3); - - /* transpose */ - DUP2_ARG2(__lsx_vilvl_b, dst1, dst0, dst3, dst2, dst4, dst6); - DUP2_ARG2(__lsx_vilvh_b, dst1, dst0, dst3, dst2, dst5, dst7); - DUP2_ARG2(__lsx_vilvl_h, dst5, dst4, dst7, dst6, dst0, dst2); - DUP2_ARG2(__lsx_vilvh_h, dst5, dst4, dst7, dst6, dst1, dst3); - - src += 1; - __lsx_vstelm_w(dst0, src, 0, 0); - __lsx_vstelm_h(dst2, src, 4, 0); - src += stride; - __lsx_vstelm_w(dst0, src, 0, 1); - __lsx_vstelm_h(dst2, src, 4, 2); - src += stride; - - __lsx_vstelm_w(dst0, src, 0, 2); - __lsx_vstelm_h(dst2, src, 4, 4); - src += stride; - __lsx_vstelm_w(dst0, src, 0, 3); - __lsx_vstelm_h(dst2, src, 4, 6); - src += stride; - - __lsx_vstelm_w(dst1, src, 0, 0); - __lsx_vstelm_h(dst3, src, 4, 0); - src += stride; - __lsx_vstelm_w(dst1, src, 0, 1); - __lsx_vstelm_h(dst3, src, 4, 2); - src += stride; - - __lsx_vstelm_w(dst1, src, 0, 2); - __lsx_vstelm_h(dst3, src, 4, 4); - src += stride; - __lsx_vstelm_w(dst1, src, 0, 3); - __lsx_vstelm_h(dst3, src, 4, 6); - } -} - -void ff_hevc_loop_filter_chroma_h_8_lsx(uint8_t *src, ptrdiff_t stride, - const int32_t *tc, const uint8_t *p_is_pcm, - const uint8_t *q_is_pcm) -{ - uint8_t *p1_ptr = src - (stride << 1); - uint8_t *p0_ptr = src - stride; - uint8_t *q0_ptr = src; - uint8_t *q1_ptr = src + stride; - __m128i cmp0, cmp1, p_is_pcm_vec, q_is_pcm_vec; - __m128i p1, p0, q0, q1; - __m128i tc_pos, tc_neg; - __m128i zero = {0}; - __m128i temp0, temp1, delta; - - if (!(tc[0] <= 0) || !(tc[1] <= 0)) { - DUP2_ARG1(__lsx_vreplgr2vr_h, tc[0], tc[1], cmp0, cmp1); - tc_pos = __lsx_vpackev_d(cmp1, cmp0); - tc_neg = __lsx_vneg_h(tc_pos); - DUP2_ARG1(__lsx_vreplgr2vr_d, p_is_pcm[0], p_is_pcm[1], cmp0, cmp1); - p_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - p_is_pcm_vec = __lsx_vseqi_d(p_is_pcm_vec, 0); - - 
DUP2_ARG1(__lsx_vreplgr2vr_d, q_is_pcm[0], q_is_pcm[1], cmp0, cmp1); - q_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - q_is_pcm_vec = __lsx_vseqi_d(q_is_pcm_vec, 0); - - DUP4_ARG2(__lsx_vld, p1_ptr, 0, p0_ptr, 0, q0_ptr, 0, q1_ptr, 0, - p1, p0, q0, q1); - DUP4_ARG2(__lsx_vilvl_b, zero, p1, zero, p0, zero, q0, zero, q1, - p1, p0, q0, q1); - DUP2_ARG2(__lsx_vsub_h, q0, p0, p1, q1, temp0, temp1); - temp0 = __lsx_vslli_h(temp0, 2); - temp0 = __lsx_vadd_h(temp0, temp1); - delta = __lsx_vsrari_h(temp0, 3); - delta = __lsx_vclip_h(delta, tc_neg, tc_pos); - temp0 = __lsx_vadd_h(p0, delta); - temp0 = __lsx_vclip255_h(temp0); - p_is_pcm_vec = __lsx_vnor_v(p_is_pcm_vec, p_is_pcm_vec); - temp0 = __lsx_vbitsel_v(temp0, p0, p_is_pcm_vec); - - temp1 = __lsx_vsub_h(q0, delta); - temp1 = __lsx_vclip255_h(temp1); - q_is_pcm_vec = __lsx_vnor_v(q_is_pcm_vec, q_is_pcm_vec); - temp1 = __lsx_vbitsel_v(temp1, q0, q_is_pcm_vec); - - tc_pos = __lsx_vslei_d(tc_pos, 0); - DUP2_ARG3(__lsx_vbitsel_v, temp0, p0, tc_pos, temp1, q0, tc_pos, - temp0, temp1); - temp0 = __lsx_vpickev_b(temp1, temp0); - __lsx_vstelm_d(temp0, p0_ptr, 0, 0); - __lsx_vstelm_d(temp0, p0_ptr + stride, 0, 1); - } -} - -void ff_hevc_loop_filter_chroma_v_8_lsx(uint8_t *src, ptrdiff_t stride, - const int32_t *tc, const uint8_t *p_is_pcm, - const uint8_t *q_is_pcm) -{ - ptrdiff_t stride_2x = (stride << 1); - ptrdiff_t stride_4x = (stride << 2); - ptrdiff_t stride_3x = stride_2x + stride; - __m128i cmp0, cmp1, p_is_pcm_vec, q_is_pcm_vec; - __m128i src0, src1, src2, src3, src4, src5, src6, src7; - __m128i p1, p0, q0, q1; - __m128i tc_pos, tc_neg; - __m128i zero = {0}; - __m128i temp0, temp1, delta; - - if (!(tc[0] <= 0) || !(tc[1] <= 0)) { - DUP2_ARG1(__lsx_vreplgr2vr_h, tc[0], tc[1], cmp0, cmp1); - tc_pos = __lsx_vpackev_d(cmp1, cmp0); - tc_neg = __lsx_vneg_h(tc_pos); - - DUP2_ARG1(__lsx_vreplgr2vr_d, p_is_pcm[0], p_is_pcm[1], cmp0, cmp1); - p_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - p_is_pcm_vec = __lsx_vseqi_d(p_is_pcm_vec, 0); - DUP2_ARG1(__lsx_vreplgr2vr_d, q_is_pcm[0], q_is_pcm[1], cmp0, cmp1); - q_is_pcm_vec = __lsx_vpackev_d(cmp1, cmp0); - q_is_pcm_vec = __lsx_vseqi_d(q_is_pcm_vec, 0); - - src -= 2; - DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride_2x, 0, - src + stride_3x, 0, src0, src1, src2, src3); - src += stride_4x; - DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride_2x, 0, - src + stride_3x, 0, src4, src5, src6, src7); - src -= stride_4x; - LSX_TRANSPOSE8x4_B(src0, src1, src2, src3, src4, src5, src6, src7, - p1, p0, q0, q1); - DUP4_ARG2(__lsx_vilvl_b, zero, p1, zero, p0, zero, q0, zero, q1, - p1, p0, q0, q1); - - DUP2_ARG2(__lsx_vsub_h, q0, p0, p1, q1, temp0, temp1); - temp0 = __lsx_vslli_h(temp0, 2); - temp0 = __lsx_vadd_h(temp0, temp1); - delta = __lsx_vsrari_h(temp0, 3); - delta = __lsx_vclip_h(delta, tc_neg, tc_pos); - - temp0 = __lsx_vadd_h(p0, delta); - temp1 = __lsx_vsub_h(q0, delta); - DUP2_ARG1(__lsx_vclip255_h, temp0, temp1, temp0, temp1); - DUP2_ARG2(__lsx_vnor_v, p_is_pcm_vec, p_is_pcm_vec, q_is_pcm_vec, - q_is_pcm_vec, p_is_pcm_vec, q_is_pcm_vec); - DUP2_ARG3(__lsx_vbitsel_v, temp0, p0, p_is_pcm_vec, temp1, q0, - q_is_pcm_vec, temp0, temp1); - - tc_pos = __lsx_vslei_d(tc_pos, 0); - DUP2_ARG3(__lsx_vbitsel_v, temp0, p0, tc_pos, temp1, q0, tc_pos, - temp0, temp1); - temp0 = __lsx_vpackev_b(temp1, temp0); - - src += 1; - __lsx_vstelm_h(temp0, src, 0, 0); - __lsx_vstelm_h(temp0, src + stride, 0, 1); - __lsx_vstelm_h(temp0, src + stride_2x, 0, 2); - __lsx_vstelm_h(temp0, src + stride_3x, 0, 3); - src += 
stride_4x; - __lsx_vstelm_h(temp0, src, 0, 4); - __lsx_vstelm_h(temp0, src + stride, 0, 5); - __lsx_vstelm_h(temp0, src + stride_2x, 0, 6); - __lsx_vstelm_h(temp0, src + stride_3x, 0, 7); - src -= stride_4x; - } -} - -static void hevc_sao_edge_filter_0degree_4width_lsx(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - __m128i shuf1 = {0x807060504030201, 0x100F0E0D0C0B0A09}; - __m128i shuf2 = {0x908070605040302, 0x11100F0E0D0C0B0A}; - __m128i edge_idx = {0x403000201, 0x0}; - __m128i cmp_minus10, cmp_minus11, diff_minus10, diff_minus11; - __m128i sao_offset = __lsx_vld(sao_offset_val, 0); - __m128i src_minus10, src_minus11, src_plus10, offset, src0, dst0; - __m128i const1 = __lsx_vldi(1); - __m128i zero = {0}; - - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - src -= 1; - - /* load in advance */ - DUP2_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src_minus10, src_minus11); - - for (height -= 2; height; height -= 2) { - src += src_stride_2x; - src_minus10 = __lsx_vpickev_d(src_minus11, src_minus10); - src0 = __lsx_vshuf_b(zero, src_minus10, shuf1); - src_plus10 = __lsx_vshuf_b(zero, src_minus10, shuf2); - - DUP2_ARG2(__lsx_vseq_b, src0, src_minus10, src0, src_plus10, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src0, src_minus10, src0, src_plus10, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_minus11, const1, cmp_minus11, diff_minus10, diff_minus11); - - offset = __lsx_vadd_b(diff_minus10, diff_minus11); - offset = __lsx_vaddi_bu(offset, 2); - - /* load in advance */ - DUP2_ARG2(__lsx_vld, src, 0, src + src_stride, 0, - src_minus10, src_minus11); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, - sao_offset, sao_offset, offset, offset, offset); - src0 = __lsx_vxori_b(src0, 128); - dst0 = __lsx_vsadd_b(src0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - __lsx_vstelm_w(dst0, dst, 0, 0); - __lsx_vstelm_w(dst0, dst + dst_stride, 0, 2); - dst += dst_stride_2x; - } - - src_minus10 = __lsx_vpickev_d(src_minus11, src_minus10); - src0 = __lsx_vshuf_b(zero, src_minus10, shuf1); - src_plus10 = __lsx_vshuf_b(zero, src_minus10, shuf2); - - DUP2_ARG2(__lsx_vseq_b, src0, src_minus10, src0, src_plus10, cmp_minus10, - cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src0, src_minus10, src0, src_plus10, cmp_minus10, - cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, diff_minus11, - const1, cmp_minus11, diff_minus10, diff_minus11); - - offset = __lsx_vadd_b(diff_minus10, diff_minus11); - offset = __lsx_vaddi_bu(offset, 2); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, sao_offset, - offset, offset, offset); - src0 = __lsx_vxori_b(src0, 128); - dst0 = __lsx_vsadd_b(src0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - __lsx_vstelm_w(dst0, dst, 0, 0); - __lsx_vstelm_w(dst0, dst + dst_stride, 0, 2); -} - -static void hevc_sao_edge_filter_0degree_8width_lsx(uint8_t *dst, - int32_t 
dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - __m128i shuf1 = {0x807060504030201, 0x100F0E0D0C0B0A09}; - __m128i shuf2 = {0x908070605040302, 0x11100F0E0D0C0B0A}; - __m128i edge_idx = {0x403000201, 0x0}; - __m128i const1 = __lsx_vldi(1); - __m128i cmp_minus10, cmp_minus11, diff_minus10, diff_minus11; - __m128i src0, src1, dst0, src_minus10, src_minus11, src_plus10, src_plus11; - __m128i offset, sao_offset = __lsx_vld(sao_offset_val, 0); - __m128i zeros = {0}; - - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - src -= 1; - - /* load in advance */ - DUP2_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src_minus10, src_minus11); - - for (height -= 2; height; height -= 2) { - src += src_stride_2x; - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus10, shuf1, zeros, - src_minus11, shuf1, src0, src1); - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus10, shuf2, zeros, - src_minus11, shuf2, src_plus10, src_plus11); - DUP2_ARG2(__lsx_vpickev_d, src_minus11, src_minus10, src_plus11, - src_plus10, src_minus10, src_plus10); - src0 = __lsx_vpickev_d(src1, src0); - - DUP2_ARG2(__lsx_vseq_b, src0, src_minus10, src0, src_plus10, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src0, src_minus10, src0, src_plus10, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_minus11, const1, cmp_minus11, diff_minus10, diff_minus11); - - offset = __lsx_vadd_b(diff_minus10, diff_minus11); - offset = __lsx_vaddi_bu(offset, 2); - - /* load in advance */ - DUP2_ARG2(__lsx_vld, src, 0, src + src_stride, 0, - src_minus10, src_minus11); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - src0 = __lsx_vxori_b(src0, 128); - dst0 = __lsx_vsadd_b(src0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - __lsx_vstelm_d(dst0, dst, 0, 0); - __lsx_vstelm_d(dst0, dst + dst_stride, 0, 1); - dst += dst_stride_2x; - } - - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus10, shuf1, zeros, src_minus11, - shuf1, src0, src1); - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus10, shuf2, zeros, src_minus11, - shuf2, src_plus10, src_plus11); - DUP2_ARG2(__lsx_vpickev_d, src_minus11, src_minus10, src_plus11, - src_plus10, src_minus10, src_plus10); - src0 = __lsx_vpickev_d(src1, src0); - - DUP2_ARG2(__lsx_vseq_b, src0, src_minus10, src0, src_plus10, cmp_minus10, - cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src0, src_minus10, src0, src_plus10, cmp_minus10, - cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, diff_minus11, - const1, cmp_minus11, diff_minus10, diff_minus11); - - offset = __lsx_vadd_b(diff_minus10, diff_minus11); - offset = __lsx_vaddi_bu(offset, 2); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - src0 = __lsx_vxori_b(src0, 128); - dst0 = __lsx_vsadd_b(src0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - __lsx_vstelm_d(dst0, dst, 0, 0); - __lsx_vstelm_d(dst0, dst + dst_stride, 
0, 1); -} - -static void hevc_sao_edge_filter_0degree_16multiple_lsx(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t width, - int32_t height) -{ - uint8_t *dst_ptr; - const uint8_t *src_minus1; - int32_t v_cnt; - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - const int32_t src_stride_4x = (src_stride << 2); - const int32_t dst_stride_4x = (dst_stride << 2); - const int32_t src_stride_3x = src_stride_2x + src_stride; - const int32_t dst_stride_3x = dst_stride_2x + dst_stride; - - __m128i shuf1 = {0x807060504030201, 0x100F0E0D0C0B0A09}; - __m128i shuf2 = {0x908070605040302, 0x11100F0E0D0C0B0A}; - __m128i edge_idx = {0x403000201, 0x0}; - __m128i const1 = __lsx_vldi(1); - __m128i sao_offset; - __m128i cmp_minus10, cmp_plus10, diff_minus10, diff_plus10, cmp_minus11; - __m128i cmp_plus11, diff_minus11, diff_plus11, cmp_minus12, cmp_plus12; - __m128i diff_minus12, diff_plus12, cmp_minus13, cmp_plus13, diff_minus13; - __m128i diff_plus13; - __m128i src10, src11, src12, src13, dst0, dst1, dst2, dst3; - __m128i src_minus10, src_minus11, src_minus12, src_minus13; - __m128i offset_mask0, offset_mask1, offset_mask2, offset_mask3; - __m128i src_zero0, src_zero1, src_zero2, src_zero3; - __m128i src_plus10, src_plus11, src_plus12, src_plus13; - - sao_offset = __lsx_vld(sao_offset_val, 0); - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - - for (; height; height -= 4) { - src_minus1 = src - 1; - src_minus10 = __lsx_vld(src_minus1, 0); - DUP2_ARG2(__lsx_vldx, src_minus1, src_stride, src_minus1, - src_stride_2x, src_minus11, src_minus12); - src_minus13 = __lsx_vldx(src_minus1, src_stride_3x); - - for (v_cnt = 0; v_cnt < width; v_cnt += 16) { - src_minus1 += 16; - dst_ptr = dst + v_cnt; - src10 = __lsx_vld(src_minus1, 0); - DUP2_ARG2(__lsx_vldx, src_minus1, src_stride, src_minus1, - src_stride_2x, src11, src12); - src13 = __lsx_vldx(src_minus1, src_stride_3x); - DUP4_ARG3(__lsx_vshuf_b, src10, src_minus10, shuf1, src11, - src_minus11, shuf1, src12, src_minus12, shuf1, src13, - src_minus13, shuf1, src_zero0, src_zero1, - src_zero2, src_zero3); - DUP4_ARG3(__lsx_vshuf_b, src10, src_minus10, shuf2, src11, - src_minus11, shuf2, src12, src_minus12, shuf2, src13, - src_minus13, shuf2, src_plus10, src_plus11, - src_plus12, src_plus13); - DUP4_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero0, - src_plus10, src_zero1, src_minus11, src_zero1, src_plus11, - cmp_minus10, cmp_plus10, cmp_minus11, cmp_plus11); - DUP4_ARG2(__lsx_vseq_b, src_zero2, src_minus12, src_zero2, - src_plus12, src_zero3, src_minus13, src_zero3, src_plus13, - cmp_minus12, cmp_plus12, cmp_minus13, cmp_plus13); - DUP4_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_plus10, - cmp_plus10, cmp_minus11, cmp_minus11, cmp_plus11, - cmp_plus11, diff_minus10, diff_plus10, diff_minus11, - diff_plus11); - DUP4_ARG2(__lsx_vnor_v, cmp_minus12, cmp_minus12, cmp_plus12, - cmp_plus12, cmp_minus13, cmp_minus13, cmp_plus13, - cmp_plus13, diff_minus12, diff_plus12, diff_minus13, - diff_plus13); - DUP4_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero0, - src_plus10, src_zero1, src_minus11, src_zero1, src_plus11, - cmp_minus10, cmp_plus10, cmp_minus11, cmp_plus11); - DUP4_ARG2(__lsx_vsle_bu, src_zero2, src_minus12, src_zero2, - src_plus12, src_zero3, src_minus13, src_zero3, src_plus13, - cmp_minus12, cmp_plus12, cmp_minus13, cmp_plus13); - DUP4_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_plus10, - cmp_plus10, cmp_minus11, 
cmp_minus11, cmp_plus11, - cmp_plus11, cmp_minus10, cmp_plus10, cmp_minus11, - cmp_plus11); - DUP4_ARG2(__lsx_vnor_v, cmp_minus12, cmp_minus12, cmp_plus12, - cmp_plus12, cmp_minus13, cmp_minus13, cmp_plus13, - cmp_plus13, cmp_minus12, cmp_plus12, cmp_minus13, - cmp_plus13); - DUP4_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_plus10, const1, cmp_plus10, diff_minus11, const1, - cmp_minus11, diff_plus11, const1, cmp_plus11, - diff_minus10, diff_plus10, diff_minus11, diff_plus11); - DUP4_ARG3(__lsx_vbitsel_v, diff_minus12, const1, cmp_minus12, - diff_plus12, const1, cmp_plus12, diff_minus13, const1, - cmp_minus13, diff_plus13, const1, cmp_plus13, - diff_minus12, diff_plus12, diff_minus13, diff_plus13); - - DUP4_ARG2(__lsx_vadd_b, diff_minus10, diff_plus10, diff_minus11, - diff_plus11, diff_minus12, diff_plus12, diff_minus13, - diff_plus13, offset_mask0, offset_mask1, offset_mask2, - offset_mask3); - DUP4_ARG2(__lsx_vaddi_bu, offset_mask0, 2, offset_mask1, 2, - offset_mask2, 2, offset_mask3, 2, offset_mask0, - offset_mask1, offset_mask2, offset_mask3); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask0, - sao_offset, sao_offset, offset_mask0, offset_mask0, - offset_mask0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask1, - sao_offset, sao_offset, offset_mask1, offset_mask1, - offset_mask1); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask2, - sao_offset, sao_offset, offset_mask2, offset_mask2, - offset_mask2); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask3, - sao_offset, sao_offset, offset_mask3, offset_mask3, - offset_mask3); - - DUP4_ARG2(__lsx_vxori_b, src_zero0, 128, src_zero1, 128, - src_zero2, 128, src_zero3, 128, src_zero0, src_zero1, - src_zero2, src_zero3); - DUP4_ARG2(__lsx_vsadd_b, src_zero0, offset_mask0, src_zero1, - offset_mask1, src_zero2, offset_mask2, src_zero3, - offset_mask3, dst0, dst1, dst2, dst3); - DUP4_ARG2(__lsx_vxori_b, dst0, 128, dst1, 128, dst2, 128, dst3, - 128, dst0, dst1, dst2, dst3); - - src_minus10 = src10; - src_minus11 = src11; - src_minus12 = src12; - src_minus13 = src13; - - __lsx_vst(dst0, dst_ptr, 0); - __lsx_vst(dst1, dst_ptr + dst_stride, 0); - __lsx_vst(dst2, dst_ptr + dst_stride_2x, 0); - __lsx_vst(dst3, dst_ptr + dst_stride_3x, 0); - } - src += src_stride_4x; - dst += dst_stride_4x; - } -} - -static void hevc_sao_edge_filter_90degree_4width_lsx(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - __m128i edge_idx = {0x403000201, 0x0}; - __m128i const1 = __lsx_vldi(1); - __m128i dst0; - __m128i sao_offset = __lsx_vld(sao_offset_val, 0); - __m128i cmp_minus10, diff_minus10, cmp_minus11, diff_minus11; - __m128i src_minus10, src_minus11, src10, src11; - __m128i src_zero0, src_zero1; - __m128i offset; - __m128i offset_mask0, offset_mask1; - - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - - /* load in advance */ - DUP4_ARG2(__lsx_vld, src - src_stride, 0, src, 0, src + src_stride, 0, - src + src_stride_2x, 0, src_minus10, src_minus11, src10, src11); - - for (height -= 2; height; height -= 2) { - src += src_stride_2x; - DUP4_ARG2(__lsx_vilvl_b, src10, src_minus10, src_minus11, src_minus11, - src11, src_minus11, src10, src10, src_minus10, src_zero0, - src_minus11, src_zero1); - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, 
cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, - src_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_minus11, const1, cmp_minus11, diff_minus10, diff_minus11); - - DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_hu, offset_mask0, 2, offset_mask1, 2, - offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, - src10, src11); - - __lsx_vstelm_w(dst0, dst, 0, 0); - __lsx_vstelm_w(dst0, dst + dst_stride, 0, 2); - dst += dst_stride_2x; - } - - DUP4_ARG2(__lsx_vilvl_b, src10, src_minus10, src_minus11, src_minus11, - src11, src_minus11, src10, src10, src_minus10, src_zero0, - src_minus11, src_zero1); - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, diff_minus11, - const1, cmp_minus11, diff_minus10, diff_minus11); - - DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_bu, offset_mask0, 2, offset_mask1, 2, - offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - __lsx_vstelm_w(dst0, dst, 0, 0); - __lsx_vstelm_w(dst0, dst + dst_stride, 0, 2); -} - -static void hevc_sao_edge_filter_90degree_8width_lsx(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - __m128i edge_idx = {0x403000201, 0x0}; - __m128i const1 = __lsx_vldi(1); - __m128i offset, sao_offset = __lsx_vld(sao_offset_val, 0); - __m128i src_zero0, src_zero1, dst0; - __m128i cmp_minus10, diff_minus10, cmp_minus11, diff_minus11; - __m128i src_minus10, src_minus11, src10, src11; - __m128i offset_mask0, offset_mask1; - - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - - /* load in advance */ - DUP2_ARG2(__lsx_vld, src - src_stride, 0, src, 0, src_minus10, src_minus11); - DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src10, src11); - - for (height -= 2; height; height -= 2) { - src += src_stride_2x; - DUP4_ARG2(__lsx_vilvl_b, src10, src_minus10, src_minus11, src_minus11, - src11, 
src_minus11, src10, src10, src_minus10, src_zero0, - src_minus11, src_zero1); - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, - src_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_minus11, const1, cmp_minus11, diff_minus10, diff_minus11); - - DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_hu, offset_mask0, 2, offset_mask1, 2, - offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, - src10, src11); - - __lsx_vstelm_d(dst0, dst, 0, 0); - __lsx_vstelm_d(dst0, dst + dst_stride, 0, 1); - dst += dst_stride_2x; - } - - DUP4_ARG2(__lsx_vilvl_b, src10, src_minus10, src_minus11, src_minus11, - src11, src_minus11, src10, src10, src_minus10, src_zero0, - src_minus11, src_zero1); - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, diff_minus11, - const1, cmp_minus11, diff_minus10, diff_minus11); - - DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_hu, offset_mask0, 2, offset_mask1, 2, - offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - __lsx_vstelm_d(dst0, dst, 0, 0); - __lsx_vstelm_d(dst0, dst + dst_stride, 0, 1); -} - -static void hevc_sao_edge_filter_90degree_16multiple_lsx(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t * - sao_offset_val, - int32_t width, - int32_t height) -{ - const uint8_t *src_orig = src; - uint8_t *dst_orig = dst; - int32_t h_cnt, v_cnt; - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - const int32_t src_stride_4x = (src_stride << 2); - const int32_t dst_stride_4x = (dst_stride << 2); - const int32_t src_stride_3x = src_stride_2x + src_stride; - const int32_t dst_stride_3x = dst_stride_2x + dst_stride; - __m128i edge_idx = {0x403000201, 0x0}; - __m128i const1 = __lsx_vldi(1); - __m128i cmp_minus10, cmp_plus10, diff_minus10, diff_plus10, cmp_minus11; - __m128i cmp_plus11, diff_minus11, 
diff_plus11, cmp_minus12, cmp_plus12; - __m128i diff_minus12, diff_plus12, cmp_minus13, cmp_plus13, diff_minus13; - __m128i diff_plus13; - __m128i src10, src_minus10, dst0, src11, src_minus11, dst1; - __m128i src12, dst2, src13, dst3; - __m128i offset_mask0, offset_mask1, offset_mask2, offset_mask3, sao_offset; - - sao_offset = __lsx_vld(sao_offset_val, 0); - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - - for (v_cnt = 0; v_cnt < width; v_cnt += 16) { - src = src_orig + v_cnt; - dst = dst_orig + v_cnt; - - DUP2_ARG2(__lsx_vld, src - src_stride, 0, src, 0, - src_minus10, src_minus11); - - for (h_cnt = (height >> 2); h_cnt--;) { - DUP4_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, - src, src_stride_3x, src, src_stride_4x, - src10, src11, src12, src13); - DUP4_ARG2(__lsx_vseq_b, src_minus11, src_minus10, src_minus11, - src10, src10, src_minus11, src10, src11, cmp_minus10, - cmp_plus10, cmp_minus11, cmp_plus11); - DUP4_ARG2(__lsx_vseq_b, src11, src10, src11, src12, src12, src11, - src12, src13, cmp_minus12, cmp_plus12, - cmp_minus13, cmp_plus13); - DUP4_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_plus10, - cmp_plus10, cmp_minus11, cmp_minus11, cmp_plus11, - cmp_plus11, diff_minus10, diff_plus10, diff_minus11, - diff_plus11); - DUP4_ARG2(__lsx_vnor_v, cmp_minus12, cmp_minus12, cmp_plus12, - cmp_plus12, cmp_minus13, cmp_minus13, cmp_plus13, - cmp_plus13, diff_minus12, diff_plus12, diff_minus13, - diff_plus13); - DUP4_ARG2(__lsx_vsle_bu, src_minus11, src_minus10, src_minus11, - src10, src10, src_minus11, src10, src11, cmp_minus10, - cmp_plus10, cmp_minus11, cmp_plus11); - DUP4_ARG2(__lsx_vsle_bu, src11, src10, src11, src12, src12, src11, - src12, src13, cmp_minus12, cmp_plus12, cmp_minus13, - cmp_plus13); - DUP4_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_plus10, - cmp_plus10, cmp_minus11, cmp_minus11, cmp_plus11, - cmp_plus11, cmp_minus10, cmp_plus10, cmp_minus11, - cmp_plus11); - DUP4_ARG2(__lsx_vnor_v, cmp_minus12, cmp_minus12, cmp_plus12, - cmp_plus12, cmp_minus13, cmp_minus13, cmp_plus13, - cmp_plus13, cmp_minus12, cmp_plus12, cmp_minus13, - cmp_plus13); - DUP4_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_plus10, const1, cmp_plus10, diff_minus11, const1, - cmp_minus11, diff_plus11, const1, cmp_plus11, - diff_minus10, diff_plus10, diff_minus11, diff_plus11); - DUP4_ARG3(__lsx_vbitsel_v, diff_minus12, const1, cmp_minus12, - diff_plus12, const1, cmp_plus12, diff_minus13, const1, - cmp_minus13, diff_plus13, const1, cmp_plus13, - diff_minus12, diff_plus12, diff_minus13, diff_plus13); - - DUP4_ARG2(__lsx_vadd_b, diff_minus10, diff_plus10, diff_minus11, - diff_plus11, diff_minus12, diff_plus12, diff_minus13, - diff_plus13, offset_mask0, offset_mask1, offset_mask2, - offset_mask3); - DUP4_ARG2(__lsx_vaddi_bu, offset_mask0, 2, offset_mask1, 2, - offset_mask2, 2, offset_mask3, 2, offset_mask0, - offset_mask1, offset_mask2, offset_mask3); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask0, - sao_offset, sao_offset, offset_mask0,\ - offset_mask0, offset_mask0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask1, - sao_offset, sao_offset, offset_mask1, offset_mask1, - offset_mask1); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask2, - sao_offset, sao_offset, offset_mask2, offset_mask2, - offset_mask2); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask3, - sao_offset, sao_offset, offset_mask3, offset_mask3, - offset_mask3); - - src_minus10 = src12; - DUP4_ARG2(__lsx_vxori_b, src_minus11, 128, src10, 128, src11, 128, - 
src12, 128, src_minus11, src10, src11, src12); - DUP4_ARG2(__lsx_vsadd_b, src_minus11, offset_mask0, src10, - offset_mask1, src11, offset_mask2, src12, - offset_mask3, dst0, dst1, dst2, dst3); - DUP4_ARG2(__lsx_vxori_b, dst0, 128, dst1, 128, dst2, 128, dst3, - 128, dst0, dst1, dst2, dst3); - src_minus11 = src13; - - __lsx_vst(dst0, dst, 0); - __lsx_vstx(dst1, dst, dst_stride); - __lsx_vstx(dst2, dst, dst_stride_2x); - __lsx_vstx(dst3, dst, dst_stride_3x); - src += src_stride_4x; - dst += dst_stride_4x; - } - } -} - -static void hevc_sao_edge_filter_45degree_4width_lsx(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const uint8_t *src_orig; - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - __m128i shuf1 = {0x807060504030201, 0x100F0E0D0C0B0A09}; - __m128i shuf2 = {0x908070605040302, 0x11100F0E0D0C0B0A}; - __m128i edge_idx = {0x403000201, 0x0}; - __m128i const1 = __lsx_vldi(1); - __m128i offset, sao_offset = __lsx_vld(sao_offset_val, 0); - __m128i cmp_minus10, diff_minus10, src_minus10, cmp_minus11, diff_minus11; - __m128i src_minus11, src10, src11; - __m128i src_plus0, src_zero0, src_plus1, src_zero1, dst0; - __m128i offset_mask0, offset_mask1; - __m128i zeros = {0}; - - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - src_orig = src - 1; - - /* load in advance */ - DUP2_ARG2(__lsx_vld, src_orig - src_stride, 0, src_orig, 0, - src_minus10, src_minus11); - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src10, src11); - - for (height -= 2; height; height -= 2) { - src_orig += src_stride_2x; - - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus11, shuf1, zeros, src10, - shuf1, src_zero0, src_zero1); - DUP2_ARG3(__lsx_vshuf_b, zeros, src10, shuf2, zeros, src11, shuf2, - src_plus0, src_plus1); - - DUP2_ARG2(__lsx_vilvl_b, src_plus0, src_minus10, src_plus1, - src_minus11, src_minus10, src_minus11); - DUP2_ARG2(__lsx_vilvl_b, src_zero0, src_zero0, src_zero1, - src_zero1, src_zero0, src_zero1); - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, - src_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, - src_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_minus11, const1, cmp_minus11, diff_minus10, diff_minus11); - - DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_hu, offset_mask0, 2, offset_mask1, 2, - offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src10, src11); - - __lsx_vstelm_w(dst0, dst, 0, 0); - __lsx_vstelm_w(dst0, dst + dst_stride, 0, 2); - dst += dst_stride_2x; - } - - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus11, shuf1, zeros, src10, shuf1, - src_zero0, src_zero1); - 
DUP2_ARG3(__lsx_vshuf_b, zeros, src10, shuf2, zeros, src11, shuf2, - src_plus0, src_plus1); - - DUP2_ARG2(__lsx_vilvl_b, src_plus0, src_minus10, src_plus1, src_minus11, - src_minus10, src_minus11); - DUP2_ARG2(__lsx_vilvl_b, src_zero0, src_zero0, src_zero1, src_zero1, - src_zero0, src_zero1); - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, diff_minus11, - const1, cmp_minus11, diff_minus10, diff_minus11); - - DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_hu, offset_mask0, 2, offset_mask1, 2, offset_mask0, - offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - __lsx_vstelm_w(dst0, dst, 0, 0); - __lsx_vstelm_w(dst0, dst + dst_stride, 0, 2); -} - -static void hevc_sao_edge_filter_45degree_8width_lsx(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const uint8_t *src_orig; - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - __m128i shuf1 = {0x807060504030201, 0x100F0E0D0C0B0A09}; - __m128i shuf2 = {0x908070605040302, 0x11100F0E0D0C0B0A}; - __m128i edge_idx = {0x403000201, 0x0}; - __m128i const1 = __lsx_vldi(1); - __m128i offset, sao_offset = __lsx_vld(sao_offset_val, 0); - __m128i cmp_minus10, diff_minus10, cmp_minus11, diff_minus11; - __m128i src_minus10, src10, src_minus11, src11; - __m128i src_zero0, src_plus10, src_zero1, src_plus11, dst0; - __m128i offset_mask0, offset_mask1; - __m128i zeros = {0}; - - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - src_orig = src - 1; - - /* load in advance */ - DUP2_ARG2(__lsx_vld, src_orig - src_stride, 0, src_orig, 0, src_minus10, - src_minus11); - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src10, src11); - - for (height -= 2; height; height -= 2) { - src_orig += src_stride_2x; - - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus11, shuf1, zeros, src10, - shuf1, src_zero0, src_zero1); - DUP2_ARG3(__lsx_vshuf_b, zeros, src10, shuf2, zeros, src11, shuf2, - src_plus10, src_plus11); - - DUP2_ARG2(__lsx_vilvl_b, src_plus10, src_minus10, src_plus11, - src_minus11, src_minus10, src_minus11); - DUP2_ARG2(__lsx_vilvl_b, src_zero0, src_zero0, src_zero1, src_zero1, - src_zero0, src_zero1); - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, - src_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_minus11, const1, cmp_minus11, 
diff_minus10, diff_minus11); - - DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_hu, offset_mask0, 2, offset_mask1, 2, - offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src10, src11) - __lsx_vstelm_d(dst0, dst, 0, 0); - __lsx_vstelm_d(dst0, dst + dst_stride, 0, 1); - dst += dst_stride_2x; - } - - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus11, shuf1, zeros, src10, shuf1, - src_zero0, src_zero1); - DUP2_ARG3(__lsx_vshuf_b, zeros, src10, shuf2, zeros, src11, shuf2, - src_plus10, src_plus11); - DUP2_ARG2(__lsx_vilvl_b, src_plus10, src_minus10, src_plus11, src_minus11, - src_minus10, src_minus11); - DUP2_ARG2(__lsx_vilvl_b, src_zero0, src_zero0, src_zero1, src_zero1, - src_zero0, src_zero1); - - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, diff_minus11, - const1, cmp_minus11, diff_minus10, diff_minus11); - - DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_hu, offset_mask0, 2, offset_mask1, 2, offset_mask0, - offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src10, src11); - - __lsx_vstelm_d(dst0, dst, 0, 0); - __lsx_vstelm_d(dst0, dst + dst_stride, 0, 1); -} - -static void hevc_sao_edge_filter_45degree_16multiple_lsx(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t * - sao_offset_val, - int32_t width, - int32_t height) -{ - const uint8_t *src_orig = src; - uint8_t *dst_orig = dst; - int32_t v_cnt; - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - const int32_t src_stride_4x = (src_stride << 2); - const int32_t dst_stride_4x = (dst_stride << 2); - const int32_t src_stride_3x = src_stride_2x + src_stride; - const int32_t dst_stride_3x = dst_stride_2x + dst_stride; - - __m128i shuf1 = {0x807060504030201, 0x100F0E0D0C0B0A09}; - __m128i shuf2 = {0x908070605040302, 0x11100F0E0D0C0B0A}; - __m128i edge_idx = {0x403000201, 0x0}; - __m128i const1 = __lsx_vldi(1); - __m128i cmp_minus10, cmp_plus10, diff_minus10, diff_plus10, cmp_minus11; - __m128i cmp_plus11, diff_minus11, diff_plus11, cmp_minus12, cmp_plus12; - __m128i diff_minus12, diff_plus12, 
cmp_minus13, cmp_plus13, diff_minus13; - __m128i diff_plus13, src_minus14, src_plus13; - __m128i offset_mask0, offset_mask1, offset_mask2, offset_mask3; - __m128i src10, src_minus10, dst0, src11, src_minus11, dst1; - __m128i src12, src_minus12, dst2, src13, src_minus13, dst3; - __m128i src_zero0, src_plus10, src_zero1, src_plus11, src_zero2; - __m128i src_zero3, sao_offset, src_plus12; - - sao_offset = __lsx_vld(sao_offset_val, 0); - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - - for (; height; height -= 4) { - src_orig = src - 1; - dst_orig = dst; - src_minus11 = __lsx_vld(src_orig, 0); - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src_minus12, src_minus13); - src_minus14 = __lsx_vldx(src_orig, src_stride_3x); - - for (v_cnt = 0; v_cnt < width; v_cnt += 16) { - src_minus10 = __lsx_vld(src_orig - src_stride, 0); - src_orig += 16; - src10 = __lsx_vld(src_orig, 0); - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, - src_stride_2x, src11, src12); - src13 = __lsx_vldx(src_orig, src_stride_3x); - src_plus13 = __lsx_vld(src + v_cnt + src_stride_4x, 1); - - DUP4_ARG3(__lsx_vshuf_b, src10, src_minus11, shuf1, src11, - src_minus12, shuf1, src12, src_minus13, shuf1, - src13, src_minus14, shuf1, src_zero0, src_zero1, - src_zero2, src_zero3); - DUP2_ARG3(__lsx_vshuf_b, src11, src_minus12, shuf2, src12, - src_minus13, shuf2, src_plus10, src_plus11); - src_plus12 = __lsx_vshuf_b(src13, src_minus14, shuf2); - - DUP4_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero0, - src_plus10, src_zero1, src_minus11, src_zero1, - src_plus11, cmp_minus10, cmp_plus10, - cmp_minus11, cmp_plus11); - DUP4_ARG2(__lsx_vseq_b, src_zero2, src_minus12, src_zero2, - src_plus12, src_zero3, src_minus13, src_zero3, - src_plus13, cmp_minus12, cmp_plus12, - cmp_minus13, cmp_plus13); - DUP4_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_plus10, - cmp_plus10, cmp_minus11, cmp_minus11, cmp_plus11, - cmp_plus11, diff_minus10, diff_plus10, diff_minus11, - diff_plus11); - DUP4_ARG2(__lsx_vnor_v, cmp_minus12, cmp_minus12, cmp_plus12, - cmp_plus12, cmp_minus13, cmp_minus13, cmp_plus13, - cmp_plus13, diff_minus12, diff_plus12, diff_minus13, - diff_plus13); - DUP4_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero0, - src_plus10, src_zero1, src_minus11, src_zero1, - src_plus11, cmp_minus10, cmp_plus10, cmp_minus11, - cmp_plus11); - DUP4_ARG2(__lsx_vsle_bu, src_zero2, src_minus12, src_zero2, - src_plus12, src_zero3, src_minus13, src_zero3, - src_plus13, cmp_minus12, cmp_plus12, cmp_minus13, - cmp_plus13); - DUP4_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_plus10, - cmp_plus10, cmp_minus11, cmp_minus11, cmp_plus11, - cmp_plus11, cmp_minus10, cmp_plus10, cmp_minus11, - cmp_plus11); - DUP4_ARG2(__lsx_vnor_v, cmp_minus12, cmp_minus12, cmp_plus12, - cmp_plus12, cmp_minus13, cmp_minus13, cmp_plus13, - cmp_plus13, cmp_minus12, cmp_plus12, cmp_minus13, - cmp_plus13); - DUP4_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_plus10, const1, cmp_plus10, diff_minus11, const1, - cmp_minus11, diff_plus11, const1, cmp_plus11, - diff_minus10, diff_plus10, diff_minus11, diff_plus11); - DUP4_ARG3(__lsx_vbitsel_v, diff_minus12, const1, cmp_minus12, - diff_plus12, const1, cmp_plus12, diff_minus13, const1, - cmp_minus13, diff_plus13, const1, cmp_plus13, - diff_minus12, diff_plus12, diff_minus13, diff_plus13); - - DUP4_ARG2(__lsx_vadd_b, diff_minus10, diff_plus10, diff_minus11, - diff_plus11, diff_minus12, diff_plus12, diff_minus13, - diff_plus13, offset_mask0, offset_mask1, offset_mask2, - 
offset_mask3); - DUP4_ARG2(__lsx_vaddi_bu, offset_mask0, 2, offset_mask1, 2, - offset_mask2, 2, offset_mask3, 2, offset_mask0, - offset_mask1, offset_mask2, offset_mask3); - - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask0, - sao_offset, sao_offset, offset_mask0, offset_mask0, - offset_mask0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask1, - sao_offset, sao_offset, offset_mask1, offset_mask1, - offset_mask1); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask2, - sao_offset, sao_offset, offset_mask2, offset_mask2, - offset_mask2); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask3, - sao_offset, sao_offset, offset_mask3, offset_mask3, - offset_mask3); - - DUP4_ARG2(__lsx_vxori_b, src_zero0, 128, src_zero1, 128, src_zero2, - 128, src_zero3, 128, src_zero0, src_zero1, src_zero2, - src_zero3); - DUP4_ARG2(__lsx_vsadd_b, src_zero0, offset_mask0, src_zero1, - offset_mask1, src_zero2, offset_mask2, src_zero3, - offset_mask3, dst0, dst1, dst2, dst3); - DUP4_ARG2(__lsx_vxori_b, dst0, 128, dst1, 128, dst2, 128, dst3, - 128, dst0, dst1, dst2, dst3); - - src_minus11 = src10; - src_minus12 = src11; - src_minus13 = src12; - src_minus14 = src13; - - __lsx_vst(dst0, dst_orig, 0); - __lsx_vstx(dst1, dst_orig, dst_stride); - __lsx_vstx(dst2, dst_orig, dst_stride_2x); - __lsx_vstx(dst3, dst_orig, dst_stride_3x); - dst_orig += 16; - } - src += src_stride_4x; - dst += dst_stride_4x; - } -} - -static void hevc_sao_edge_filter_135degree_4width_lsx(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const uint8_t *src_orig; - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - - __m128i shuf1 = {0x807060504030201, 0x100F0E0D0C0B0A09}; - __m128i shuf2 = {0x908070605040302, 0x11100F0E0D0C0B0A}; - __m128i edge_idx = {0x403000201, 0x0}; - __m128i const1 = __lsx_vldi(1); - __m128i offset, sao_offset = __lsx_vld(sao_offset_val, 0); - __m128i src_zero0, src_zero1, dst0; - __m128i cmp_minus10, diff_minus10, cmp_minus11, diff_minus11; - __m128i src_minus10, src10, src_minus11, src11; - __m128i offset_mask0, offset_mask1; - __m128i zeros = {0}; - - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - src_orig = src - 1; - - /* load in advance */ - DUP2_ARG2(__lsx_vld, src_orig - src_stride, 0, src_orig, 0, - src_minus10, src_minus11); - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src10, src11); - - for (height -= 2; height; height -= 2) { - src_orig += src_stride_2x; - - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus11, shuf1, zeros, src10, - shuf1, src_zero0, src_zero1); - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus10, shuf2, zeros, src_minus11, - shuf2, src_minus10, src_minus11); - - DUP2_ARG2(__lsx_vilvl_b, src10, src_minus10, src11, src_minus11, - src_minus10, src_minus11); - DUP2_ARG2(__lsx_vilvl_b, src_zero0, src_zero0, src_zero1, src_zero1, - src_zero0, src_zero1); - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, - src_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_minus11, const1, cmp_minus11, diff_minus10, diff_minus11); - - 
DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_hu, offset_mask0, 2, offset_mask1, 2, - offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src10, src11); - - __lsx_vstelm_w(dst0, dst, 0, 0); - __lsx_vstelm_w(dst0, dst + dst_stride, 0, 2); - dst += dst_stride_2x; - } - - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus11, shuf1, zeros, src10, shuf1, - src_zero0, src_zero1); - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus10, shuf2, zeros, src_minus11, - shuf2, src_minus10, src_minus11); - - DUP2_ARG2(__lsx_vilvl_b, src10, src_minus10, src11, src_minus11, - src_minus10, src_minus11); - DUP2_ARG2(__lsx_vilvl_b, src_zero0, src_zero0, src_zero1, src_zero1, - src_zero0, src_zero1); - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, diff_minus11, - const1, cmp_minus11, diff_minus10, diff_minus11); - - DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_hu, offset_mask0, 2, offset_mask1, 2, offset_mask0, - offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - __lsx_vstelm_w(dst0, dst, 0, 0); - __lsx_vstelm_w(dst0, dst + dst_stride, 0, 2); - dst += dst_stride_2x; -} - -static void hevc_sao_edge_filter_135degree_8width_lsx(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const uint8_t *src_orig; - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - - __m128i shuf1 = {0x807060504030201, 0x100F0E0D0C0B0A09}; - __m128i shuf2 = {0x908070605040302, 0x11100F0E0D0C0B0A}; - __m128i edge_idx = {0x403000201, 0x0}; - __m128i const1 = __lsx_vldi(1); - __m128i offset, sao_offset = __lsx_vld(sao_offset_val, 0); - __m128i cmp_minus10, diff_minus10, cmp_minus11, diff_minus11; - __m128i src_minus10, src10, src_minus11, src11; - __m128i src_zero0, src_zero1, dst0; - __m128i offset_mask0, offset_mask1; - __m128i zeros = {0}; - - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - src_orig = src - 1; - - /* load in advance */ - DUP2_ARG2(__lsx_vld, src_orig - src_stride, 0, src_orig, 0, - src_minus10, src_minus11); - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src10, src11); - - for (height -= 2; height; height -= 2) { - src_orig += src_stride_2x; - - 
DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus11, shuf1, zeros, src10, - shuf1, src_zero0, src_zero1); - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus10, shuf2, zeros, src_minus11, - shuf2, src_minus10, src_minus11); - - DUP2_ARG2(__lsx_vilvl_b, src10, src_minus10, src11, src_minus11, - src_minus10, src_minus11); - DUP2_ARG2(__lsx_vilvl_b, src_zero0, src_zero0, src_zero1, src_zero1, - src_zero0, src_zero1); - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, - src_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, - cmp_minus11, cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_minus11, const1, cmp_minus11, diff_minus10, diff_minus11); - - DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_hu, offset_mask0, 2, offset_mask1, 2, - offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src10, src11); - - __lsx_vstelm_d(dst0, dst, 0, 0); - __lsx_vstelm_d(dst0, dst + dst_stride, 0, 1); - dst += dst_stride_2x; - } - - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus11, shuf1, zeros, src10, shuf1, - src_zero0, src_zero1); - DUP2_ARG3(__lsx_vshuf_b, zeros, src_minus10, shuf2, zeros, src_minus11, - shuf2, src_minus10, src_minus11); - - DUP2_ARG2(__lsx_vilvl_b, src10, src_minus10, src11, src_minus11, - src_minus10, src_minus11); - DUP2_ARG2(__lsx_vilvl_b, src_zero0, src_zero0, src_zero1, src_zero1, - src_zero0, src_zero1); - DUP2_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - diff_minus10, diff_minus11); - DUP2_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero1, src_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_minus11, cmp_minus11, - cmp_minus10, cmp_minus11); - DUP2_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, diff_minus11, - const1, cmp_minus11, diff_minus10, diff_minus11); - - DUP2_ARG2(__lsx_vhaddw_hu_bu, diff_minus10, diff_minus10, diff_minus11, - diff_minus11, offset_mask0, offset_mask1); - DUP2_ARG2(__lsx_vaddi_hu, offset_mask0, 2, offset_mask1, 2, offset_mask0, - offset_mask1); - DUP2_ARG2(__lsx_vpickev_b, offset_mask1, offset_mask0, src_zero1, - src_zero0, offset, dst0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset, sao_offset, - sao_offset, offset, offset, offset); - dst0 = __lsx_vxori_b(dst0, 128); - dst0 = __lsx_vsadd_b(dst0, offset); - dst0 = __lsx_vxori_b(dst0, 128); - - __lsx_vstelm_d(dst0, dst, 0, 0); - __lsx_vstelm_d(dst0, dst + dst_stride, 0, 1); -} - -static void hevc_sao_edge_filter_135degree_16multiple_lsx(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t width, - int32_t height) -{ - const uint8_t 
*src_orig; - uint8_t *dst_orig; - int32_t v_cnt; - const int32_t src_stride_2x = (src_stride << 1); - const int32_t dst_stride_2x = (dst_stride << 1); - const int32_t src_stride_4x = (src_stride << 2); - const int32_t dst_stride_4x = (dst_stride << 2); - const int32_t src_stride_3x = src_stride_2x + src_stride; - const int32_t dst_stride_3x = dst_stride_2x + dst_stride; - - __m128i shuf1 = {0x807060504030201, 0x100F0E0D0C0B0A09}; - __m128i shuf2 = {0x908070605040302, 0x11100F0E0D0C0B0A}; - __m128i edge_idx = {0x403000201, 0x0}; - __m128i const1 = __lsx_vldi(1); - __m128i dst0, dst1, dst2, dst3; - __m128i cmp_minus10, cmp_minus11, cmp_minus12, cmp_minus13, cmp_plus10; - __m128i cmp_plus11, cmp_plus12, cmp_plus13, diff_minus10, diff_minus11; - __m128i diff_minus12, diff_minus13, diff_plus10, diff_plus11, diff_plus12; - __m128i diff_plus13, src10, src11, src12, src13, src_minus10, src_minus11; - __m128i src_plus10, src_plus11, src_plus12, src_plus13; - __m128i src_minus12, src_minus13, src_zero0, src_zero1, src_zero2, src_zero3; - __m128i offset_mask0, offset_mask1, offset_mask2, offset_mask3, sao_offset; - - sao_offset = __lsx_vld(sao_offset_val, 0); - sao_offset = __lsx_vpickev_b(sao_offset, sao_offset); - - for (; height; height -= 4) { - src_orig = src - 1; - dst_orig = dst; - - src_minus11 = __lsx_vld(src_orig, 0); - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src_plus10, src_plus11); - src_plus12 = __lsx_vldx(src_orig, src_stride_3x); - - for (v_cnt = 0; v_cnt < width; v_cnt += 16) { - src_minus10 = __lsx_vld(src_orig - src_stride, 2); - src_plus13 = __lsx_vldx(src_orig, src_stride_4x); - src_orig += 16; - src10 = __lsx_vld(src_orig, 0); - DUP2_ARG2(__lsx_vldx, src_orig, src_stride, src_orig, src_stride_2x, - src11, src12); - src13 =__lsx_vldx(src_orig, src_stride_3x); - - DUP4_ARG3(__lsx_vshuf_b, src10, src_minus11, shuf1, src11, - src_plus10, shuf1, src12, src_plus11, shuf1, src13, - src_plus12, shuf1, src_zero0, src_zero1, src_zero2, - src_zero3); - src_minus11 = __lsx_vshuf_b(src10, src_minus11, shuf2); - DUP2_ARG3(__lsx_vshuf_b, src11, src_plus10, shuf2, src12, - src_plus11, shuf2, src_minus12, src_minus13); - - DUP4_ARG2(__lsx_vseq_b, src_zero0, src_minus10, src_zero0, - src_plus10, src_zero1, src_minus11, src_zero1, - src_plus11, cmp_minus10, cmp_plus10, cmp_minus11, - cmp_plus11); - DUP4_ARG2(__lsx_vseq_b, src_zero2, src_minus12, src_zero2, - src_plus12, src_zero3, src_minus13, src_zero3, - src_plus13, cmp_minus12, cmp_plus12, cmp_minus13, - cmp_plus13); - DUP4_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_plus10, - cmp_plus10, cmp_minus11, cmp_minus11, cmp_plus11, - cmp_plus11, diff_minus10, diff_plus10, diff_minus11, - diff_plus11); - DUP4_ARG2(__lsx_vnor_v, cmp_minus12, cmp_minus12, cmp_plus12, - cmp_plus12, cmp_minus13, cmp_minus13, cmp_plus13, - cmp_plus13, diff_minus12, diff_plus12, diff_minus13, - diff_plus13); - DUP4_ARG2(__lsx_vsle_bu, src_zero0, src_minus10, src_zero0, - src_plus10, src_zero1, src_minus11, src_zero1, src_plus11, - cmp_minus10, cmp_plus10, cmp_minus11, cmp_plus11); - DUP4_ARG2(__lsx_vsle_bu, src_zero2, src_minus12, src_zero2, - src_plus12, src_zero3, src_minus13, src_zero3, src_plus13, - cmp_minus12, cmp_plus12, cmp_minus13, cmp_plus13); - DUP4_ARG2(__lsx_vnor_v, cmp_minus10, cmp_minus10, cmp_plus10, - cmp_plus10, cmp_minus11, cmp_minus11, cmp_plus11, - cmp_plus11, cmp_minus10, cmp_plus10, cmp_minus11, - cmp_plus11); - DUP4_ARG2(__lsx_vnor_v, cmp_minus12, cmp_minus12, cmp_plus12, - cmp_plus12, cmp_minus13, cmp_minus13, 
cmp_plus13, - cmp_plus13, cmp_minus12, cmp_plus12, cmp_minus13, - cmp_plus13); - DUP4_ARG3(__lsx_vbitsel_v, diff_minus10, const1, cmp_minus10, - diff_plus10, const1, cmp_plus10, diff_minus11, const1, - cmp_minus11, diff_plus11, const1, cmp_plus11, - diff_minus10, diff_plus10, diff_minus11, diff_plus11); - DUP4_ARG3(__lsx_vbitsel_v, diff_minus12, const1, cmp_minus12, - diff_plus12, const1, cmp_plus12, diff_minus13, const1, - cmp_minus13, diff_plus13, const1, cmp_plus13, - diff_minus12, diff_plus12, diff_minus13, diff_plus13); - - DUP4_ARG2(__lsx_vadd_b, diff_minus10, diff_plus10, diff_minus11, - diff_plus11, diff_minus12, diff_plus12, diff_minus13, - diff_plus13, offset_mask0, offset_mask1, offset_mask2, - offset_mask3); - DUP4_ARG2(__lsx_vaddi_bu, offset_mask0, 2, offset_mask1, 2, - offset_mask2, 2, offset_mask3, 2, offset_mask0, - offset_mask1, offset_mask2, offset_mask3); - - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask0, - sao_offset, sao_offset, offset_mask0, offset_mask0, - offset_mask0); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask1, - sao_offset, sao_offset, offset_mask1, offset_mask1, - offset_mask1); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask2, - sao_offset, sao_offset, offset_mask2, offset_mask2, - offset_mask2); - DUP2_ARG3(__lsx_vshuf_b, edge_idx, edge_idx, offset_mask3, - sao_offset, sao_offset, offset_mask3, offset_mask3, - offset_mask3); - - DUP4_ARG2(__lsx_vxori_b, src_zero0, 128, src_zero1, 128, - src_zero2, 128, src_zero3, 128, src_zero0, src_zero1, - src_zero2, src_zero3); - DUP4_ARG2(__lsx_vsadd_b, src_zero0, offset_mask0, src_zero1, - offset_mask1, src_zero2, offset_mask2, src_zero3, - offset_mask3, dst0, dst1, dst2, dst3); - DUP4_ARG2(__lsx_vxori_b, dst0, 128, dst1, 128, dst2, 128, dst3, - 128, dst0, dst1, dst2, dst3); - - src_minus11 = src10; - src_plus10 = src11; - src_plus11 = src12; - src_plus12 = src13; - - __lsx_vst(dst0, dst_orig, 0); - __lsx_vstx(dst1, dst_orig, dst_stride); - __lsx_vstx(dst2, dst_orig, dst_stride_2x); - __lsx_vstx(dst3, dst_orig, dst_stride_3x); - dst_orig += 16; - } - - src += src_stride_4x; - dst += dst_stride_4x; - } -} - -void ff_hevc_sao_edge_filter_8_lsx(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride_dst, - const int16_t *sao_offset_val, - int eo, int width, int height) -{ - ptrdiff_t stride_src = (2 * MAX_PB_SIZE + AV_INPUT_BUFFER_PADDING_SIZE); - - switch (eo) { - case 0: - if (width >> 4) { - hevc_sao_edge_filter_0degree_16multiple_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, - width - (width & 0x0F), - height); - dst += width & 0xFFFFFFF0; - src += width & 0xFFFFFFF0; - width &= 0x0F; - } - - if (width >> 3) { - hevc_sao_edge_filter_0degree_8width_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - dst += 8; - src += 8; - width &= 0x07; - } - - if (width) { - hevc_sao_edge_filter_0degree_4width_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - } - break; - - case 1: - if (width >> 4) { - hevc_sao_edge_filter_90degree_16multiple_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, - width - (width & 0x0F), - height); - dst += width & 0xFFFFFFF0; - src += width & 0xFFFFFFF0; - width &= 0x0F; - } - - if (width >> 3) { - hevc_sao_edge_filter_90degree_8width_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - dst += 8; - src += 8; - width &= 0x07; - } - - if (width) { - hevc_sao_edge_filter_90degree_4width_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - } - break; - - case 2: - if (width >> 4) { - 
hevc_sao_edge_filter_45degree_16multiple_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, - width - (width & 0x0F), - height); - dst += width & 0xFFFFFFF0; - src += width & 0xFFFFFFF0; - width &= 0x0F; - } - - if (width >> 3) { - hevc_sao_edge_filter_45degree_8width_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - dst += 8; - src += 8; - width &= 0x07; - } - - if (width) { - hevc_sao_edge_filter_45degree_4width_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - } - break; - - case 3: - if (width >> 4) { - hevc_sao_edge_filter_135degree_16multiple_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, - width - (width & 0x0F), - height); - dst += width & 0xFFFFFFF0; - src += width & 0xFFFFFFF0; - width &= 0x0F; - } - - if (width >> 3) { - hevc_sao_edge_filter_135degree_8width_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - dst += 8; - src += 8; - width &= 0x07; - } - - if (width) { - hevc_sao_edge_filter_135degree_4width_lsx(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - } - break; - } -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/h264qpel_mmi.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/h264qpel_mmi.c deleted file mode 100644 index 3482956e13b61d36c95abe7506c056cd782c0549..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/h264qpel_mmi.c +++ /dev/null @@ -1,3134 +0,0 @@ -/* - * Loongson SIMD optimized h264qpel - * - * Copyright (c) 2015 Loongson Technology Corporation Limited - * Copyright (c) 2015 Zhou Xiaoyong - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "h264dsp_mips.h" -#include "hpeldsp_mips.h" -#include "libavcodec/bit_depth_template.c" -#include "libavutil/mips/mmiutils.h" - -static inline void copy_block4_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride, int h) -{ - double ftmp[1]; - DECLARE_VAR_LOW32; - - __asm__ volatile ( - "1: \n\t" - MMI_ULWC1(%[ftmp0], %[src], 0x00) - MMI_SWC1(%[ftmp0], %[dst], 0x00) - "addi %[h], %[h], -0x01 \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "bnez %[h], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), - [dst]"+&r"(dst), [src]"+&r"(src), - RESTRICT_ASM_LOW32 - [h]"+&r"(h) - : [dstStride]"r"((mips_reg)dstStride), - [srcStride]"r"((mips_reg)srcStride) - : "memory" - ); -} - -static inline void copy_block8_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride, int h) -{ - double ftmp[1]; - DECLARE_VAR_ALL64; - - __asm__ volatile ( - "1: \n\t" - MMI_ULDC1(%[ftmp0], %[src], 0x00) - MMI_SDC1(%[ftmp0], %[dst], 0x00) - "addi %[h], %[h], -0x01 \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "bnez %[h], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), - RESTRICT_ASM_ALL64 - [dst]"+&r"(dst), [src]"+&r"(src), - [h]"+&r"(h) - : [dstStride]"r"((mips_reg)dstStride), - [srcStride]"r"((mips_reg)srcStride) - : "memory" - ); -} - -static inline void copy_block16_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride, int h) -{ - double ftmp[1]; - uint64_t tmp[1]; - DECLARE_VAR_ALL64; - - __asm__ volatile ( - "1: \n\t" - MMI_ULDC1(%[ftmp0], %[src], 0x00) - "ldl %[tmp0], 0x0f(%[src]) \n\t" - "ldr %[tmp0], 0x08(%[src]) \n\t" - MMI_SDC1(%[ftmp0], %[dst], 0x00) - "sdl %[tmp0], 0x0f(%[dst]) \n\t" - "sdr %[tmp0], 0x08(%[dst]) \n\t" - "addi %[h], %[h], -0x01 \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "bnez %[h], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), - [tmp0]"=&r"(tmp[0]), - RESTRICT_ASM_ALL64 - [dst]"+&r"(dst), [src]"+&r"(src), - [h]"+&r"(h) - : [dstStride]"r"((mips_reg)dstStride), - [srcStride]"r"((mips_reg)srcStride) - : "memory" - ); -} - -#define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1) -#define op2_put(a, b) a = CLIP(((b) + 512)>>10) -static void put_h264_qpel4_h_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - double ftmp[10]; - uint64_t tmp[1]; - DECLARE_VAR_LOW32; - - __asm__ volatile ( - "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "dli %[tmp0], 0x04 \n\t" - "1: \n\t" - MMI_ULWC1(%[ftmp1], %[src], -0x02) - MMI_ULWC1(%[ftmp2], %[src], -0x01) - MMI_ULWC1(%[ftmp3], %[src], 0x00) - MMI_ULWC1(%[ftmp4], %[src], 0x01) - MMI_ULWC1(%[ftmp5], %[src], 0x02) - MMI_ULWC1(%[ftmp6], %[src], 0x03) - - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" - "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "paddsh %[ftmp7], %[ftmp3], %[ftmp4] \n\t" - "paddsh %[ftmp8], %[ftmp2], %[ftmp5] \n\t" - "paddsh %[ftmp9], %[ftmp1], %[ftmp6] \n\t" - "pmullh %[ftmp7], %[ftmp7], %[ff_pw_20] \n\t" - "pmullh %[ftmp8], %[ftmp8], %[ff_pw_5] \n\t" - "psubsh %[ftmp7], %[ftmp7], %[ftmp8] \n\t" - "paddsh %[ftmp9], 
%[ftmp7], %[ftmp9] \n\t" - "paddsh %[ftmp9], %[ftmp9], %[ff_pw_16] \n\t" - "psrah %[ftmp9], %[ftmp9], %[ff_pw_5] \n\t" - "packushb %[ftmp9], %[ftmp9], %[ftmp0] \n\t" - MMI_SWC1(%[ftmp9], %[dst], 0x00) - "daddi %[tmp0], %[tmp0], -0x01 \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "bnez %[tmp0], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [tmp0]"=&r"(tmp[0]), - RESTRICT_ASM_LOW32 - [dst]"+&r"(dst), [src]"+&r"(src) - : [dstStride]"r"((mips_reg)dstStride), - [srcStride]"r"((mips_reg)srcStride), - [ff_pw_20]"f"(ff_pw_20.f), [ff_pw_5]"f"(ff_pw_5.f), - [ff_pw_16]"f"(ff_pw_16.f) - : "memory" - ); -} - -static void put_h264_qpel8_h_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - double ftmp[11]; - uint64_t tmp[1]; - DECLARE_VAR_ALL64; - - __asm__ volatile ( - "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "dli %[tmp0], 0x08 \n\t" - "1: \n\t" - MMI_ULDC1(%[ftmp1], %[src], -0x02) - MMI_ULDC1(%[ftmp2], %[src], -0x01) - MMI_ULDC1(%[ftmp3], %[src], 0x00) - MMI_ULDC1(%[ftmp4], %[src], 0x01) - MMI_ULDC1(%[ftmp5], %[src], 0x02) - MMI_ULDC1(%[ftmp6], %[src], 0x03) - "punpcklbh %[ftmp7], %[ftmp3], %[ftmp0] \n\t" - "punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" - "punpcklbh %[ftmp9], %[ftmp4], %[ftmp0] \n\t" - "punpckhbh %[ftmp10], %[ftmp4], %[ftmp0] \n\t" - "paddsh %[ftmp3], %[ftmp7], %[ftmp9] \n\t" - "paddsh %[ftmp4], %[ftmp8], %[ftmp10] \n\t" - "pmullh %[ftmp3], %[ftmp3], %[ff_pw_20] \n\t" - "pmullh %[ftmp4], %[ftmp4], %[ff_pw_20] \n\t" - "punpcklbh %[ftmp7], %[ftmp2], %[ftmp0] \n\t" - "punpckhbh %[ftmp8], %[ftmp2], %[ftmp0] \n\t" - "punpcklbh %[ftmp9], %[ftmp5], %[ftmp0] \n\t" - "punpckhbh %[ftmp10], %[ftmp5], %[ftmp0] \n\t" - "paddsh %[ftmp2], %[ftmp7], %[ftmp9] \n\t" - "paddsh %[ftmp5], %[ftmp8], %[ftmp10] \n\t" - "pmullh %[ftmp2], %[ftmp2], %[ff_pw_5] \n\t" - "pmullh %[ftmp5], %[ftmp5], %[ff_pw_5] \n\t" - "punpcklbh %[ftmp7], %[ftmp1], %[ftmp0] \n\t" - "punpckhbh %[ftmp8], %[ftmp1], %[ftmp0] \n\t" - "punpcklbh %[ftmp9], %[ftmp6], %[ftmp0] \n\t" - "punpckhbh %[ftmp10], %[ftmp6], %[ftmp0] \n\t" - "paddsh %[ftmp1], %[ftmp7], %[ftmp9] \n\t" - "paddsh %[ftmp6], %[ftmp8], %[ftmp10] \n\t" - "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - "psubsh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" - "paddsh %[ftmp3], %[ftmp3], %[ftmp1] \n\t" - "paddsh %[ftmp4], %[ftmp4], %[ftmp6] \n\t" - "paddsh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - "paddsh %[ftmp4], %[ftmp4], %[ff_pw_16] \n\t" - "psrah %[ftmp3], %[ftmp3], %[ff_pw_5] \n\t" - "psrah %[ftmp4], %[ftmp4], %[ff_pw_5] \n\t" - "packushb %[ftmp9], %[ftmp3], %[ftmp4] \n\t" - MMI_SDC1(%[ftmp9], %[dst], 0x00) - "daddi %[tmp0], %[tmp0], -0x01 \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "bnez %[tmp0], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [ftmp10]"=&f"(ftmp[10]), - [tmp0]"=&r"(tmp[0]), - RESTRICT_ASM_ALL64 - [dst]"+&r"(dst), [src]"+&r"(src) - : [dstStride]"r"((mips_reg)dstStride), - [srcStride]"r"((mips_reg)srcStride), - [ff_pw_20]"f"(ff_pw_20.f), [ff_pw_5]"f"(ff_pw_5.f), - [ff_pw_16]"f"(ff_pw_16.f) - : "memory" - ); -} - -static 
void put_h264_qpel16_h_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - put_h264_qpel8_h_lowpass_mmi(dst, src, dstStride, srcStride); - put_h264_qpel8_h_lowpass_mmi(dst+8, src+8, dstStride, srcStride); - src += 8*srcStride; - dst += 8*dstStride; - put_h264_qpel8_h_lowpass_mmi(dst, src, dstStride, srcStride); - put_h264_qpel8_h_lowpass_mmi(dst+8, src+8, dstStride, srcStride); -} - -static void avg_h264_qpel4_h_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - double ftmp[11]; - uint64_t tmp[1]; - DECLARE_VAR_LOW32; - - __asm__ volatile ( - "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "dli %[tmp0], 0x04 \n\t" - "1: \n\t" - MMI_ULWC1(%[ftmp1], %[src], -0x02) - MMI_ULWC1(%[ftmp2], %[src], -0x01) - MMI_ULWC1(%[ftmp3], %[src], 0x00) - MMI_ULWC1(%[ftmp4], %[src], 0x01) - MMI_ULWC1(%[ftmp5], %[src], 0x02) - MMI_ULWC1(%[ftmp6], %[src], 0x03) - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" - "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "paddsh %[ftmp7], %[ftmp3], %[ftmp4] \n\t" - "paddsh %[ftmp8], %[ftmp2], %[ftmp5] \n\t" - "paddsh %[ftmp9], %[ftmp1], %[ftmp6] \n\t" - "pmullh %[ftmp7], %[ftmp7], %[ff_pw_20] \n\t" - "pmullh %[ftmp8], %[ftmp8], %[ff_pw_5] \n\t" - "psubsh %[ftmp7], %[ftmp7], %[ftmp8] \n\t" - "paddsh %[ftmp9], %[ftmp7], %[ftmp9] \n\t" - "paddsh %[ftmp9], %[ftmp9], %[ff_pw_16] \n\t" - "psrah %[ftmp9], %[ftmp9], %[ff_pw_5] \n\t" - "packushb %[ftmp9], %[ftmp9], %[ftmp0] \n\t" - MMI_LWC1(%[ftmp10], %[dst], 0x00) - "pavgb %[ftmp9], %[ftmp9], %[ftmp10] \n\t" - MMI_SWC1(%[ftmp9], %[dst], 0x00) - "daddi %[tmp0], %[tmp0], -0x01 \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "bnez %[tmp0], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [ftmp10]"=&f"(ftmp[10]), - [tmp0]"=&r"(tmp[0]), - RESTRICT_ASM_LOW32 - [dst]"+&r"(dst), [src]"+&r"(src) - : [dstStride]"r"((mips_reg)dstStride), - [srcStride]"r"((mips_reg)srcStride), - [ff_pw_20]"f"(ff_pw_20.f), [ff_pw_5]"f"(ff_pw_5.f), - [ff_pw_16]"f"(ff_pw_16.f) - : "memory" - ); -} - -static void avg_h264_qpel8_h_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - double ftmp[11]; - uint64_t tmp[1]; - DECLARE_VAR_ALL64; - - __asm__ volatile ( - "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "dli %[tmp0], 0x08 \n\t" - "1: \n\t" - MMI_ULDC1(%[ftmp1], %[src], -0x02) - MMI_ULDC1(%[ftmp2], %[src], -0x01) - MMI_ULDC1(%[ftmp3], %[src], 0x00) - MMI_ULDC1(%[ftmp4], %[src], 0x01) - MMI_ULDC1(%[ftmp5], %[src], 0x02) - MMI_ULDC1(%[ftmp6], %[src], 0x03) - "punpcklbh %[ftmp7], %[ftmp3], %[ftmp0] \n\t" - "punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" - "punpcklbh %[ftmp9], %[ftmp4], %[ftmp0] \n\t" - "punpckhbh %[ftmp10], %[ftmp4], %[ftmp0] \n\t" - "paddsh %[ftmp3], %[ftmp7], %[ftmp9] \n\t" - "paddsh %[ftmp4], %[ftmp8], %[ftmp10] \n\t" - "pmullh %[ftmp3], %[ftmp3], %[ff_pw_20] \n\t" - "pmullh %[ftmp4], %[ftmp4], %[ff_pw_20] \n\t" - "punpcklbh %[ftmp7], %[ftmp2], %[ftmp0] \n\t" - "punpckhbh %[ftmp8], %[ftmp2], %[ftmp0] \n\t" - "punpcklbh %[ftmp9], %[ftmp5], %[ftmp0] \n\t" - "punpckhbh %[ftmp10], %[ftmp5], %[ftmp0] \n\t" - "paddsh %[ftmp2], 
%[ftmp7], %[ftmp9] \n\t" - "paddsh %[ftmp5], %[ftmp8], %[ftmp10] \n\t" - "pmullh %[ftmp2], %[ftmp2], %[ff_pw_5] \n\t" - "pmullh %[ftmp5], %[ftmp5], %[ff_pw_5] \n\t" - "punpcklbh %[ftmp7], %[ftmp1], %[ftmp0] \n\t" - "punpckhbh %[ftmp8], %[ftmp1], %[ftmp0] \n\t" - "punpcklbh %[ftmp9], %[ftmp6], %[ftmp0] \n\t" - "punpckhbh %[ftmp10], %[ftmp6], %[ftmp0] \n\t" - "paddsh %[ftmp1], %[ftmp7], %[ftmp9] \n\t" - "paddsh %[ftmp6], %[ftmp8], %[ftmp10] \n\t" - "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - "psubsh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" - "paddsh %[ftmp3], %[ftmp3], %[ftmp1] \n\t" - "paddsh %[ftmp4], %[ftmp4], %[ftmp6] \n\t" - "paddsh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - "paddsh %[ftmp4], %[ftmp4], %[ff_pw_16] \n\t" - "psrah %[ftmp3], %[ftmp3], %[ff_pw_5] \n\t" - "psrah %[ftmp4], %[ftmp4], %[ff_pw_5] \n\t" - "packushb %[ftmp9], %[ftmp3], %[ftmp4] \n\t" - MMI_LDC1(%[ftmp10], %[dst], 0x00) - "pavgb %[ftmp9], %[ftmp9], %[ftmp10] \n\t" - MMI_SDC1(%[ftmp9], %[dst], 0x00) - "daddi %[tmp0], %[tmp0], -0x01 \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "bnez %[tmp0], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [ftmp10]"=&f"(ftmp[10]), - [tmp0]"=&r"(tmp[0]), - RESTRICT_ASM_ALL64 - [dst]"+&r"(dst), [src]"+&r"(src) - : [dstStride]"r"((mips_reg)dstStride), - [srcStride]"r"((mips_reg)srcStride), - [ff_pw_20]"f"(ff_pw_20.f), [ff_pw_5]"f"(ff_pw_5.f), - [ff_pw_16]"f"(ff_pw_16.f) - : "memory" - ); -} - -static void avg_h264_qpel16_h_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - avg_h264_qpel8_h_lowpass_mmi(dst, src, dstStride, srcStride); - avg_h264_qpel8_h_lowpass_mmi(dst+8, src+8, dstStride, srcStride); - src += 8*srcStride; - dst += 8*dstStride; - avg_h264_qpel8_h_lowpass_mmi(dst, src, dstStride, srcStride); - avg_h264_qpel8_h_lowpass_mmi(dst+8, src+8, dstStride, srcStride); -} - -static void put_h264_qpel4_v_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - double ftmp[12]; - uint64_t tmp[1]; - DECLARE_VAR_LOW32; - - src -= 2 * srcStride; - - __asm__ volatile ( - ".set push \n\t" - ".set noreorder \n\t" - "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "dli %[tmp0], 0x02 \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - "mtc1 %[tmp0], %[ftmp10] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "dli %[tmp0], 0x05 \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - "mtc1 %[tmp0], %[ftmp11] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp3], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp4], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp5], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" - MMI_LWC1(%[ftmp6], %[src], 0x00) - "paddh %[ftmp7], %[ftmp3], %[ftmp4] \n\t" - "psllh %[ftmp7], %[ftmp7], %[ftmp10] \n\t" - "psubh %[ftmp7], %[ftmp7], %[ftmp2] \n\t" - "psubh %[ftmp7], %[ftmp7], %[ftmp5] \n\t" - "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "pmullh %[ftmp7], %[ftmp7], %[ff_pw_5] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], 
%[srcStride] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp6] \n\t" - "paddh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" - "psrah %[ftmp7], %[ftmp7], %[ftmp11] \n\t" - "packushb %[ftmp7], %[ftmp7], %[ftmp7] \n\t" - MMI_SWC1(%[ftmp7], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - "paddh %[ftmp7], %[ftmp4], %[ftmp5] \n\t" - "psllh %[ftmp7], %[ftmp7], %[ftmp10] \n\t" - "psubh %[ftmp7], %[ftmp7], %[ftmp3] \n\t" - "psubh %[ftmp7], %[ftmp7], %[ftmp6] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "pmullh %[ftmp7], %[ftmp7], %[ff_pw_5] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" - "paddh %[ftmp7], %[ftmp7], %[ftmp2] \n\t" - "psrah %[ftmp7], %[ftmp7], %[ftmp11] \n\t" - "packushb %[ftmp7], %[ftmp7], %[ftmp7] \n\t" - MMI_SWC1(%[ftmp7], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - "paddh %[ftmp7], %[ftmp5], %[ftmp6] \n\t" - "psllh %[ftmp7], %[ftmp7], %[ftmp10] \n\t" - "psubh %[ftmp7], %[ftmp7], %[ftmp4] \n\t" - "psubh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" - "pmullh %[ftmp7], %[ftmp7], %[ff_pw_5] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - "paddh %[ftmp7], %[ftmp7], %[ftmp3] \n\t" - "psrah %[ftmp7], %[ftmp7], %[ftmp11] \n\t" - "packushb %[ftmp7], %[ftmp7], %[ftmp7] \n\t" - MMI_SWC1(%[ftmp7], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp3], %[src], 0x00) - "paddh %[ftmp7], %[ftmp6], %[ftmp1] \n\t" - "psllh %[ftmp7], %[ftmp7], %[ftmp10] \n\t" - "psubh %[ftmp7], %[ftmp7], %[ftmp5] \n\t" - "psubh %[ftmp7], %[ftmp7], %[ftmp2] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" - "pmullh %[ftmp7], %[ftmp7], %[ff_pw_5] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" - "paddh %[ftmp7], %[ftmp7], %[ftmp4] \n\t" - "psrah %[ftmp7], %[ftmp7], %[ftmp11] \n\t" - "packushb %[ftmp7], %[ftmp7], %[ftmp7] \n\t" - MMI_SWC1(%[ftmp7], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - ".set pop \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]), - [tmp0]"=&r"(tmp[0]), - RESTRICT_ASM_LOW32 - [dst]"+&r"(dst), [src]"+&r"(src) - : [dstStride]"r"((mips_reg)dstStride), - [srcStride]"r"((mips_reg)srcStride), - [ff_pw_5]"f"(ff_pw_5.f), [ff_pw_16]"f"(ff_pw_16.f) - : "memory" - ); -} - -static void put_h264_qpel8_v_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - int w = 2; - int h = 8; - double ftmp[10]; - uint64_t tmp[1]; - DECLARE_VAR_LOW32; - - src -= 2 * srcStride; - - while (w--) { - __asm__ volatile ( - ".set push \n\t" - ".set noreorder \n\t" - "dli %[tmp0], 0x02 \n\t" - MMI_LWC1(%[ftmp0], %[src], 0x00) - "mtc1 %[tmp0], %[ftmp8] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "dli %[tmp0], 0x05 \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - "mtc1 %[tmp0], %[ftmp9] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "pxor %[ftmp7], %[ftmp7], %[ftmp7] \n\t" - MMI_LWC1(%[ftmp3], 
%[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp4], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" - MMI_LWC1(%[ftmp5], %[src], 0x00) - "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp0], %[src], 0x00) - "paddh %[ftmp6], %[ftmp3], %[ftmp4] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "paddh %[ftmp6], %[ftmp4], %[ftmp5] \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp0], %[ftmp1] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - MMI_LWC1(%[ftmp3], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "punpcklbh %[ftmp3] , %[ftmp3], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] 
\n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp1], %[ftmp2] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - MMI_LWC1(%[ftmp4], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ftmp4] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - MMI_LWC1(%[ftmp5], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp3], %[ftmp4] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - MMI_LWC1(%[ftmp0], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "bne %[h], 0x10, 2f \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "paddh %[ftmp6], %[ftmp4], %[ftmp5] \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp0], %[ftmp1] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] 
\n\t" - MMI_LWC1(%[ftmp3], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp1], %[ftmp2] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - MMI_LWC1(%[ftmp4], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ftmp4] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - MMI_LWC1(%[ftmp5], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp3], %[ftmp4] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - MMI_LWC1(%[ftmp0], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp4], %[ftmp5] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "paddh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp3], 
%[ftmp3], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "2: \n\t" - ".set pop \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [tmp0]"=&r"(tmp[0]), - RESTRICT_ASM_LOW32 - [src]"+&r"(src), [dst]"+&r"(dst), - [h]"+&r"(h) - : [dstStride]"r"((mips_reg)dstStride), - [srcStride]"r"((mips_reg)srcStride), - [ff_pw_5]"f"(ff_pw_5.f), [ff_pw_16]"f"(ff_pw_16.f) - : "memory" - ); - - src += 4 - (h + 5) * srcStride; - dst += 4 - h * dstStride; - } -} - -static void put_h264_qpel16_v_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - put_h264_qpel8_v_lowpass_mmi(dst, src, dstStride, srcStride); - put_h264_qpel8_v_lowpass_mmi(dst+8, src+8, dstStride, srcStride); - src += 8*srcStride; - dst += 8*dstStride; - put_h264_qpel8_v_lowpass_mmi(dst, src, dstStride, srcStride); - put_h264_qpel8_v_lowpass_mmi(dst+8, src+8, dstStride, srcStride); -} - -static void avg_h264_qpel4_v_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - double ftmp[10]; - uint64_t tmp[1]; - - src -= 2 * srcStride; - - __asm__ volatile ( - ".set push \n\t" - ".set noreorder \n\t" - "dli %[tmp0], 0x02 \n\t" - "pxor %[ftmp7], %[ftmp7], %[ftmp7] \n\t" - "mtc1 %[tmp0], %[ftmp9] \n\t" - "dli %[tmp0], 0x05 \n\t" - MMI_LWC1(%[ftmp0], %[src], 0x00) - "mtc1 %[tmp0], %[ftmp8] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp3], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp4], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" - MMI_LWC1(%[ftmp5], %[src], 0x00) - "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp0], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp0], %[src], 0x00) - "paddh %[ftmp6], %[ftmp3], %[ftmp4] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh 
%[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp1], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - "paddh %[ftmp6], %[ftmp4], %[ftmp5] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp2], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - "paddh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp3], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - ".set pop \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [tmp0]"=&r"(tmp[0]), - [src]"+&r"(src), [dst]"+&r"(dst) - : [dstStride]"r"((mips_reg)dstStride), - [srcStride]"r"((mips_reg)srcStride), - [ff_pw_5]"f"(ff_pw_5.f), [ff_pw_16]"f"(ff_pw_16.f) - : "memory" - ); -} - -static void avg_h264_qpel8_v_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - int w = 2; - int h = 8; - double ftmp[10]; - uint64_t tmp[1]; - DECLARE_VAR_LOW32; - - src -= 2 * srcStride; - - while (w--) { - __asm__ volatile ( - ".set push \n\t" - ".set noreorder \n\t" - "dli %[tmp0], 0x02 \n\t" - "pxor %[ftmp7], %[ftmp7], %[ftmp7] \n\t" - "mtc1 %[tmp0], %[ftmp9] \n\t" - "dli %[tmp0], 0x05 \n\t" - MMI_LWC1(%[ftmp0], %[src], 0x00) - "mtc1 %[tmp0], %[ftmp8] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp3], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_LWC1(%[ftmp4], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" - MMI_LWC1(%[ftmp5], %[src], 0x00) - "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], 
%[ftmp1] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp0], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp0], %[src], 0x00) - "paddh %[ftmp6], %[ftmp3], %[ftmp4] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp1], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - "paddh %[ftmp6], %[ftmp4], %[ftmp5] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp2], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - "paddh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp3], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp3], %[src], 0x00) - "paddh %[ftmp6], %[ftmp0], %[ftmp1] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp4], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - 
MMI_LWC1(%[ftmp4], %[src], 0x00) - "paddh %[ftmp6], %[ftmp1], %[ftmp2] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ftmp4] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp5], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp5], %[src], 0x00) - "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp0], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp0], %[src], 0x00) - "paddh %[ftmp6], %[ftmp3], %[ftmp4] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp1], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - "bne %[h], 0x10, 2f \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - "paddh %[ftmp6], %[ftmp4], %[ftmp5] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp2], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - "paddh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], 
%[ftmp6] \n\t" - MMI_LWC1(%[ftmp3], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp3], %[src], 0x00) - "paddh %[ftmp6], %[ftmp0], %[ftmp1] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp4], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp4], %[src], 0x00) - "paddh %[ftmp6], %[ftmp1], %[ftmp2] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ftmp4] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp5], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp5], %[src], 0x00) - "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp0], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp0], %[src], 0x00) - "paddh %[ftmp6], %[ftmp3], %[ftmp4] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp1], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp1], %[src], 0x00) - "paddh %[ftmp6], %[ftmp4], %[ftmp5] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp2], 
%[ftmp2], %[ftmp1] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp2], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - MMI_LWC1(%[ftmp2], %[src], 0x00) - "paddh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp9] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" - "packushb %[ftmp6], %[ftmp6], %[ftmp6] \n\t" - MMI_LWC1(%[ftmp3], %[dst], 0x00) - "pavgb %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - MMI_SWC1(%[ftmp6], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "2: \n\t" - ".set pop \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [tmp0]"=&r"(tmp[0]), - RESTRICT_ASM_LOW32 - [src]"+&r"(src), [dst]"+&r"(dst), - [h]"+&r"(h) - : [dstStride]"r"((mips_reg)dstStride), - [srcStride]"r"((mips_reg)srcStride), - [ff_pw_5]"f"(ff_pw_5.f), [ff_pw_16]"f"(ff_pw_16.f) - : "memory" - ); - - src += 4 - (h + 5) * srcStride; - dst += 4 - h * dstStride; - } -} - -static void avg_h264_qpel16_v_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - avg_h264_qpel8_v_lowpass_mmi(dst, src, dstStride, srcStride); - avg_h264_qpel8_v_lowpass_mmi(dst+8, src+8, dstStride, srcStride); - src += 8*srcStride; - dst += 8*dstStride; - avg_h264_qpel8_v_lowpass_mmi(dst, src, dstStride, srcStride); - avg_h264_qpel8_v_lowpass_mmi(dst+8, src+8, dstStride, srcStride); -} - -static void put_h264_qpel4_hv_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - INIT_CLIP - int i; - int16_t _tmp[36]; - int16_t *tmp = _tmp; - double ftmp[10]; - uint64_t tmp0; - DECLARE_VAR_LOW32; - - src -= 2*srcStride; - - __asm__ volatile ( - "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "dli %[tmp0], 0x09 \n\t" - "1: \n\t" - MMI_ULWC1(%[ftmp1], %[src], -0x02) - MMI_ULWC1(%[ftmp2], %[src], -0x01) - MMI_ULWC1(%[ftmp3], %[src], 0x00) - MMI_ULWC1(%[ftmp4], %[src], 0x01) - MMI_ULWC1(%[ftmp5], %[src], 0x02) - MMI_ULWC1(%[ftmp6], %[src], 0x03) - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" - "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "paddsh %[ftmp7], %[ftmp3], %[ftmp4] \n\t" - "paddsh %[ftmp8], %[ftmp2], %[ftmp5] \n\t" - "paddsh %[ftmp9], %[ftmp1], %[ftmp6] \n\t" - "pmullh %[ftmp7], %[ftmp7], %[ff_pw_20] \n\t" - "pmullh %[ftmp8], %[ftmp8], %[ff_pw_5] \n\t" - "psubsh %[ftmp7], %[ftmp7], %[ftmp8] \n\t" - "paddsh %[ftmp9], %[ftmp7], %[ftmp9] \n\t" - MMI_SDC1(%[ftmp9], %[tmp], 0x00) - "daddi %[tmp0], %[tmp0], -0x01 \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - PTR_ADDU "%[tmp], %[tmp], %[tmpStride] \n\t" - "bnez %[tmp0], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), 
[ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [tmp0]"=&r"(tmp0), - RESTRICT_ASM_LOW32 - [tmp]"+&r"(tmp), [src]"+&r"(src) - : [tmpStride]"r"(8), - [srcStride]"r"((mips_reg)srcStride), - [ff_pw_20]"f"(ff_pw_20.f), [ff_pw_5]"f"(ff_pw_5.f) - : "memory" - ); - - tmp -= 28; - - for (i=0; i<4; i++) { - const int16_t tmpB= tmp[-8]; - const int16_t tmpA= tmp[-4]; - const int16_t tmp0= tmp[ 0]; - const int16_t tmp1= tmp[ 4]; - const int16_t tmp2= tmp[ 8]; - const int16_t tmp3= tmp[12]; - const int16_t tmp4= tmp[16]; - const int16_t tmp5= tmp[20]; - const int16_t tmp6= tmp[24]; - op2_put(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3)); - op2_put(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4)); - op2_put(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5)); - op2_put(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6)); - dst++; - tmp++; - } -} - -static void put_h264_qpel8or16_hv1_lowpass_mmi(int16_t *tmp, - const uint8_t *src, ptrdiff_t tmpStride, ptrdiff_t srcStride, int size) -{ - int w = (size + 8) >> 2; - double ftmp[11]; - uint64_t tmp0; - DECLARE_VAR_LOW32; - - src -= 2 * srcStride + 2; - - while (w--) { - __asm__ volatile ( - "dli %[tmp0], 0x02 \n\t" - MMI_ULWC1(%[ftmp0], %[src], 0x00) - "mtc1 %[tmp0], %[ftmp10] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "pxor %[ftmp7], %[ftmp7], %[ftmp7] \n\t" - MMI_ULWC1(%[ftmp1], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_ULWC1(%[ftmp2], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_ULWC1(%[ftmp3], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - MMI_ULWC1(%[ftmp4], %[src], 0x00) - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" - MMI_ULWC1(%[ftmp5], %[src], 0x00) - "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x00) - MMI_ULWC1(%[ftmp0], %[src], 0x00) - "paddh %[ftmp6], %[ftmp3], %[ftmp4] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x30) - MMI_ULWC1(%[ftmp1], %[src], 0x00) - "paddh %[ftmp6], %[ftmp4], %[ftmp5] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" - PTR_ADDU "%[src], %[src], 
%[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x60) - MMI_ULWC1(%[ftmp2], %[src], 0x00) - "paddh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x90) - MMI_ULWC1(%[ftmp3], %[src], 0x00) - "paddh %[ftmp6], %[ftmp0], %[ftmp1] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0xc0) - MMI_ULWC1(%[ftmp4], %[src], 0x00) - "paddh %[ftmp6], %[ftmp1], %[ftmp2] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ftmp4] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0xf0) - MMI_ULWC1(%[ftmp5], %[src], 0x00) - "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x120) - MMI_ULWC1(%[ftmp0], %[src], 0x00) - "paddh %[ftmp6], %[ftmp3], %[ftmp4] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x150) - "bne %[size], 0x10, 2f \n\t" - - MMI_ULWC1(%[ftmp1], %[src], 0x00) - "paddh %[ftmp6], %[ftmp4], %[ftmp5] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x180) - MMI_ULWC1(%[ftmp2], %[src], 0x00) - "paddh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - 
"punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x1b0) - MMI_ULWC1(%[ftmp3], %[src], 0x00) - "paddh %[ftmp6], %[ftmp0], %[ftmp1] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x1e0) - MMI_ULWC1(%[ftmp4], %[src], 0x00) - "paddh %[ftmp6], %[ftmp1], %[ftmp2] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ftmp4] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x210) - MMI_ULWC1(%[ftmp5], %[src], 0x00) - "paddh %[ftmp6], %[ftmp2], %[ftmp3] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x240) - MMI_ULWC1(%[ftmp0], %[src], 0x00) - "paddh %[ftmp6], %[ftmp3], %[ftmp4] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" - "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x270) - MMI_ULWC1(%[ftmp1], %[src], 0x00) - "paddh %[ftmp6], %[ftmp4], %[ftmp5] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x2a0) - MMI_ULWC1(%[ftmp2], %[src], 0x00) - "paddh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - "psllh %[ftmp6], %[ftmp6], %[ftmp10] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "pmullh %[ftmp6], %[ftmp6], %[ff_pw_5] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" - MMI_SDC1(%[ftmp6], %[tmp], 0x2d0) - "2: \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), 
[ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [ftmp10]"=&f"(ftmp[10]), - [tmp0]"=&r"(tmp0), - RESTRICT_ASM_LOW32 - [src]"+&r"(src) - : [tmp]"r"(tmp), [size]"r"(size), - [srcStride]"r"((mips_reg)srcStride), - [ff_pw_5]"f"(ff_pw_5.f), [ff_pw_16]"f"(ff_pw_16.f) - : "memory" - ); - - tmp += 4; - src += 4 - (size + 5) * srcStride; - } -} - -static void put_h264_qpel8or16_hv2_lowpass_mmi(uint8_t *dst, - int16_t *tmp, ptrdiff_t dstStride, ptrdiff_t tmpStride, int size) -{ - int w = size >> 4; - double ftmp[10]; - uint64_t tmp0; - DECLARE_VAR_ALL64; - - do { - int h = size; - - __asm__ volatile ( - "dli %[tmp0], 0x02 \n\t" - "mtc1 %[tmp0], %[ftmp8] \n\t" - "dli %[tmp0], 0x06 \n\t" - "mtc1 %[tmp0], %[ftmp9] \n\t" - "1: \n\t" - MMI_LDC1(%[ftmp0], %[tmp], 0x00) - MMI_LDC1(%[ftmp3], %[tmp], 0x08) - MMI_LDC1(%[ftmp6], %[tmp], 0x10) - MMI_ULDC1(%[ftmp1], %[tmp], 0x02) - MMI_ULDC1(%[ftmp4], %[tmp], 0x0a) - MMI_ULDC1(%[ftmp5], %[tmp], 0x12) - "paddh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp3] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ftmp6] \n\t" - MMI_ULDC1(%[ftmp2], %[tmp], 0x04) - MMI_ULDC1(%[ftmp6], %[tmp], 0x06) - MMI_ULDC1(%[ftmp5], %[tmp], 0x0c) - MMI_ULDC1(%[ftmp7], %[tmp], 0x0e) - "paddh %[ftmp2], %[ftmp2], %[ftmp6] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" - "psubh %[ftmp0], %[ftmp0], %[ftmp1] \n\t" - "psubh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" - "psrah %[ftmp0], %[ftmp0], %[ftmp8] \n\t" - "psrah %[ftmp3], %[ftmp3], %[ftmp8] \n\t" - "psubh %[ftmp0], %[ftmp0], %[ftmp1] \n\t" - "psubh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" - "paddsh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" - "paddsh %[ftmp3] , %[ftmp3], %[ftmp5] \n\t" - "psrah %[ftmp0], %[ftmp0], %[ftmp8] \n\t" - "psrah %[ftmp3], %[ftmp3], %[ftmp8] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" - "psrah %[ftmp0], %[ftmp0], %[ftmp9] \n\t" - "psrah %[ftmp3], %[ftmp3], %[ftmp9] \n\t" - "packushb %[ftmp0], %[ftmp0], %[ftmp3] \n\t" - "addi %[h], %[h], -0x01 \n\t" - MMI_SDC1(%[ftmp0], %[dst], 0x00) - PTR_ADDIU "%[tmp], %[tmp], 0x30 \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "bnez %[h], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [tmp0]"=&r"(tmp0), - RESTRICT_ASM_ALL64 - [tmp]"+&r"(tmp), [dst]"+&r"(dst), - [h]"+&r"(h) - : [dstStride]"r"((mips_reg)dstStride) - : "memory" - ); - - tmp += 8 - size * 24; - dst += 8 - size * dstStride; - } while (w--); -} - -static void put_h264_qpel8or16_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp, - const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t tmpStride, - ptrdiff_t srcStride, int size) -{ - put_h264_qpel8or16_hv1_lowpass_mmi(tmp, src, tmpStride, srcStride, size); - put_h264_qpel8or16_hv2_lowpass_mmi(dst, tmp, dstStride, tmpStride, size); -} - -static void put_h264_qpel8_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp, - const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t tmpStride, - ptrdiff_t srcStride) -{ - put_h264_qpel8or16_hv_lowpass_mmi(dst, tmp, src, dstStride, tmpStride, - srcStride, 8); -} - -static void put_h264_qpel16_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp, - const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t tmpStride, - ptrdiff_t srcStride) -{ - put_h264_qpel8or16_hv_lowpass_mmi(dst, tmp, src, dstStride, 
tmpStride, - srcStride, 16); -} - -static void put_h264_qpel8_h_lowpass_l2_mmi(uint8_t *dst, const uint8_t *src, - const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride) -{ - int h = 8; - double ftmp[9]; - uint64_t tmp[1]; - DECLARE_VAR_LOW32; - DECLARE_VAR_ALL64; - - __asm__ volatile ( - "dli %[tmp0], 0x02 \n\t" - "mtc1 %[tmp0], %[ftmp7] \n\t" - "dli %[tmp0], 0x05 \n\t" - "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "mtc1 %[tmp0], %[ftmp8] \n\t" - "1: \n\t" - MMI_ULDC1(%[ftmp1], %[src], 0x00) - MMI_ULDC1(%[ftmp3], %[src], 0x01) - "punpckhbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "punpckhbh %[ftmp4], %[ftmp3], %[ftmp0] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp4] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp3] \n\t" - "psllh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "psllh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - MMI_ULDC1(%[ftmp3], %[src], -0x01) - MMI_ULDC1(%[ftmp5], %[src], 0x02) - "punpckhbh %[ftmp4], %[ftmp3], %[ftmp0] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" - "punpckhbh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" - "psubh %[ftmp2], %[ftmp2], %[ftmp6] \n\t" - "psubh %[ftmp1], %[ftmp1], %[ftmp3] \n\t" - "pmullh %[ftmp2], %[ftmp2], %[ff_pw_5] \n\t" - "pmullh %[ftmp1], %[ftmp1], %[ff_pw_5] \n\t" - MMI_ULWC1(%[ftmp3], %[src], -0x02) - MMI_ULWC1(%[ftmp6], %[src], 0x07) - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" - "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ff_pw_16] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ff_pw_16] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp3] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp5] \n\t" - "psrah %[ftmp1], %[ftmp1], %[ftmp8] \n\t" - "psrah %[ftmp2], %[ftmp2], %[ftmp8] \n\t" - MMI_LDC1(%[ftmp5], %[src2], 0x00) - "packushb %[ftmp1], %[ftmp1], %[ftmp2] \n\t" - PTR_ADDU "%[src], %[src], %[dstStride] \n\t" - "pavgb %[ftmp1], %[ftmp1], %[ftmp5] \n\t" - PTR_ADDU "%[h], %[h], -0x01 \n\t" - MMI_SDC1(%[ftmp1], %[dst], 0x00) - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - PTR_ADDU "%[src2], %[src2], %[src2Stride] \n\t" - "bgtz %[h], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), - [tmp0]"=&r"(tmp[0]), - RESTRICT_ASM_LOW32 - RESTRICT_ASM_ALL64 - [src]"+&r"(src), [dst]"+&r"(dst), - [src2]"+&r"(src2), [h]"+&r"(h) - : [src2Stride]"r"((mips_reg)src2Stride), - [dstStride]"r"((mips_reg)dstStride), - [ff_pw_5]"f"(ff_pw_5.f), [ff_pw_16]"f"(ff_pw_16.f) - : "memory" - ); -} - -static void put_pixels8_l2_shift5_mmi(uint8_t *dst, int16_t *src16, - const uint8_t *src8, ptrdiff_t dstStride, ptrdiff_t src8Stride, int h) -{ - double ftmp[7]; - uint64_t tmp0; - DECLARE_VAR_ALL64; - DECLARE_VAR_ADDRT; - - do { - __asm__ volatile ( - "dli %[tmp0], 0x05 \n\t" - MMI_ULDC1(%[ftmp0], %[src16], 0x00) - "mtc1 %[tmp0], %[ftmp6] \n\t" - MMI_ULDC1(%[ftmp1], %[src16], 0x08) - MMI_ULDC1(%[ftmp2], %[src16], 0x30) - MMI_ULDC1(%[ftmp3], %[src16], 0x38) - "psrah %[ftmp0], %[ftmp0], %[ftmp6] \n\t" - "psrah %[ftmp1], %[ftmp1], %[ftmp6] \n\t" - "psrah %[ftmp2], %[ftmp2], %[ftmp6] \n\t" - "psrah %[ftmp3], %[ftmp3], %[ftmp6] \n\t" - "packushb %[ftmp0], %[ftmp0], %[ftmp1] \n\t" - "packushb 
%[ftmp2], %[ftmp2], %[ftmp3] \n\t" - MMI_LDC1(%[ftmp5], %[src8], 0x00) - MMI_LDXC1(%[ftmp4], %[src8], %[src8Stride], 0x00) - "pavgb %[ftmp0], %[ftmp0], %[ftmp5] \n\t" - "pavgb %[ftmp2], %[ftmp2], %[ftmp4] \n\t" - MMI_SDC1(%[ftmp0], %[dst], 0x00) - MMI_SDXC1(%[ftmp2], %[dst], %[dstStride], 0x00) - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), - RESTRICT_ASM_ALL64 - RESTRICT_ASM_ADDRT - [tmp0]"=&r"(tmp0) - : [src8]"r"(src8), [src16]"r"(src16), - [dst]"r"(dst), - [src8Stride]"r"((mips_reg)src8Stride), - [dstStride]"r"((mips_reg)dstStride) - : "memory" - ); - - src8 += 2 * src8Stride; - src16 += 48; - dst += 2 * dstStride; - } while (h -= 2); -} - -static void put_h264_qpel16_h_lowpass_l2_mmi(uint8_t *dst, const uint8_t *src, - const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride) -{ - put_h264_qpel8_h_lowpass_l2_mmi(dst, src, src2, dstStride, src2Stride); - put_h264_qpel8_h_lowpass_l2_mmi(dst + 8, src + 8, src2 + 8, dstStride, - src2Stride); - - src += 8 * dstStride; - dst += 8 * dstStride; - src2 += 8 * src2Stride; - - put_h264_qpel8_h_lowpass_l2_mmi(dst, src, src2, dstStride, src2Stride); - put_h264_qpel8_h_lowpass_l2_mmi(dst + 8, src + 8, src2 + 8, dstStride, - src2Stride); -} - -static void put_pixels16_l2_shift5_mmi(uint8_t *dst, int16_t *src16, - const uint8_t *src8, ptrdiff_t dstStride, ptrdiff_t src8Stride, int h) -{ - put_pixels8_l2_shift5_mmi(dst, src16, src8, dstStride, src8Stride, h); - put_pixels8_l2_shift5_mmi(dst + 8, src16 + 8, src8 + 8, dstStride, - src8Stride, h); -} - -static void avg_h264_qpel4_hv_lowpass_mmi(uint8_t *dst, const uint8_t *src, - int dstStride, int srcStride) -{ - INIT_CLIP - int i; - int16_t _tmp[36]; - int16_t *tmp = _tmp; - double ftmp[10]; - uint64_t tmp0; - DECLARE_VAR_LOW32; - - src -= 2*srcStride; - - __asm__ volatile ( - "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "dli %[tmp0], 0x09 \n\t" - "1: \n\t" - MMI_ULWC1(%[ftmp1], %[src], -0x02) - MMI_ULWC1(%[ftmp2], %[src], -0x01) - MMI_ULWC1(%[ftmp3], %[src], 0x00) - MMI_ULWC1(%[ftmp4], %[src], 0x01) - MMI_ULWC1(%[ftmp5], %[src], 0x02) - MMI_ULWC1(%[ftmp6], %[src], 0x03) - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" - "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" - "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" - "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "paddsh %[ftmp7], %[ftmp3], %[ftmp4] \n\t" - "paddsh %[ftmp8], %[ftmp2], %[ftmp5] \n\t" - "paddsh %[ftmp9], %[ftmp1], %[ftmp6] \n\t" - "pmullh %[ftmp7], %[ftmp7], %[ff_pw_20] \n\t" - "pmullh %[ftmp8], %[ftmp8], %[ff_pw_5] \n\t" - "psubsh %[ftmp7], %[ftmp7], %[ftmp8] \n\t" - "paddsh %[ftmp9], %[ftmp7], %[ftmp9] \n\t" - MMI_SDC1(%[ftmp9], %[tmp], 0x00) - "daddi %[tmp0], %[tmp0], -0x01 \n\t" - PTR_ADDU "%[src], %[src], %[srcStride] \n\t" - PTR_ADDU "%[tmp], %[tmp], %[tmpStride] \n\t" - "bnez %[tmp0], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [tmp0]"=&r"(tmp0), - RESTRICT_ASM_LOW32 - [tmp]"+&r"(tmp), [src]"+&r"(src) - : [tmpStride]"r"(8), - [srcStride]"r"((mips_reg)srcStride), - [ff_pw_20]"f"(ff_pw_20.f), [ff_pw_5]"f"(ff_pw_5.f) - : "memory" - ); - - tmp -= 28; - - for (i=0; i<4; i++) { - const int16_t tmpB= tmp[-8]; - 
const int16_t tmpA= tmp[-4]; - const int16_t tmp0= tmp[ 0]; - const int16_t tmp1= tmp[ 4]; - const int16_t tmp2= tmp[ 8]; - const int16_t tmp3= tmp[12]; - const int16_t tmp4= tmp[16]; - const int16_t tmp5= tmp[20]; - const int16_t tmp6= tmp[24]; - op2_avg(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3)); - op2_avg(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4)); - op2_avg(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5)); - op2_avg(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6)); - dst++; - tmp++; - } -} - -static void avg_h264_qpel8or16_hv2_lowpass_mmi(uint8_t *dst, - int16_t *tmp, ptrdiff_t dstStride, ptrdiff_t tmpStride, int size) -{ - int w = size >> 4; - double ftmp[11]; - uint64_t tmp0; - DECLARE_VAR_ALL64; - - do { - int h = size; - __asm__ volatile ( - "dli %[tmp0], 0x02 \n\t" - "mtc1 %[tmp0], %[ftmp9] \n\t" - "dli %[tmp0], 0x06 \n\t" - "mtc1 %[tmp0], %[ftmp10] \n\t" - "1: \n\t" - MMI_LDC1(%[ftmp0], %[tmp], 0x00) - MMI_LDC1(%[ftmp3], %[tmp], 0x08) - MMI_ULDC1(%[ftmp1], %[tmp], 0x02) - MMI_ULDC1(%[ftmp4], %[tmp], 0x0a) - MMI_LDC1(%[ftmp7], %[tmp], 0x10) - MMI_ULDC1(%[ftmp8], %[tmp], 0x12) - "paddh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp3] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp8] \n\t" - "paddh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" - MMI_ULDC1(%[ftmp2], %[tmp], 0x04) - MMI_ULDC1(%[ftmp5], %[tmp], 0x0c) - MMI_ULDC1(%[ftmp7], %[tmp], 0x06) - MMI_ULDC1(%[ftmp8], %[tmp], 0x0e) - "paddh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ftmp8] \n\t" - "psubh %[ftmp0], %[ftmp0], %[ftmp1] \n\t" - "psubh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" - "psrah %[ftmp0], %[ftmp0], %[ftmp9] \n\t" - "psrah %[ftmp3], %[ftmp3], %[ftmp9] \n\t" - "psubh %[ftmp0], %[ftmp0], %[ftmp1] \n\t" - "psubh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" - "paddsh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" - "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" - "psrah %[ftmp0], %[ftmp0], %[ftmp9] \n\t" - "psrah %[ftmp3], %[ftmp3], %[ftmp9] \n\t" - "paddh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" - "psrah %[ftmp0], %[ftmp0], %[ftmp10] \n\t" - "psrah %[ftmp3], %[ftmp3], %[ftmp10] \n\t" - "packushb %[ftmp0], %[ftmp0], %[ftmp3] \n\t" - MMI_LDC1(%[ftmp6], %[dst], 0x00) - "pavgb %[ftmp0], %[ftmp0], %[ftmp6] \n\t" - MMI_SDC1(%[ftmp0], %[dst], 0x00) - "addi %[h], %[h], -0x01 \n\t" - PTR_ADDI "%[tmp], %[tmp], 0x30 \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - "bnez %[h], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [ftmp10]"=&f"(ftmp[10]), - [tmp0]"=&r"(tmp0), - RESTRICT_ASM_ALL64 - [tmp]"+&r"(tmp), [dst]"+&r"(dst), - [h]"+&r"(h) - : [dstStride]"r"((mips_reg)dstStride) - : "memory" - ); - - tmp += 8 - size * 24; - dst += 8 - size * dstStride; - } while (w--); -} - -static void avg_h264_qpel8or16_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp, - const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t tmpStride, - ptrdiff_t srcStride, int size) -{ - put_h264_qpel8or16_hv1_lowpass_mmi(tmp, src, tmpStride, srcStride, size); - avg_h264_qpel8or16_hv2_lowpass_mmi(dst, tmp, dstStride, tmpStride, size); -} - -static void avg_h264_qpel8_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp, - const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t tmpStride, - ptrdiff_t srcStride) -{ - avg_h264_qpel8or16_hv_lowpass_mmi(dst, tmp, src, 
dstStride, tmpStride, - srcStride, 8); -} - -static void avg_h264_qpel16_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp, - const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t tmpStride, - ptrdiff_t srcStride) -{ - avg_h264_qpel8or16_hv_lowpass_mmi(dst, tmp, src, dstStride, tmpStride, - srcStride, 16); -} - -static void avg_h264_qpel8_h_lowpass_l2_mmi(uint8_t *dst, const uint8_t *src, - const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride) -{ - double ftmp[10]; - uint64_t tmp[2]; - DECLARE_VAR_LOW32; - DECLARE_VAR_ALL64; - - __asm__ volatile ( - "dli %[tmp1], 0x02 \n\t" - "ori %[tmp0], $0, 0x8 \n\t" - "mtc1 %[tmp1], %[ftmp7] \n\t" - "dli %[tmp1], 0x05 \n\t" - "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" - "mtc1 %[tmp1], %[ftmp8] \n\t" - "1: \n\t" - MMI_ULDC1(%[ftmp1], %[src], 0x00) - MMI_ULDC1(%[ftmp2], %[src], 0x01) - "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" - "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" - "punpckhbh %[ftmp4], %[ftmp2], %[ftmp0] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp2] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" - "psllh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" - "psllh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" - MMI_ULDC1(%[ftmp2], %[src], -0x01) - MMI_ULDC1(%[ftmp5], %[src], 0x02) - "punpckhbh %[ftmp4], %[ftmp2], %[ftmp0] \n\t" - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" - "punpckhbh %[ftmp6], %[ftmp5], %[ftmp0] \n\t" - "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp5] \n\t" - "paddh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" - "psubh %[ftmp1], %[ftmp1], %[ftmp2] \n\t" - "psubh %[ftmp3], %[ftmp3], %[ftmp6] \n\t" - "pmullh %[ftmp1], %[ftmp1], %[ff_pw_5] \n\t" - "pmullh %[ftmp3], %[ftmp3], %[ff_pw_5] \n\t" - MMI_ULWC1(%[ftmp2], %[src], -0x02) - MMI_ULWC1(%[ftmp6], %[src], 0x07) - "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" - "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ftmp4] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" - "paddh %[ftmp2], %[ftmp2], %[ff_pw_16] \n\t" - "paddh %[ftmp5], %[ftmp5], %[ff_pw_16] \n\t" - "paddh %[ftmp1], %[ftmp1], %[ftmp2] \n\t" - "paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" - "psrah %[ftmp1], %[ftmp1], %[ftmp8] \n\t" - "psrah %[ftmp3], %[ftmp3], %[ftmp8] \n\t" - MMI_LDC1(%[ftmp5], %[src2], 0x00) - "packushb %[ftmp1], %[ftmp1], %[ftmp3] \n\t" - MMI_LDC1(%[ftmp9], %[dst], 0x00) - "pavgb %[ftmp1], %[ftmp1], %[ftmp5] \n\t" - "pavgb %[ftmp1], %[ftmp1], %[ftmp9] \n\t" - PTR_ADDU "%[src], %[src], %[dstStride] \n\t" - MMI_SDC1(%[ftmp1], %[dst], 0x00) - "daddi %[tmp0], %[tmp0], -0x01 \n\t" - PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t" - PTR_ADDU "%[src2], %[src2], %[src2Stride] \n\t" - "bgtz %[tmp0], 1b \n\t" - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), - [tmp0]"=&r"(tmp[0]), [tmp1]"=&r"(tmp[1]), - RESTRICT_ASM_LOW32 - RESTRICT_ASM_ALL64 - [dst]"+&r"(dst), [src]"+&r"(src), - [src2]"+&r"(src2) - : [dstStride]"r"((mips_reg)dstStride), - [src2Stride]"r"((mips_reg)src2Stride), - [ff_pw_5]"f"(ff_pw_5.f), [ff_pw_16]"f"(ff_pw_16.f) - : "memory" - ); -} - -static void avg_h264_qpel16_h_lowpass_l2_mmi(uint8_t *dst, const uint8_t *src, - const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride) -{ - avg_h264_qpel8_h_lowpass_l2_mmi(dst, src, src2, dstStride, src2Stride); - avg_h264_qpel8_h_lowpass_l2_mmi(dst + 8, src + 8, src2 + 8, dstStride, - 
src2Stride); - - src += 8 * dstStride; - dst += 8 * dstStride; - src2 += 8 * src2Stride; - - avg_h264_qpel8_h_lowpass_l2_mmi(dst, src, src2, dstStride, src2Stride); - avg_h264_qpel8_h_lowpass_l2_mmi(dst + 8, src + 8, src2 + 8, dstStride, - src2Stride); -} - -static void avg_pixels8_l2_shift5_mmi(uint8_t *dst, int16_t *src16, - const uint8_t *src8, ptrdiff_t dstStride, ptrdiff_t src8Stride, int b) -{ - double ftmp[8]; - uint64_t tmp0; - DECLARE_VAR_ALL64; - DECLARE_VAR_ADDRT; - - do { - __asm__ volatile ( - "dli %[tmp0], 0x05 \n\t" - MMI_ULDC1(%[ftmp0], %[src16], 0x00) - "mtc1 %[tmp0], %[ftmp6] \n\t" - MMI_ULDC1(%[ftmp1], %[src16], 0x08) - MMI_ULDC1(%[ftmp2], %[src16], 0x30) - MMI_ULDC1(%[ftmp3], %[src16], 0x38) - "psrah %[ftmp0], %[ftmp0], %[ftmp6] \n\t" - "psrah %[ftmp1], %[ftmp1], %[ftmp6] \n\t" - "psrah %[ftmp2], %[ftmp2], %[ftmp6] \n\t" - "psrah %[ftmp3], %[ftmp3], %[ftmp6] \n\t" - "packushb %[ftmp0], %[ftmp0], %[ftmp1] \n\t" - MMI_LDC1(%[ftmp4], %[src8], 0x00) - MMI_LDXC1(%[ftmp5], %[src8], %[src8Stride], 0x00) - "packushb %[ftmp2], %[ftmp2], %[ftmp3] \n\t" - "pavgb %[ftmp0], %[ftmp0], %[ftmp4] \n\t" - "pavgb %[ftmp2], %[ftmp2], %[ftmp5] \n\t" - MMI_LDC1(%[ftmp7], %[dst], 0x00) - "pavgb %[ftmp0], %[ftmp0], %[ftmp7] \n\t" - MMI_SDC1(%[ftmp0], %[dst], 0x00) - MMI_LDXC1(%[ftmp7], %[dst], %[dstStride], 0x00) - "pavgb %[ftmp2], %[ftmp2], %[ftmp7] \n\t" - MMI_SDXC1(%[ftmp2], %[dst], %[dstStride], 0x00) - : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), - [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), - [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), - [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), - RESTRICT_ASM_ALL64 - RESTRICT_ASM_ADDRT - [tmp0]"=&r"(tmp0) - : [src8]"r"(src8), [src16]"r"(src16), - [dst]"r"(dst), - [src8Stride]"r"((mips_reg)src8Stride), - [dstStride]"r"((mips_reg)dstStride) - : "memory" - ); - - src8 += 2 * src8Stride; - src16 += 48; - dst += 2 * dstStride; - } while (b -= 2); -} - -static void avg_pixels16_l2_shift5_mmi(uint8_t *dst, int16_t *src16, - const uint8_t *src8, ptrdiff_t dstStride, ptrdiff_t src8Stride, int b) -{ - avg_pixels8_l2_shift5_mmi(dst, src16, src8, dstStride, src8Stride, b); - avg_pixels8_l2_shift5_mmi(dst + 8, src16 + 8, src8 + 8, dstStride, - src8Stride, b); -} - -//DEF_H264_MC_MMI(put_, 4) -void ff_put_h264_qpel4_mc00_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - ff_put_pixels4_8_mmi(dst, src, stride, 4); -} - -void ff_put_h264_qpel4_mc10_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t half[16]; - put_h264_qpel4_h_lowpass_mmi(half, src, 4, stride); - ff_put_pixels4_l2_8_mmi(dst, src, half, stride, stride, 4, 4); -} - -void ff_put_h264_qpel4_mc20_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - put_h264_qpel4_h_lowpass_mmi(dst, src, stride, stride); -} - -void ff_put_h264_qpel4_mc30_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t half[16]; - put_h264_qpel4_h_lowpass_mmi(half, src, 4, stride); - ff_put_pixels4_l2_8_mmi(dst, src+1, half, stride, stride, 4, 4); -} - -void ff_put_h264_qpel4_mc01_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t half[16]; - copy_block4_mmi(full, src - stride*2, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(half, full_mid, 4, 4); - ff_put_pixels4_l2_8_mmi(dst, full_mid, half, stride, 4, 4, 4); -} - -void ff_put_h264_qpel4_mc02_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - copy_block4_mmi(full, src - 
stride*2, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(dst, full_mid, stride, 4); -} - -void ff_put_h264_qpel4_mc03_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t half[16]; - copy_block4_mmi(full, src - stride*2, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(half, full_mid, 4, 4); - ff_put_pixels4_l2_8_mmi(dst, full_mid+4, half, stride, 4, 4, 4); -} - -void ff_put_h264_qpel4_mc11_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfH[16]; - uint8_t halfV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src, 4, stride); - copy_block4_mmi(full, src - stride*2, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - ff_put_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4); -} - -void ff_put_h264_qpel4_mc31_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfH[16]; - uint8_t halfV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src, 4, stride); - copy_block4_mmi(full, src - stride*2 + 1, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - ff_put_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4); -} - -void ff_put_h264_qpel4_mc13_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfH[16]; - uint8_t halfV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src + stride, 4, stride); - copy_block4_mmi(full, src - stride*2, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - ff_put_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4); -} - -void ff_put_h264_qpel4_mc33_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfH[16]; - uint8_t halfV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src + stride, 4, stride); - copy_block4_mmi(full, src - stride*2 + 1, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - ff_put_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4); -} - -void ff_put_h264_qpel4_mc22_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - put_h264_qpel4_hv_lowpass_mmi(dst, src, stride, stride); -} - -void ff_put_h264_qpel4_mc21_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t halfH[16]; - uint8_t halfHV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src, 4, stride); - put_h264_qpel4_hv_lowpass_mmi(halfHV, src, 4, stride); - ff_put_pixels4_l2_8_mmi(dst, halfH, halfHV, stride, 4, 4, 4); -} - -void ff_put_h264_qpel4_mc23_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t halfH[16]; - uint8_t halfHV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src + stride, 4, stride); - put_h264_qpel4_hv_lowpass_mmi(halfHV, src, 4, stride); - ff_put_pixels4_l2_8_mmi(dst, halfH, halfHV, stride, 4, 4, 4); -} - -void ff_put_h264_qpel4_mc12_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfV[16]; - uint8_t halfHV[16]; - copy_block4_mmi(full, src - stride*2, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - put_h264_qpel4_hv_lowpass_mmi(halfHV, src, 4, stride); - ff_put_pixels4_l2_8_mmi(dst, halfV, halfHV, stride, 4, 4, 4); -} - -void ff_put_h264_qpel4_mc32_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfV[16]; - uint8_t 
halfHV[16]; - copy_block4_mmi(full, src - stride*2 + 1, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - put_h264_qpel4_hv_lowpass_mmi(halfHV, src, 4, stride); - ff_put_pixels4_l2_8_mmi(dst, halfV, halfHV, stride, 4, 4, 4); -} - -//DEF_H264_MC_MMI(avg_, 4) -void ff_avg_h264_qpel4_mc00_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - ff_avg_pixels4_8_mmi(dst, src, stride, 4); -} - -void ff_avg_h264_qpel4_mc10_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t half[16]; - put_h264_qpel4_h_lowpass_mmi(half, src, 4, stride); - ff_avg_pixels4_l2_8_mmi(dst, src, half, stride, stride, 4, 4); -} - -void ff_avg_h264_qpel4_mc20_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - avg_h264_qpel4_h_lowpass_mmi(dst, src, stride, stride); -} - -void ff_avg_h264_qpel4_mc30_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t half[16]; - put_h264_qpel4_h_lowpass_mmi(half, src, 4, stride); - ff_avg_pixels4_l2_8_mmi(dst, src+1, half, stride, stride, 4, 4); -} - -void ff_avg_h264_qpel4_mc01_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t half[16]; - copy_block4_mmi(full, src - stride*2, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(half, full_mid, 4, 4); - ff_avg_pixels4_l2_8_mmi(dst, full_mid, half, stride, 4, 4, 4); -} - -void ff_avg_h264_qpel4_mc02_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - copy_block4_mmi(full, src - stride*2, 4, stride, 9); - avg_h264_qpel4_v_lowpass_mmi(dst, full_mid, stride, 4); -} - -void ff_avg_h264_qpel4_mc03_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t half[16]; - copy_block4_mmi(full, src - stride*2, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(half, full_mid, 4, 4); - ff_avg_pixels4_l2_8_mmi(dst, full_mid+4, half, stride, 4, 4, 4); -} - -void ff_avg_h264_qpel4_mc11_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfH[16]; - uint8_t halfV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src, 4, stride); - copy_block4_mmi(full, src - stride*2, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - ff_avg_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4); -} - -void ff_avg_h264_qpel4_mc31_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfH[16]; - uint8_t halfV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src, 4, stride); - copy_block4_mmi(full, src - stride*2 + 1, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - ff_avg_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4); -} - -void ff_avg_h264_qpel4_mc13_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfH[16]; - uint8_t halfV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src + stride, 4, stride); - copy_block4_mmi(full, src - stride*2, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - ff_avg_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4); -} - -void ff_avg_h264_qpel4_mc33_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfH[16]; - uint8_t halfV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src + stride, 4, stride); - 
copy_block4_mmi(full, src - stride*2 + 1, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - ff_avg_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4); -} - -void ff_avg_h264_qpel4_mc22_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - avg_h264_qpel4_hv_lowpass_mmi(dst, src, stride, stride); -} - -void ff_avg_h264_qpel4_mc21_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t halfH[16]; - uint8_t halfHV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src, 4, stride); - put_h264_qpel4_hv_lowpass_mmi(halfHV, src, 4, stride); - ff_avg_pixels4_l2_8_mmi(dst, halfH, halfHV, stride, 4, 4, 4); -} - -void ff_avg_h264_qpel4_mc23_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t halfH[16]; - uint8_t halfHV[16]; - put_h264_qpel4_h_lowpass_mmi(halfH, src + stride, 4, stride); - put_h264_qpel4_hv_lowpass_mmi(halfHV, src, 4, stride); - ff_avg_pixels4_l2_8_mmi(dst, halfH, halfHV, stride, 4, 4, 4); -} - -void ff_avg_h264_qpel4_mc12_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfV[16]; - uint8_t halfHV[16]; - copy_block4_mmi(full, src - stride*2, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - put_h264_qpel4_hv_lowpass_mmi(halfHV, src, 4, stride); - ff_avg_pixels4_l2_8_mmi(dst, halfV, halfHV, stride, 4, 4, 4); -} - -void ff_avg_h264_qpel4_mc32_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[36]; - uint8_t * const full_mid= full + 8; - uint8_t halfV[16]; - uint8_t halfHV[16]; - copy_block4_mmi(full, src - stride*2 + 1, 4, stride, 9); - put_h264_qpel4_v_lowpass_mmi(halfV, full_mid, 4, 4); - put_h264_qpel4_hv_lowpass_mmi(halfHV, src, 4, stride); - ff_avg_pixels4_l2_8_mmi(dst, halfV, halfHV, stride, 4, 4, 4); -} - -//DEF_H264_MC_MMI(put_, 8) -void ff_put_h264_qpel8_mc00_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - ff_put_pixels8_8_mmi(dst, src, stride, 8); -} - -void ff_put_h264_qpel8_mc10_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t half[64]; - put_h264_qpel8_h_lowpass_mmi(half, src, 8, stride); - ff_put_pixels8_l2_8_mmi(dst, src, half, stride, stride, 8, 8); -} - -void ff_put_h264_qpel8_mc20_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - put_h264_qpel8_h_lowpass_mmi(dst, src, stride, stride); -} - -void ff_put_h264_qpel8_mc30_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t half[64]; - put_h264_qpel8_h_lowpass_mmi(half, src, 8, stride); - ff_put_pixels8_l2_8_mmi(dst, src+1, half, stride, stride, 8, 8); -} - -void ff_put_h264_qpel8_mc01_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t half[64]; - copy_block8_mmi(full, src - stride*2, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(half, full_mid, 8, 8); - ff_put_pixels8_l2_8_mmi(dst, full_mid, half, stride, 8, 8, 8); -} - -void ff_put_h264_qpel8_mc02_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - copy_block8_mmi(full, src - stride*2, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(dst, full_mid, stride, 8); -} - -void ff_put_h264_qpel8_mc03_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t half[64]; - copy_block8_mmi(full, src - stride*2, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(half, full_mid, 8, 8); - ff_put_pixels8_l2_8_mmi(dst, 
full_mid+8, half, stride, 8, 8, 8); -} - -void ff_put_h264_qpel8_mc11_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t halfH[64]; - uint8_t halfV[64]; - put_h264_qpel8_h_lowpass_mmi(halfH, src, 8, stride); - copy_block8_mmi(full, src - stride*2, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(halfV, full_mid, 8, 8); - ff_put_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8); -} - -void ff_put_h264_qpel8_mc31_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t halfH[64]; - uint8_t halfV[64]; - put_h264_qpel8_h_lowpass_mmi(halfH, src, 8, stride); - copy_block8_mmi(full, src - stride*2 + 1, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(halfV, full_mid, 8, 8); - ff_put_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8); -} - -void ff_put_h264_qpel8_mc13_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t halfH[64]; - uint8_t halfV[64]; - put_h264_qpel8_h_lowpass_mmi(halfH, src + stride, 8, stride); - copy_block8_mmi(full, src - stride*2, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(halfV, full_mid, 8, 8); - ff_put_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8); -} - -void ff_put_h264_qpel8_mc33_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t halfH[64]; - uint8_t halfV[64]; - put_h264_qpel8_h_lowpass_mmi(halfH, src + stride, 8, stride); - copy_block8_mmi(full, src - stride*2 + 1, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(halfV, full_mid, 8, 8); - ff_put_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8); -} - -void ff_put_h264_qpel8_mc22_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint16_t __attribute__ ((aligned(8))) temp[192]; - - put_h264_qpel8_hv_lowpass_mmi(dst, temp, src, stride, 8, stride); -} - -void ff_put_h264_qpel8_mc21_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[448]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 64); - - put_h264_qpel8_hv_lowpass_mmi(halfHV, halfV, src, 8, 8, stride); - put_h264_qpel8_h_lowpass_l2_mmi(dst, src, halfHV, stride, 8); -} - -void ff_put_h264_qpel8_mc23_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[448]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 64); - - put_h264_qpel8_hv_lowpass_mmi(halfHV, halfV, src, 8, 8, stride); - put_h264_qpel8_h_lowpass_l2_mmi(dst, src + stride, halfHV, stride, 8); -} - -void ff_put_h264_qpel8_mc12_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[448]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 64); - - put_h264_qpel8_hv_lowpass_mmi(halfHV, halfV, src, 8, 8, stride); - put_pixels8_l2_shift5_mmi(dst, halfV + 2, halfHV, stride, 8, 8); -} - -void ff_put_h264_qpel8_mc32_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[448]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 64); - - put_h264_qpel8_hv_lowpass_mmi(halfHV, halfV, src, 8, 8, stride); - put_pixels8_l2_shift5_mmi(dst, halfV + 3, halfHV, stride, 8, 8); -} - -//DEF_H264_MC_MMI(avg_, 8) -void ff_avg_h264_qpel8_mc00_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t 
stride) -{ - ff_avg_pixels8_8_mmi(dst, src, stride, 8); -} - -void ff_avg_h264_qpel8_mc10_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t half[64]; - put_h264_qpel8_h_lowpass_mmi(half, src, 8, stride); - ff_avg_pixels8_l2_8_mmi(dst, src, half, stride, stride, 8, 8); -} - -void ff_avg_h264_qpel8_mc20_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - avg_h264_qpel8_h_lowpass_mmi(dst, src, stride, stride); -} - -void ff_avg_h264_qpel8_mc30_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t half[64]; - put_h264_qpel8_h_lowpass_mmi(half, src, 8, stride); - ff_avg_pixels8_l2_8_mmi(dst, src+1, half, stride, stride, 8, 8); -} - -void ff_avg_h264_qpel8_mc01_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t half[64]; - copy_block8_mmi(full, src - stride*2, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(half, full_mid, 8, 8); - ff_avg_pixels8_l2_8_mmi(dst, full_mid, half, stride, 8, 8, 8); -} - -void ff_avg_h264_qpel8_mc02_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - copy_block8_mmi(full, src - stride*2, 8, stride, 13); - avg_h264_qpel8_v_lowpass_mmi(dst, full_mid, stride, 8); -} - -void ff_avg_h264_qpel8_mc03_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t half[64]; - copy_block8_mmi(full, src - stride*2, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(half, full_mid, 8, 8); - ff_avg_pixels8_l2_8_mmi(dst, full_mid+8, half, stride, 8, 8, 8); -} - -void ff_avg_h264_qpel8_mc11_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t halfH[64]; - uint8_t halfV[64]; - put_h264_qpel8_h_lowpass_mmi(halfH, src, 8, stride); - copy_block8_mmi(full, src - stride*2, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(halfV, full_mid, 8, 8); - ff_avg_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8); -} - -void ff_avg_h264_qpel8_mc31_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t halfH[64]; - uint8_t halfV[64]; - put_h264_qpel8_h_lowpass_mmi(halfH, src, 8, stride); - copy_block8_mmi(full, src - stride*2 + 1, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(halfV, full_mid, 8, 8); - ff_avg_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8); -} - -void ff_avg_h264_qpel8_mc13_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t halfH[64]; - uint8_t halfV[64]; - put_h264_qpel8_h_lowpass_mmi(halfH, src + stride, 8, stride); - copy_block8_mmi(full, src - stride*2, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(halfV, full_mid, 8, 8); - ff_avg_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8); -} - -void ff_avg_h264_qpel8_mc33_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[104]; - uint8_t * const full_mid= full + 16; - uint8_t halfH[64]; - uint8_t halfV[64]; - put_h264_qpel8_h_lowpass_mmi(halfH, src + stride, 8, stride); - copy_block8_mmi(full, src - stride*2 + 1, 8, stride, 13); - put_h264_qpel8_v_lowpass_mmi(halfV, full_mid, 8, 8); - ff_avg_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8); -} - -void ff_avg_h264_qpel8_mc22_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint16_t __attribute__ ((aligned(8))) temp[192]; - - 
avg_h264_qpel8_hv_lowpass_mmi(dst, temp, src, stride, 8, stride); -} - -void ff_avg_h264_qpel8_mc21_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[448]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 64); - - put_h264_qpel8_hv_lowpass_mmi(halfHV, halfV, src, 8, 8, stride); - avg_h264_qpel8_h_lowpass_l2_mmi(dst, src, halfHV, stride, 8); -} - -void ff_avg_h264_qpel8_mc23_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[448]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 64); - - put_h264_qpel8_hv_lowpass_mmi(halfHV, halfV, src, 8, 8, stride); - avg_h264_qpel8_h_lowpass_l2_mmi(dst, src + stride, halfHV, stride, 8); -} - -void ff_avg_h264_qpel8_mc12_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[448]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 64); - - put_h264_qpel8_hv_lowpass_mmi(halfHV, halfV, src, 8, 8, stride); - avg_pixels8_l2_shift5_mmi(dst, halfV + 2, halfHV, stride, 8, 8); -} - -void ff_avg_h264_qpel8_mc32_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[448]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 64); - - put_h264_qpel8_hv_lowpass_mmi(halfHV, halfV, src, 8, 8, stride); - avg_pixels8_l2_shift5_mmi(dst, halfV + 3, halfHV, stride, 8, 8); -} - -//DEF_H264_MC_MMI(put_, 16) -void ff_put_h264_qpel16_mc00_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - ff_put_pixels16_8_mmi(dst, src, stride, 16); -} - -void ff_put_h264_qpel16_mc10_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t half[256]; - put_h264_qpel16_h_lowpass_mmi(half, src, 16, stride); - ff_put_pixels16_l2_8_mmi(dst, src, half, stride, stride, 16, 16); -} - -void ff_put_h264_qpel16_mc20_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - put_h264_qpel16_h_lowpass_mmi(dst, src, stride, stride); -} - -void ff_put_h264_qpel16_mc30_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t half[256]; - put_h264_qpel16_h_lowpass_mmi(half, src, 16, stride); - ff_put_pixels16_l2_8_mmi(dst, src+1, half, stride, stride, 16, 16); -} - -void ff_put_h264_qpel16_mc01_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t half[256]; - copy_block16_mmi(full, src - stride*2, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(half, full_mid, 16, 16); - ff_put_pixels16_l2_8_mmi(dst, full_mid, half, stride, 16, 16, 16); -} - -void ff_put_h264_qpel16_mc02_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - copy_block16_mmi(full, src - stride*2, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(dst, full_mid, stride, 16); -} - -void ff_put_h264_qpel16_mc03_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t half[256]; - copy_block16_mmi(full, src - stride*2, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(half, full_mid, 16, 16); - ff_put_pixels16_l2_8_mmi(dst, full_mid+16, half, stride, 16, 16, 16); -} - -void ff_put_h264_qpel16_mc11_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t halfH[256]; - uint8_t halfV[256]; - 
put_h264_qpel16_h_lowpass_mmi(halfH, src, 16, stride); - copy_block16_mmi(full, src - stride*2, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(halfV, full_mid, 16, 16); - ff_put_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16); -} - -void ff_put_h264_qpel16_mc31_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t halfH[256]; - uint8_t halfV[256]; - put_h264_qpel16_h_lowpass_mmi(halfH, src, 16, stride); - copy_block16_mmi(full, src - stride*2 + 1, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(halfV, full_mid, 16, 16); - ff_put_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16); -} - -void ff_put_h264_qpel16_mc13_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t halfH[256]; - uint8_t halfV[256]; - put_h264_qpel16_h_lowpass_mmi(halfH, src + stride, 16, stride); - copy_block16_mmi(full, src - stride*2, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(halfV, full_mid, 16, 16); - ff_put_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16); -} - -void ff_put_h264_qpel16_mc33_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t halfH[256]; - uint8_t halfV[256]; - put_h264_qpel16_h_lowpass_mmi(halfH, src + stride, 16, stride); - copy_block16_mmi(full, src - stride*2 + 1, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(halfV, full_mid, 16, 16); - ff_put_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16); -} - -void ff_put_h264_qpel16_mc22_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint16_t __attribute__ ((aligned(8))) temp[384]; - - put_h264_qpel16_hv_lowpass_mmi(dst, temp, src, stride, 16, stride); -} - -void ff_put_h264_qpel16_mc21_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[1024]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 256); - - put_h264_qpel16_hv_lowpass_mmi(halfHV, halfV, src, 16, 16, stride); - put_h264_qpel16_h_lowpass_l2_mmi(dst, src, halfHV, stride, 16); -} - -void ff_put_h264_qpel16_mc23_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[1024]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 256); - - put_h264_qpel16_hv_lowpass_mmi(halfHV, halfV, src, 16, 16, stride); - put_h264_qpel16_h_lowpass_l2_mmi(dst, src + stride, halfHV, stride, 16); -} - -void ff_put_h264_qpel16_mc12_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[1024]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 256); - - put_h264_qpel16_hv_lowpass_mmi(halfHV, halfV, src, 16, 16, stride); - put_pixels16_l2_shift5_mmi(dst, halfV + 2, halfHV, stride, 16, 16); -} - -void ff_put_h264_qpel16_mc32_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[1024]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 256); - - put_h264_qpel16_hv_lowpass_mmi(halfHV, halfV, src, 16, 16, stride); - put_pixels16_l2_shift5_mmi(dst, halfV + 3, halfHV, stride, 16, 16); -} - -//DEF_H264_MC_MMI(avg_, 16) -void ff_avg_h264_qpel16_mc00_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - ff_avg_pixels16_8_mmi(dst, src, stride, 16); -} - -void ff_avg_h264_qpel16_mc10_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t 
stride) -{ - uint8_t half[256]; - put_h264_qpel16_h_lowpass_mmi(half, src, 16, stride); - ff_avg_pixels16_l2_8_mmi(dst, src, half, stride, stride, 16, 16); -} - -void ff_avg_h264_qpel16_mc20_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - avg_h264_qpel16_h_lowpass_mmi(dst, src, stride, stride); -} - -void ff_avg_h264_qpel16_mc30_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t half[256]; - put_h264_qpel16_h_lowpass_mmi(half, src, 16, stride); - ff_avg_pixels16_l2_8_mmi(dst, src+1, half, stride, stride, 16, 16); -} - -void ff_avg_h264_qpel16_mc01_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t half[256]; - copy_block16_mmi(full, src - stride*2, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(half, full_mid, 16, 16); - ff_avg_pixels16_l2_8_mmi(dst, full_mid, half, stride, 16, 16, 16); -} - -void ff_avg_h264_qpel16_mc02_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - copy_block16_mmi(full, src - stride*2, 16, stride, 21); - avg_h264_qpel16_v_lowpass_mmi(dst, full_mid, stride, 16); -} - -void ff_avg_h264_qpel16_mc03_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t half[256]; - copy_block16_mmi(full, src - stride*2, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(half, full_mid, 16, 16); - ff_avg_pixels16_l2_8_mmi(dst, full_mid+16, half, stride, 16, 16, 16); -} - -void ff_avg_h264_qpel16_mc11_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t halfH[256]; - uint8_t halfV[256]; - put_h264_qpel16_h_lowpass_mmi(halfH, src, 16, stride); - copy_block16_mmi(full, src - stride*2, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(halfV, full_mid, 16, 16); - ff_avg_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16); -} - -void ff_avg_h264_qpel16_mc31_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t halfH[256]; - uint8_t halfV[256]; - put_h264_qpel16_h_lowpass_mmi(halfH, src, 16, stride); - copy_block16_mmi(full, src - stride*2 + 1, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(halfV, full_mid, 16, 16); - ff_avg_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16); -} - -void ff_avg_h264_qpel16_mc13_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t halfH[256]; - uint8_t halfV[256]; - put_h264_qpel16_h_lowpass_mmi(halfH, src + stride, 16, stride); - copy_block16_mmi(full, src - stride*2, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(halfV, full_mid, 16, 16); - ff_avg_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16); -} - -void ff_avg_h264_qpel16_mc33_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t full[336]; - uint8_t * const full_mid= full + 32; - uint8_t halfH[256]; - uint8_t halfV[256]; - put_h264_qpel16_h_lowpass_mmi(halfH, src + stride, 16, stride); - copy_block16_mmi(full, src - stride*2 + 1, 16, stride, 21); - put_h264_qpel16_v_lowpass_mmi(halfV, full_mid, 16, 16); - ff_avg_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16); -} - -void ff_avg_h264_qpel16_mc22_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint16_t __attribute__ ((aligned(8))) temp[384]; - - avg_h264_qpel16_hv_lowpass_mmi(dst, temp, src, stride, 16, 
stride); -} - -void ff_avg_h264_qpel16_mc21_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[1024]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 256); - - put_h264_qpel16_hv_lowpass_mmi(halfHV, halfV, src, 16, 16, stride); - avg_h264_qpel16_h_lowpass_l2_mmi(dst, src, halfHV, stride, 16); -} - -void ff_avg_h264_qpel16_mc23_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[1024]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 256); - - put_h264_qpel16_hv_lowpass_mmi(halfHV, halfV, src, 16, 16, stride); - avg_h264_qpel16_h_lowpass_l2_mmi(dst, src + stride, halfHV, stride, 16); -} - -void ff_avg_h264_qpel16_mc12_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[1024]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 256); - - put_h264_qpel16_hv_lowpass_mmi(halfHV, halfV, src, 16, 16, stride); - avg_pixels16_l2_shift5_mmi(dst, halfV + 2, halfHV, stride, 16, 16); -} - -void ff_avg_h264_qpel16_mc32_mmi(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride) -{ - uint8_t __attribute__ ((aligned(8))) temp[1024]; - uint8_t *const halfHV = temp; - int16_t *const halfV = (int16_t *) (temp + 256); - - put_h264_qpel16_hv_lowpass_mmi(halfHV, halfV, src, 16, 16, stride); - avg_pixels16_l2_shift5_mmi(dst, halfV + 3, halfHV, stride, 16, 16); -} - -#undef op2_avg -#undef op2_put diff --git a/spaces/congsaPfin/Manga-OCR/logs/Beach Buggy Racing Mod Apk How to Download and Install from MediaFire.md b/spaces/congsaPfin/Manga-OCR/logs/Beach Buggy Racing Mod Apk How to Download and Install from MediaFire.md deleted file mode 100644 index a8aaea6643d60e114581a82f522f80d75e7058f4..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Beach Buggy Racing Mod Apk How to Download and Install from MediaFire.md +++ /dev/null @@ -1,86 +0,0 @@ - -

      Download Beach Buggy Racing Mod APK Mediafıre: A Fun and Exciting Racing Game for Android

      -

      Do you love racing games? Do you want to experience the thrill of driving a buggy on a sandy beach? If yes, then you should try Beach Buggy Racing, a popular racing game for Android devices. In this game, you can race against a variety of opponents, use power-ups and weapons to sabotage them, and customize your own buggy and character. But what if you want to enjoy the game without any limitations or restrictions? Well, you can do that by downloading Beach Buggy Racing mod apk mediafıre, a modified version of the game that gives you unlimited money, gems, and access to all the features of the game. In this article, we will tell you more about Beach Buggy Racing, why you should download its mod apk mediafıre, and how to do it easily.

      -

      What is Beach Buggy Racing?

      -

      Beach Buggy Racing is a racing game developed by Vector Unit, the same studio that created Riptide GP and Shine Runner. It is a sequel to the original Beach Buggy Blitz, but with more features and improvements. In this game, you can drive your buggy across different tracks, such as beaches, jungles, volcanoes, and swamps. You can also compete with other racers, both offline and online, and use various power-ups and weapons to gain an edge over them. Some of the power-ups include fireballs, oil slicks, rockets, and lightning bolts. You can also collect coins and gems along the way, which you can use to upgrade your buggy or buy new ones. There are over 25 different buggies to choose from, each with its own stats and abilities. You can also customize your character with different outfits and accessories.

      -

      download beach buggy racing mod apk mediafıre


Download Zip https://urlca.com/2uO5NB



      -

      Features of Beach Buggy Racing

      -

      Amazing graphics and sound effects

      -

      One of the best things about Beach Buggy Racing is its graphics and sound effects. The game has colorful and detailed environments that make you feel like you are really on a beach. The buggies are also well-designed and realistic, with different shapes and sizes. The sound effects are also immersive and fun, with engine noises, screeching tires, explosions, and cheers from the crowd.

      -

      Various modes and challenges

      -

      Another great thing about Beach Buggy Racing is its variety of modes and challenges. The game has six different modes to choose from: Race, Quick Race, Championship, Daily Challenge, Weekly Tournament, and Boss Fight. Each mode has its own rules and objectives, such as finishing first, beating a certain time, or defeating a boss. The game also has over 15 different tracks to race on, each with its own obstacles and shortcuts. You can also unlock new tracks by completing certain achievements or challenges.

      -

      Customizable vehicles and characters

      -

      A third great thing about Beach Buggy Racing is its customization options. As we mentioned earlier, you can collect coins and gems in the game, which you can use to buy new buggies or upgrade your existing ones. You can also change the color of your buggy or add stickers to it. Moreover, you can customize your character with different outfits and accessories, such as hats, sunglasses, helmets, or masks. You can also unlock new characters by winning races or completing challenges. You can also choose from 12 different drivers, each with their own personality and special ability.

      -

      Multiplayer and online options

      -

      A fourth great thing about Beach Buggy Racing is its multiplayer and online options. The game supports up to four players on the same device, using split-screen mode. You can also play with up to six players online, using Wi-Fi or Bluetooth. You can join or create your own lobby, chat with other players, and race against them in real time. You can also compete in weekly tournaments and leaderboards, and earn trophies and rewards.

      -

      Why download Beach Buggy Racing mod apk mediafıre?

      -

      Now that you know what Beach Buggy Racing is and what it offers, you might be wondering why you should download its mod apk mediafıre. Well, the answer is simple: because it makes the game more fun and enjoyable. By downloading Beach Buggy Racing mod apk mediafıre, you can get unlimited money, gems, and access to all the features of the game. This means that you can buy and upgrade any buggy you want, customize your character as you like, and unlock all the tracks and modes. You can also play the game without any ads or root requirements. In short, you can have the ultimate racing experience on your Android device.

      -

      Benefits of Beach Buggy Racing mod apk mediafıre

      -

      Unlimited money and gems

      -

      The first benefit of Beach Buggy Racing mod apk mediafıre is that it gives you unlimited money and gems. Money and gems are the main currencies in the game, which you can use to buy new buggies or upgrade your existing ones. You can also use them to customize your character or buy power-ups and weapons. However, earning money and gems in the game can be slow and tedious, especially if you want to buy the more expensive items. That's why downloading Beach Buggy Racing mod apk mediafıre is a good idea, because it gives you unlimited money and gems from the start. You don't have to worry about running out of them or grinding for them. You can just spend them as much as you want and enjoy the game.

      -

      All vehicles and characters unlocked

      -

      The second benefit of Beach Buggy Racing mod apk mediafıre is that it unlocks all the vehicles and characters in the game. As we mentioned earlier, there are over 25 different buggies to choose from, each with its own stats and abilities. There are also 12 different drivers, each with their own personality and special ability. However, not all of them are available from the beginning. You have to unlock them by winning races or completing challenges. This can take a lot of time and effort, especially if you want to unlock the more powerful or rare ones. That's why downloading Beach Buggy Racing mod apk mediafıre is a good idea, because it unlocks all the vehicles and characters from the start. You don't have to wait or work for them. You can just select any of them and start racing.

      -

      download beach buggy racing mod apk unlimited money
      -download beach buggy racing mod apk latest version
      -download beach buggy racing mod apk android 1
      -download beach buggy racing mod apk rexdl
      -download beach buggy racing mod apk revdl
      -download beach buggy racing mod apk hack
      -download beach buggy racing mod apk offline
      -download beach buggy racing mod apk 2021
      -download beach buggy racing mod apk free shopping
      -download beach buggy racing mod apk for pc
      -download beach buggy racing mod apk data
      -download beach buggy racing mod apk obb
      -download beach buggy racing mod apk unlimited gems
      -download beach buggy racing mod apk unlocked all cars
      -download beach buggy racing mod apk no ads
      -download beach buggy racing mod apk andropalace
      -download beach buggy racing mod apk android oyun club
      -download beach buggy racing mod apk all unlocked
      -download beach buggy racing mod apk apkpure
      -download beach buggy racing mod apk aptoide
      -download beach buggy racing mod apk android republic
-download beach buggy racing mod apk by an1.com
      -download beach buggy racing mod apk blackmod
      -download beach buggy racing mod apk coins and gems
      -download beach buggy racing mod apk cheat
      -download beach buggy racing mod apk cracked
      -download beach buggy racing mod apk diamond
      -download beach buggy racing mod apk everything unlocked
      -download beach buggy racing mod apk full version
      -download beach buggy racing mod apk file
      -download beach buggy racing mod apk gamestechy
      -download beach buggy racing mod apk google drive
      -download beach buggy racing mod apk happymod
      -download beach buggy racing mod apk highly compressed
      -download beach buggy racing mod apk ios
      -download beach buggy racing mod apk lenov.ru
      -download beach buggy racing mod apk mega
      -download beach buggy racing mod apk money and gems
      -download beach buggy racing mod apk new version
      -download beach buggy racing mod apk old version
      -download beach buggy racing mod apk original
      -download beach buggy racing mod apk pro
      -download beach buggy racing mod apk pure
      -download beach buggy racing mod apk unlimited everything
      -download beach buggy racing mod apk unlimited coins and gems 2021
      -download beach buggy racing mod apk unlimited coins and diamonds
      -download beach buggy racing mod apk vip
      -download beach buggy racing mod apk with obb
      -download beach buggy racing mod apk zip

      -

      No ads and no root required

      -

      The third benefit of Beach Buggy Racing mod apk mediafıre is that it removes all the ads and root requirements from the game. Ads are annoying and distracting, especially when they pop up in the middle of a race or a menu screen. They can also slow down your device or consume your data. Root requirements are also inconvenient and risky, especially if you don't know how to do it properly or safely. They can also void your warranty or damage your device. That's why downloading Beach Buggy Racing mod apk mediafıre is a good idea, because it removes all the ads and root requirements from the game. You don't have to deal with them or worry about them. You can just enjoy the game without any interruptions or complications.

      -

      How to download and install Beach Buggy Racing mod apk mediafıre

      -

      Now that you know why you should download Beach Buggy Racing mod apk mediafıre, you might be wondering how to do it easily. Well, don't worry, because we will tell you how to do it step by step.

      -

      Step 1: Download the mod apk file from the link below

      -

      The first step is to download the mod apk file from the link below. This is a safe and secure link that will direct you to the mediafıre website, where you can download the file without any problems or viruses.

      - [Download Beach Buggy Racing Mod APK Mediafıre]

      Step 2: Enable unknown sources on your device settings

      -

      The second step is to enable unknown sources on your device settings. This is a necessary step to allow your device to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources. Turn on the option and confirm your choice.

      -

      Step 3: Install the mod apk file and enjoy the game

      -

      The third and final step is to install the mod apk file and enjoy the game. To do this, go to your file manager, then locate the mod apk file that you downloaded. Tap on it and follow the instructions to install it. Once the installation is done, you can open the game and start playing. You will see that you have unlimited money, gems, and access to all the features of the game.

      -

      Conclusion

      -

      Beach Buggy Racing is a fun and exciting racing game for Android devices that lets you drive a buggy on a sandy beach and compete with other racers. It has amazing graphics, sound effects, modes, challenges, vehicles, characters, power-ups, and weapons. However, if you want to enjoy the game without any limitations or restrictions, you should download Beach Buggy Racing mod apk mediafıre, a modified version of the game that gives you unlimited money, gems, and access to all the features of the game. You can also play the game without any ads or root requirements. To download Beach Buggy Racing mod apk mediafıre, you just need to follow three simple steps: download the mod apk file from the link below, enable unknown sources on your device settings, and install the mod apk file and enjoy the game. We hope this article was helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below.

- FAQs
Q: Is Beach Buggy Racing mod apk mediafıre safe to download and install?
A: Yes, Beach Buggy Racing mod apk mediafıre is safe to download and install. It does not contain any viruses or malware that can harm your device or data. It is also compatible with most Android devices and versions.
Q: Do I need to uninstall the original Beach Buggy Racing game before installing the mod apk mediafıre?
A: No, you do not need to uninstall the original Beach Buggy Racing game before installing the mod apk mediafıre. You can keep both versions on your device and play them separately.
Q: Will I get banned from playing online if I use Beach Buggy Racing mod apk mediafıre?
A: No, you will not get banned from playing online if you use Beach Buggy Racing mod apk mediafıre. The mod apk mediafıre does not interfere with the online servers or features of the game. You can still play with other players online without any problems.
Q: Can I update Beach Buggy Racing mod apk mediafıre when a new version of the game is released?
A: Yes, you can update Beach Buggy Racing mod apk mediafıre when a new version of the game is released. However, you will need to download and install the new mod apk mediafıre file from the same link below. You cannot update it from the Google Play Store or any other source.
Q: Where can I find more mod apk mediafıre files for other games?
A: You can find more mod apk mediafıre files for other games on our website [link]. We have a large collection of mod apk mediafıre files for various games and apps that you can download and enjoy for free.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Cleo Sol - Know That You Are Loved A Song of Love and Hope in MP3.md b/spaces/congsaPfin/Manga-OCR/logs/Cleo Sol - Know That You Are Loved A Song of Love and Hope in MP3.md deleted file mode 100644 index edbd7dec8f5eef3d99cc6460bf0d32ede39838ca..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Cleo Sol - Know That You Are Loved A Song of Love and Hope in MP3.md +++ /dev/null @@ -1,111 +0,0 @@ - -

      Cleo Sol Know That You Are Loved MP3 Download Fakaza

      -

      Are you looking for a way to download Cleo Sol Know That You Are Loved MP3 for free? If yes, then you are in the right place. In this article, we will show you how to get this beautiful song by the talented British singer-songwriter Cleo Sol in MP3 format using two different methods. We will also tell you more about the artist, the song, and why it is so popular in South Africa. So, without further ado, let's get started.

      -

      Introduction

      -

      Who is Cleo Sol?

      -

      Cleo Sol is a British singer-songwriter who was born in London to a Jamaican father and a Serbian mother. She grew up listening to various genres of music, such as soul, jazz, reggae, and hip-hop. She started singing at a young age and joined a gospel choir when she was 10 years old. She also learned to play the piano and guitar.

      -

      cleo sol know that you are loved mp3 download fakaza


      DOWNLOAD –––––>>> https://urlca.com/2uO7Nt



      -

      Cleo Sol released her debut EP Winter Songs in 2017, followed by her first album Rose in the Dark in 2020. She is also a member of the musical collective Sault, which has released four critically acclaimed albums since 2019. She has collaborated with artists such as Devlin, Wretch 32, GoldLink, Little Simz, and Mahalia.

      -

      What is the song Know That You Are Loved about?

      -

      Know That You Are Loved is the eleventh track on Cleo Sol's second album Mother, which was released on August 20, 2021. The song is a soothing and uplifting message of self-love and acceptance. It reassures the listener that they are loved, even if they don't love themselves. It promotes self-esteem and emphasizes the importance of unconditional love from others.

      -

      The song has a simple but catchy melody, with Cleo Sol's soulful voice singing over a gentle piano accompaniment. The lyrics are repetitive but powerful, conveying a sense of warmth and comfort. The song is suitable for relaxing, meditating, or healing.

      -

      Why is the song popular in South Africa?

      -

      The song Know That You Are Loved has become popular in South Africa for several reasons. First of all, Cleo Sol has a strong fan base in the country, as she has performed there several times and has received positive feedback from local audiences. She has also expressed her love and appreciation for South African culture and music on social media.

      -

      Secondly, the song resonates with many South Africans who are facing challenges and hardships in their lives. The song offers them hope and encouragement, reminding them that they are not alone and that they have value and worth. The song also celebrates diversity and unity, as it appeals to people from different backgrounds and experiences.

      -

Thirdly, the song has been widely shared and streamed on various platforms, such as YouTube, Spotify, Apple Music, and Fakaza. Fakaza is a popular South African music website that provides free downloads of songs from various genres, such as hip-hop, R&B, house, gospel, and more. Many South Africans use Fakaza to discover new music and download their favorite songs for free.

      How to download Cleo Sol Know That You Are Loved MP3 for free?

      -

      If you want to download Cleo Sol Know That You Are Loved MP3 for free, you have two options. You can either use a YouTube to MP3 converter or a Fakaza music downloader. Both methods are easy and fast, and we will explain them in detail below.

      -

      Option 1: Use YouTube to MP3 converter

      -

      A YouTube to MP3 converter is a tool that allows you to convert any YouTube video into an MP3 file. You can use it to download any song from YouTube, including Cleo Sol Know That You Are Loved. Here are the steps you need to follow:

      -

      cleo sol know that you are loved lyrics
      -cleo sol know that you are loved youtube
      -cleo sol know that you are loved shazam
      -cleo sol know that you are loved album
      -cleo sol know that you are loved spotify
      -cleo sol know that you are loved apple music
      -cleo sol know that you are loved soundcloud
      -cleo sol know that you are loved mp3 free download
      -cleo sol know that you are loved mp3 320kbps
      -cleo sol know that you are loved mp3 skull
      -cleo sol know that you are loved mp3 juice
      -cleo sol know that you are loved mp3 direct
      -cleo sol know that you are loved mp3 paw
      -cleo sol know that you are loved mp3 quack
      -cleo sol know that you are loved mp3 downloader
      -cleo sol know that you are loved mp4 download
      -cleo sol know that you are loved video download
      -cleo sol know that you are loved song download
      -cleo sol know that you are loved audio download
      -cleo sol know that you are loved ringtone download
      -cleo sol know that you are loved instrumental download
      -cleo sol know that you are loved karaoke download
      -cleo sol know that you are loved remix download
      -cleo sol know that you are loved cover download
      -cleo sol know that you are loved live performance download
      -cleo sol know that you are loved fakaza music download
      -cleo sol know that you are loved fakaza house download
      -cleo sol know that you are loved fakaza gospel download
      -cleo sol know that you are loved fakaza amapiano download
      -cleo sol know that you are loved fakaza gqom download
      -cleo sol know that you are loved fakaza afro house download
      -cleo sol know that you are loved fakaza hip hop download
      -cleo sol know that you are loved fakaza r&b download
      -cleo sol know that you are loved fakaza soulful house download
      -cleo sol know that you are loved fakaza deep house download
      -cleo sol know that you are loved fakaza kwaito download
      -cleo sol know that you are loved fakaza maskandi download
      -cleo sol know that you are loved fakaza bolo house download
      -cleo sol know that you are loved fakaza zamusic download
      -cleo sol know that you are loved fakaza flexyjam download
      -cleo sol know that you are loved fakaza hiphopza download
      -cleo sol know that you are loved fakaza sahiphopmag download
      -cleo sol know that you are loved fakaza hitvibes download
      -cleo sol know that you are loved fakaza zamob download
      -cleo sol know that you are loved fakaza waploaded download
      -cleo sol know that you are loved fakaza tubidy download

      -

      Step 1: Copy the YouTube URL of the song

      -

      First, you need to find the official video of Cleo Sol Know That You Are Loved on YouTube. You can use the search bar or click on this link: https://www.youtube.com/watch?v=QxQZw0f9cZk. Once you have found the video, copy its URL from the address bar of your browser.

      -

      Step 2: Paste the URL into the converter box

      -

      Next, you need to open a YouTube to MP3 converter website. There are many websites that offer this service, such as ytmp3.cc, y2mate.com, flvto.biz, and more. You can choose any of them, but make sure they are safe and reliable. Once you have opened the website, paste the URL you copied into the converter box.

      -

      Step 3: Choose the MP3 format and quality

      -

      After pasting the URL, you need to choose the output format and quality of your file. Most websites will automatically select MP3 as the default format, but you can also choose other formats, such as M4A, WAV, or FLAC. You can also adjust the quality of your file, depending on your preference and device. The higher the quality, the larger the file size.
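If you already have an audio file saved locally and want to change its format or bitrate yourself, the same idea can be scripted. Below is a minimal sketch in Python using the pydub library (a third-party package that needs FFmpeg installed); the file names and the 192k bitrate are placeholder choices, not part of any converter site:

```python
# Minimal sketch: re-encode a local audio file to MP3 at a chosen bitrate.
# Assumes `pip install pydub` and an FFmpeg binary available on the PATH.
# "input.wav" and "output.mp3" are placeholder file names.
from pydub import AudioSegment

audio = AudioSegment.from_file("input.wav")               # load any format FFmpeg understands
audio.export("output.mp3", format="mp3", bitrate="192k")  # higher bitrate = better quality, larger file
print("Wrote output.mp3 at 192 kbps")
```

As with the converter sites, picking 320k instead of 192k gives better quality at the cost of a larger file.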

      -

      Step 4: Click on the download button and save the file

      -

      Finally, you need to click on the download button and wait for the conversion process to finish. This may take a few seconds or minutes, depending on the length of the video and the speed of your internet connection. Once the conversion is done, you will see a download link or button on the website. Click on it and save the file to your device. You can then enjoy listening to Cleo Sol Know That You Are Loved MP3 for free.

      -

      Option 2: Use Fakaza music downloader

      -

      An alternative way to download Cleo Sol Know That You Are Loved MP3 for free is to use a Fakaza music downloader. This is a tool that allows you to download any song from Fakaza website, which is a popular South African music platform. Here are the steps you need to follow:

      -

      Step 1: Search for Cleo Sol Know That You Are Loved on Fakaza website

      -

      First, you need to open Fakaza website on your browser. The website address is https://fakaza.com/. Once you have opened the website, use the search bar to look for Cleo Sol Know That You Are Loved. You can also browse through different categories and genres of music on the website.

      -

      Step 2: Click on the download link of the song

      -

      Next, you need to find the download link of Cleo Sol Know That You Are Loved on Fakaza website. You will see a list of results that match your search query. Click on the one that has Cleo Sol Know That You Are Loved as the title and artist name. You will be redirected to a new page that has more information about the song, such as album name, release date, genre, and more.

      -

      On this page, you will also see a download link or button that says "Download MP3" or something similar. Click on it and wait for the download process to start.

      -

      Step 3: Wait for the download to complete and enjoy the music

      -

      Finally, you need to wait for the download to complete and save the file to your device. This may take a few seconds or minutes, depending on your internet speed and file size. Once the download is done, you will have Cleo Sol Know That You Are Loved MP3 on your device. You can then enjoy listening to it for free.

      -

      Conclusion

      -

      Summary of the main points

      -

      In conclusion, we have shown you how to download Cleo Sol Know That You Are Loved MP3 for free using two different methods: a YouTube to MP3 converter and a Fakaza music downloader. We have also told you more about the artist, the song, and why it is so popular in South Africa. We hope you found this article helpful and informative.

      -

      Call to action and recommendation

      -

      If you liked this article, please share it with your friends and family who might also enjoy listening to Cleo Sol Know That You Are Loved MP3 for free. You can also leave a comment below and let us know what you think of the song and the artist. We would love to hear from you.

      -

      We also recommend you to check out Cleo Sol's other songs and albums, as well as the music of Sault, the collective she is part of. You will find more amazing songs that will inspire you and make you feel good. You can also follow Cleo Sol on her social media accounts, such as Instagram, Twitter, and Facebook, to stay updated on her latest news and releases.

      -

      Thank you for reading this article and we hope you have a great day. Remember, you are loved.

      -

      FAQs

      -

      Here are some frequently asked questions about Cleo Sol Know That You Are Loved MP3 download Fakaza:

      -

      Q: Is it legal to download Cleo Sol Know That You Are Loved MP3 for free?

      -

      A: It depends on the source and the country you are in. Some sources, such as YouTube and Fakaza, may not have the proper licenses or permissions to distribute the song for free. This may violate the copyright laws and the terms of service of the platforms. Therefore, we advise you to use these methods at your own risk and discretion.

      -

      Alternatively, you can support the artist by buying or streaming the song legally from official platforms, such as iTunes, Amazon Music, Spotify, Apple Music, and more. This way, you can enjoy the song without any worries and also help the artist earn revenue and recognition.

      -

      Q: How can I listen to Cleo Sol Know That You Are Loved MP3 offline?

      -

      A: If you want to listen to Cleo Sol Know That You Are Loved MP3 offline, you need to download it to your device first. You can use either of the methods we described above: a YouTube to MP3 converter or a Fakaza music downloader. Once you have downloaded the file, you can transfer it to your preferred device, such as your phone, tablet, laptop, or MP3 player. You can then listen to it anytime and anywhere without an internet connection.

      -

      Q: What are some other songs by Cleo Sol that I should listen to?

      -

      A: Cleo Sol has many other songs that are worth listening to. Some of her most popular songs are:

      -
• Why Don't You
• Rose in the Dark
• Butterfly
• Shine
• Sweet Blue

      You can also listen to her albums Winter Songs and Rose in the Dark, as well as the albums by Sault, such as Untitled (Black Is), Untitled (Rise), Nine, and Five.

      -

      Q: How can I contact Cleo Sol or send her feedback?

      -

      A: If you want to contact Cleo Sol or send her feedback, you can do so through her social media accounts. She has an Instagram account (@cleosol), a Twitter account (@cleosol), and a Facebook page (Cleo Sol). You can also visit her official website (https://cleosol.com/) for more information.

      -

      Q: Where can I find more articles like this one?

      -

      A: If you enjoyed this article and want to read more articles like this one, you can visit our website (https://example.com/). We have a lot of articles on various topics, such as music, entertainment, lifestyle, technology, and more. You can also subscribe to our newsletter or follow us on our social media accounts to get notified of our latest updates.

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Dragon Ball Z Legends MOD APK - The Ultimate DBZ Game with Unlimited Money.md b/spaces/congsaPfin/Manga-OCR/logs/Dragon Ball Z Legends MOD APK - The Ultimate DBZ Game with Unlimited Money.md deleted file mode 100644 index 04008cfb3daf9db6bb79677337451f8997067e90..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Dragon Ball Z Legends MOD APK - The Ultimate DBZ Game with Unlimited Money.md +++ /dev/null @@ -1,131 +0,0 @@ -
      -

      Dragon Ball Z Mod APK (Unlimited Money) - Download and Play Now!

      -

      If you are a fan of anime and manga, you must have heard of Dragon Ball Z, one of the most popular and influential series of all time. Dragon Ball Z is not only a thrilling adventure story, but also a fun and addictive game that you can play on your Android device. And if you want to make the game even more exciting, you can try the Dragon Ball Z Mod APK, which gives you unlimited money and other perks. In this article, we will tell you everything you need to know about Dragon Ball Z and its modded version, as well as how to download and install it on your device.

      -

      What is Dragon Ball Z?

      -

      Dragon Ball Z is a Japanese anime and manga series created by Akira Toriyama. It is a sequel to the original Dragon Ball series, which follows the adventures of Goku, a young boy who trains in martial arts and searches for the seven mystical orbs known as the Dragon Balls. Dragon Ball Z picks up five years after the end of Dragon Ball, when Goku is married and has a son named Gohan. He soon learns that he is not an ordinary human, but a member of an alien race called the Saiyans, who are sent to conquer planets for an evil emperor named Frieza. Goku and his friends must fight against Frieza and other powerful enemies, such as Cell, Majin Buu, and Beerus, to protect the Earth and the universe.

      -

      dragon ball z mod apk (unlimited money)


      Download File ►►► https://urlca.com/2uOc6M



      -

      The story and characters of Dragon Ball Z

      -

      The story of Dragon Ball Z spans over 291 episodes, divided into nine sagas: Saiyan Saga, Frieza Saga, Garlic Jr. Saga, Androids Saga, Cell Saga, Great Saiyaman Saga, World Tournament Saga, Majin Buu Saga, and Peaceful World Saga. Each saga has its own plot, villains, battles, and transformations. Some of the most memorable moments in the series are Goku's first Super Saiyan transformation, Gohan's defeat of Cell, Vegeta's sacrifice against Majin Buu, and Goku's final battle with Kid Buu.

      -

      The characters of Dragon Ball Z are also diverse and colorful. Besides Goku, who is the main protagonist and hero of the series, there are his friends and allies, such as Krillin, Piccolo, Yamcha, Tien Shinhan, Chiaotzu, Master Roshi, Bulma, Chi-Chi, Gohan, Goten, Trunks, Android 18, Videl, Mr. Satan, Majin Buu, Dende, Mr. Popo, King Kai, Supreme Kai, Elder Kai, Whis, Beerus, and more. There are also his enemies and rivals, such as Raditz, Nappa, Vegeta (who later becomes an ally), Frieza (who later becomes an ally), King Cold, Cooler (Frieza's brother), Dr. Gero (the creator of the androids), Androids 16-20 (including Cell), Babidi (the wizard who awakens Majin Buu), Dabura (the king of the demon realm), Majin Buu (who later becomes an ally), Broly (the legendary Super Saiyan), Janemba (a demon who distorts reality), Hirudegarn (a giant monster who destroys planets), Omega Shenron (the ultimate evil dragon), and more.

      -

      The gameplay and features of Dragon Ball Z

      -

Dragon Ball Z is not only a great anime and manga series but also a great game that you can play on your Android device. There are many versions of Dragon Ball Z games available on the Google Play Store, but some of the most popular and recommended ones are:

      -

      DRAGON BALL LEGENDS

      -

      This is the ultimate anime action RPG game that puts the power of your favorite DRAGON BALL heroes in your hands. You can enjoy epic 3D visuals and animations, as well as intuitive fighting controls and simple card-based strategic gameplay. You can also discover a new original story based on the mysterious Saiyan known as Shallot, and join him and your favorite DRAGON BALL characters to save the world. You can also enter live PVP battles with players from across the globe, and earn rating points and rewards. This game is free to download and play, but it offers in-app purchases for some items and features.

      -

      DRAGON BALL Z DOKKAN BATTLE

      -

      This is another anime action RPG game that lets you explore the world of DRAGON BALL in a unique way. You can collect and train over 400 characters from the series, and create your own dream team for the battles ahead. You can also experience the classic DRAGON BALL sagas, and relive the thrilling moments and fights. The gameplay is based on solving puzzles with colored balls that represent different attacks and powers. You can also join forces with other players in co-op missions and events. This game is also free to download and play, but it offers in-app purchases as well.

      -

      Dragon Ball: Tap Battle

      -

      This is a fun and addictive game that focuses on close-quarters combat. You can tap the screen to perform various moves and combos, and unleash your super attacks. The graphics are great, and the animations are smooth and faithful to the anime. You can choose from many characters, such as Goku, Vegeta, Gohan, Piccolo, Frieza, Cell, Majin Buu, and more. You can also play with your friends in multiplayer mode via Bluetooth or Wi-Fi. This game is not available on the Google Play Store, but you can download it from external sources.

      -

      Dragon Ball: Attack On Saiyan

      -

      This is a retro-style RPG game that follows the story of the Saiyan Saga, the first arc of DRAGON BALL Z. You can control Goku, Krillin, Piccolo, Yamcha, Tien Shinhan, Chiaotzu, or Gohan, and fight against Raditz, Nappa, Vegeta, and other enemies. You can also explore different locations, such as Kame House, Kami's Lookout, King Kai's Planet, Namek, and more. The gameplay is similar to classic JRPGs, with turn-based battles and leveling up systems. This game is also not available on the Google Play Store, but you can download it from external sources.

      -

      Dragon Ball Super Card Game Walkthrough

      -

      This is not a game per se, but a guide for the Dragon Ball Super Card Game, a physical card game based on the DRAGON BALL series. You can learn how to play the game, how to build your deck, how to use different cards and strategies, and how to win against your opponents. You can also watch videos of gameplay tutorials and tips from experts. This app is free to download and use.

      -

      dragon ball z dokkan battle mod apk unlimited money
      -dragon ball z kakarot mod apk unlimited money
      -dragon ball z legends mod apk unlimited money
      -dragon ball z shin budokai mod apk unlimited money
      -dragon ball z tenkaichi tag team mod apk unlimited money
      -dragon ball z budokai 3 mod apk unlimited money
      -dragon ball z xenoverse 2 mod apk unlimited money
      -dragon ball z fighterz mod apk unlimited money
      -dragon ball z ultimate tenkaichi mod apk unlimited money
      -dragon ball z budokai tenkaichi 3 mod apk unlimited money
      -dragon ball z super goku battle mod apk unlimited money[^1^]
      -dragon ball z burst limit mod apk unlimited money
      -dragon ball z infinite world mod apk unlimited money
      -dragon ball z sagas mod apk unlimited money
      -dragon ball z budokai hd collection mod apk unlimited money
      -dragon ball z raging blast 2 mod apk unlimited money
      -dragon ball z battle of gods mod apk unlimited money
      -dragon ball z budokai af mod apk unlimited money
      -dragon ball z budokai 2 mod apk unlimited money
      -dragon ball z supersonic warriors mod apk unlimited money
      -dragon ball z the legacy of goku 2 mod apk unlimited money
      -dragon ball z attack of the saiyans mod apk unlimited money
      -dragon ball z budokai tenkaichi 4 mod apk unlimited money
      -dragon ball z hyper dimension mod apk unlimited money
      -dragon ball z ultimate battle 22 mod apk unlimited money
      -dragon ball z final stand mod apk unlimited money
      -dragon ball z buu's fury mod apk unlimited money
      -dragon ball z the legacy of goku mod apk unlimited money
      -dragon ball z taiketsu mod apk unlimited money
      -dragon ball z legendary super warriors mod apk unlimited money
      -dragon ball z super butouden 3 mod apk unlimited money
      -dragon ball z super saiyan power warriors mod apk unlimited money
      -dragon ball z super android 13 mod apk unlimited money
      -dragon ball z super butouden 2 mod apk unlimited money
      -dragon ball z super gokuden 2 mod apk unlimited money
      -dragon ball z super saiyan densetsu mod apk unlimited money
      -dragon ball z super gokuden kakusei hen mod apk unlimited money
      -dragon ball z super saiya densetsu remake mod apk unlimited money
      -dragon ball z super butouden 1 mod apk unlimited money
      -dragon ball z super gokuden totsugeki hen mod apk unlimited money

      -

      Saiyan Fighter - Dragon Battle

      -

      This is a simple but entertaining game that lets you control a Saiyan warrior and fight against various enemies. You can move around the screen with a joystick, and tap buttons to punch, kick, fly, or use your ki blasts. You can also transform into different Super Saiyan forms when your energy bar is full. The graphics are decent, and the sound effects are satisfying. You can choose from different modes, such as story mode, survival mode, or versus mode. This game is free to download and play.

      -

      battle of super saiyan

      -

      This is another game that lets you control a Saiyan fighter and battle against various foes. You can swipe the screen to move around, and tap buttons to attack, defend, or use your special skills. You can also collect coins and items to upgrade your power and unlock new characters. The graphics are simple, but the gameplay is fast and fun. You can play in different modes, such as arcade mode, story mode, or endless mode. This game is free to download and play.

      -

      What is Dragon Ball Z Mod APK?

      -

      Dragon Ball Z Mod APK is a modified version of the original Dragon Ball Z games that gives you unlimited money and other advantages. With Dragon Ball Z Mod APK, you can enjoy the game without any limitations or restrictions. You can buy anything you want, unlock any character you like, and access any feature you need. You can also get extra bonuses, such as unlimited health, unlimited energy, unlimited ki, unlimited coins, unlimited crystals, unlimited dragon stones, and more. Dragon Ball Z Mod APK is a great way to enhance your gaming experience and have more fun with Dragon Ball Z.

      -

      The benefits of using Dragon Ball Z Mod APK

      -

      There are many benefits of using Dragon Ball Z Mod APK, such as:

      -
• You can save your time and money. You don't have to spend hours grinding or farming for resources or currency. You don't have to spend real money to buy in-app purchases or premium items. You can get everything you want for free with Dragon Ball Z Mod APK.
• You can explore the game fully. You don't have to wait for energy to refill or levels to unlock. You don't have to complete quests or missions to progress. You can play any mode, any saga, any character, and any stage with Dragon Ball Z Mod APK.
• You can customize the game to your liking. You don't have to follow the default settings or options. You can adjust the difficulty, the speed, the graphics, the sound, and the controls with Dragon Ball Z Mod APK.
• You can dominate the game easily. You don't have to worry about losing or failing. You don't have to face any challenge or competition. You can defeat any enemy, win any battle, and complete any task with Dragon Ball Z Mod APK.

      The risks of using Dragon Ball Z Mod APK

      -

      However, there are also some risks of using Dragon Ball Z Mod APK, such as:

      -
• You may face legal issues. Using Dragon Ball Z Mod APK is against the terms and conditions of the original game developers and publishers. You may violate their intellectual property rights and copyrights. You may also face legal actions or penalties from them if they find out that you are using Dragon Ball Z Mod APK.
• You may damage your device. Downloading and installing Dragon Ball Z Mod APK from unknown or untrusted sources may expose your device to viruses, malware, spyware, or other harmful software. These may corrupt your files, steal your data, drain your battery, or damage your hardware.
• You may lose your account. Using Dragon Ball Z Mod APK may cause your account to be banned or suspended by the original game servers. You may lose your progress, your achievements, your rewards, and your friends. You may also lose access to the official updates, patches, events, and features of the original game.
• You may ruin your enjoyment. Using Dragon Ball Z Mod APK may make the game too easy or boring for you. You may lose the sense of achievement, challenge, fun, and excitement that comes from playing the game normally. You may also miss out on the original story, content, and quality of the game.

      How to download and install Dragon Ball Z Mod APK?

      -

      If you still want to try Dragon Ball Z Mod APK despite the risks involved, here are the steps that you need to follow:

      -

      The steps to download and install Dragon Ball Z Mod APK

      -
1. First, you need to find a reliable and safe source to download Dragon Ball Z Mod APK. You can search online for various websites or blogs that offer the modded version of the game. However, you need to be careful and check the reviews, ratings, and comments of other users before downloading anything. You also need to make sure that the modded version is compatible with your device and the original game version. If the source publishes a checksum for the file, compare it against your download (a small checksum sketch follows this list).
2. Second, you need to enable the installation of apps from unknown sources on your device. To do this, you need to go to your device settings, then security, then unknown sources, and then toggle it on. This will allow you to install apps that are not from the Google Play Store.
3. Third, you need to download Dragon Ball Z Mod APK from the source that you have chosen. You can use your browser or a download manager app to do this. You need to wait for the download to finish, and then locate the file on your device storage.
4. Fourth, you need to install Dragon Ball Z Mod APK on your device. To do this, you need to tap on the file that you have downloaded, and then follow the instructions on the screen. You may need to grant some permissions or accept some terms and conditions before the installation is complete.
5. Fifth, you need to launch Dragon Ball Z Mod APK on your device. To do this, you need to find the app icon on your home screen or app drawer, and then tap on it. You may need to allow some access or enable some features before the game starts.
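As a small safeguard for step 1, if the download page publishes a checksum for the file, you can compare it against what you actually received. This is only a minimal integrity-check sketch in Python (it does not replace an antivirus scan); the file name and expected value are placeholders:

```python
# Minimal sketch: compare a downloaded file against a published SHA-256 checksum.
# The file name and EXPECTED value are placeholders; use the values from the download page.
import hashlib

EXPECTED = "paste-the-published-sha256-hex-digest-here"

def sha256_of(path: str, chunk_size: int = 1024 * 1024) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

actual = sha256_of("dragon-ball-z-mod.apk")
print("SHA-256:", actual)
print("Checksum matches." if actual == EXPECTED else "WARNING: checksum does not match; do not install.")
```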

      The tips to enjoy Dragon Ball Z Mod APK safely and smoothly

      -

      Here are some tips that you can follow to enjoy Dragon Ball Z Mod APK without any problems or issues:

      -
• Make sure that you have enough storage space and battery life on your device before downloading and installing Dragon Ball Z Mod APK. You don't want to run out of space or power in the middle of the process.
• Make sure that you have a stable and fast internet connection before downloading and playing Dragon Ball Z Mod APK. You don't want to experience any lag or interruption while enjoying the game.
• Make sure that you have a backup of your original game data before using Dragon Ball Z Mod APK. You don't want to lose your progress or achievements if something goes wrong with the modded version.
• Make sure that you update your original game and your modded version regularly. You don't want to miss out on any new features, fixes, or improvements that the developers may release.
• Make sure that you use Dragon Ball Z Mod APK at your own risk and discretion. You don't want to get into any trouble with the law or the game developers for using an unauthorized modification of their product.

      Conclusion

      -

      Dragon Ball Z is an amazing anime and manga series that has inspired many games for Android devices. If you are a fan of Dragon Ball Z, you may want to try Dragon Ball Z Mod APK, which gives you unlimited money and other benefits. However, you also need to be aware of the risks and challenges of using Dragon Ball Z Mod APK, and follow the steps and tips that we have provided in this article. We hope that this article has helped you learn more about Dragon Ball Z and its modded version, as well as how to download and install it on your device.

      -

      Summary of the main points

      -

      In summary, here are the main points that we have covered in this article:

      -
• Dragon Ball Z is a popular and influential anime and manga series that follows the adventures of Goku and his friends as they fight against various enemies to protect the Earth and the universe.
• Dragon Ball Z has many games for Android devices, such as DRAGON BALL LEGENDS, DRAGON BALL Z DOKKAN BATTLE, Dragon Ball: Tap Battle, Dragon Ball: Attack On Saiyan, Dragon Ball Super Card Game Walkthrough, Saiyan Fighter - Dragon Battle, and battle of super saiyan.
• Dragon Ball Z Mod APK is a modified version of the original Dragon Ball Z games that gives you unlimited money and other advantages, such as unlimited health, unlimited energy, unlimited ki, unlimited coins, unlimited crystals, unlimited dragon stones, and more.
• To download and install Dragon Ball Z Mod APK on your device, you need to find a reliable and safe source, enable the installation of apps from unknown sources on your device, download the modded version of the game, and install it on your device.
• To enjoy Dragon Ball Z Mod APK safely and smoothly, you need to have enough storage space and battery life on your device, have a stable and fast internet connection, have a backup of your original game data, update your original game and your modded version regularly, and use Dragon Ball Z Mod APK at your own risk and discretion.

      Call to action and recommendation

      -

      Now that you have learned everything you need to know about Dragon Ball Z and its modded version, what are you waiting for? Download and install Dragon Ball Z Mod APK on your device today and enjoy the ultimate anime action RPG game with unlimited money and other perks. You will surely have a blast with Dragon Ball Z Mod APK.

      -

      However, if you are not comfortable with using Dragon Ball Z Mod APK, or if you want to support the original game developers and publishers, we recommend that you play the official Dragon Ball Z games instead. You can still have a lot of fun and excitement with the original games, and you can also avoid any legal or technical issues that may arise from using Dragon Ball Z Mod APK.

      -

      Whatever you choose, we hope that you enjoy playing Dragon Ball Z on your Android device. Thank you for reading this article, and have a great day!

      -

      FAQs

      -

      Here are some frequently asked questions about Dragon Ball Z and its modded version:

      -

      Q: Is Dragon Ball Z Mod APK safe to use?

      -

      A: Dragon Ball Z Mod APK is not officially endorsed or supported by the original game developers and publishers. It may contain viruses, malware, spyware, or other harmful software that may damage your device or steal your data. It may also cause your account to be banned or suspended by the original game servers. Therefore, using Dragon Ball Z Mod APK is not safe or recommended.

      -

      Q: Is Dragon Ball Z Mod APK free to download and play?

      -

      A: Yes, Dragon Ball Z Mod APK is free to download and play. However, it may offer in-app purchases for some items or features that are not available in the modded version. You may also need to pay for the data or internet charges that may incur from downloading and playing Dragon Ball Z Mod APK.

      -

      Q: What are the best sources to download Dragon Ball Z Mod APK?

      -

      A: There are many websites or blogs that offer Dragon Ball Z Mod APK for download. However, not all of them are reliable or safe. You need to check the reviews, ratings, and comments of other users before downloading anything. You also need to make sure that the modded version is compatible with your device and the original game version.

      -

      Q: How can I update my Dragon Ball Z Mod APK?

      -

      A: You can update your Dragon Ball Z Mod APK by downloading and installing the latest version from the same source that you have used before. However, you need to be careful and check if the new version is working properly and does not contain any bugs or errors. You also need to backup your game data before updating your Dragon Ball Z Mod APK.

      -

      Q: Can I play online or offline with Dragon Ball Z Mod APK?

      -

      A: You can play online or offline with Dragon Ball Z Mod APK depending on the game mode that you choose. Some modes require an internet connection to access the game servers or other players, while some modes can be played offline without any internet connection. However, playing online with Dragon Ball Z Mod APK may increase the risk of getting detected or banned by the original game servers.

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Garten of Banban para PC Descarga y Juega con GameLoop Oficial.md b/spaces/congsaPfin/Manga-OCR/logs/Garten of Banban para PC Descarga y Juega con GameLoop Oficial.md deleted file mode 100644 index 26ff32281f99845e5091efdfefd80ff769d0fb21..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Garten of Banban para PC Descarga y Juega con GameLoop Oficial.md +++ /dev/null @@ -1,124 +0,0 @@ -
      -

      How to Download Garten of Banban 1 PC

      -

      Garten of Banban is a horror adventure game that takes you to a mysterious kindergarten where you have to uncover the truth behind the disappearance of the children and staff. The game features a cartoony style, a flying companion, and a lot of scares. If you are looking for a thrilling and immersive experience, you might want to download Garten of Banban 1 PC and play it on your computer. Here are some ways you can do that:

      -

      download garten of banban 1 pc


      Download Zip » https://urlca.com/2uO6pA



      -

      Download Garten of Banban 1 PC from Steam

      -

      One of the easiest and most convenient ways to download Garten of Banban 1 PC is to get it from Steam, the popular digital distribution platform for games. Steam offers a lot of benefits, such as automatic updates, cloud saves, achievements, and community features. To download Garten of Banban 1 PC from Steam, you need to follow these steps:

      -
1. Create a Steam account if you don't have one already. You can do that by visiting https://store.steampowered.com/join/ and following the instructions.
2. Download and install the Steam client on your PC. You can do that by visiting https://store.steampowered.com/about/ and clicking on the "Install Steam" button.
3. Launch the Steam client and log in with your account.
4. Search for "Garten of Banban" in the Steam store or visit https://store.steampowered.com/app/2232840/Garten_of_Banban/.
5. Click on the "Add to Cart" button and proceed to checkout.
6. Once you have purchased the game, it will be added to your library.
7. Click on the "Library" tab and select "Garten of Banban" from the list of games.
8. Click on the "Install" button and wait for the game to download and install on your PC.
9. Once the installation is complete, click on the "Play" button and enjoy the game!

      Download Garten of Banban 1 PC from BlueStacks

      -

      Another way to download Garten of Banban 1 PC is to use BlueStacks, an emulator that allows you to run Android games and apps on your PC. BlueStacks offers a lot of features, such as keyboard and mouse controls, multi-instance, macros, high FPS, and more. To download Garten of Banban 1 PC from BlueStacks, you need to follow these steps:

      -
1. Download and install BlueStacks on your PC. You can do that by visiting https://www.bluestacks.com/ and clicking on the "Download BlueStacks" button.
2. Launch BlueStacks and log in with your Google account.
3. Search for "Garten of Banban" in the BlueStacks app center or visit https://www.bluestacks.com/apps/adventure/garten-of-banban-on-pc.html.
4. Click on the "Install" button and wait for the game to download and install on your PC.
5. Once the installation is complete, click on the "Play" button and enjoy the game!

      Download Garten of Banban 1 PC from YouTube

      -

A third way to download Garten of Banban 1 PC is to use YouTube, the popular video-sharing platform. YouTube has a lot of videos that show you how to download and play Garten of Banban 1 PC. However, you need to be careful when using this method, as the files linked from some of these videos may contain malware or viruses that can harm your PC. To download Garten of Banban 1 PC from YouTube, you need to follow these steps:

      -
1. Visit https://www.youtube.com/ and search for "Garten of Banban 1 PC".
2. Look for a video that has a lot of views, likes, and positive comments. For example, you can check out this video: https://www.youtube.com/watch?v=8wXxqJQ9s0.
3. Watch the video carefully and follow the instructions. You might need to download some additional software or files, such as WinRAR, 7-Zip, or uTorrent.
4. Make sure you scan the downloaded files with an antivirus program before opening them.
5. Extract the files and run the setup.exe file to install the game on your PC; it can also help to list the archive's contents before extracting, as shown in the sketch after this list.
6. Once the installation is complete, launch the game and enjoy!
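For step 5, it can help to see what is inside a downloaded archive before extracting or running anything. A minimal sketch using Python's built-in zipfile module, assuming the download is a .zip (a .rar would need a separate tool such as 7-Zip); the file name is a placeholder:

```python
# Minimal sketch: list the contents of a downloaded .zip before extracting it.
# "game_download.zip" is a placeholder; adjust it to the file you actually downloaded.
import zipfile

with zipfile.ZipFile("game_download.zip") as archive:
    for info in archive.infolist():
        print(f"{info.file_size:>12} bytes  {info.filename}")
    # Extract only after reviewing the listing (and scanning the file, as in step 4):
    # archive.extractall("game_download_extracted")
```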

      Comparison Table of Download Methods

      -

      To help you decide which method to use, here is a comparison table of the three download methods for Garten of Banban 1 PC:

      - - - - - - - - - - - - - - - - - - - - - -
Method: Steam
Pros: Easy and convenient; automatic updates; cloud saves; achievements; community features
Cons: Requires a Steam account; costs $9.99

Method: BlueStacks
Pros: Free; keyboard and mouse controls; multi-instance; macros; high FPS
Cons: Requires a Google account; might have compatibility issues; might consume more resources

Method: YouTube
Pros: Free; might have some tips and tricks
Cons: Risky and unreliable; might contain malware or viruses; might have low quality or outdated files; might have legal issues
      -

      Conclusion

      -

      Garten of Banban 1 PC is a horror adventure game that will keep you on the edge of your seat. If you want to download and play it on your computer, you have three options: Steam, BlueStacks, or YouTube. Each method has its own advantages and disadvantages, so you need to weigh them carefully before choosing one. Whichever method you choose, make sure you have a good PC and a brave heart to face the horrors of the kindergarten. Have fun and good luck!

      -

      download garten of banban 1 pc free
      -download garten of banban 1 pc steam
      -download garten of banban 1 pc bluestacks
      -download garten of banban 1 pc gameloop
      -download garten of banban 1 pc full version
      -download garten of banban 1 pc horror game
      -download garten of banban 1 pc adventure game
      -download garten of banban 1 pc euphoric brothers
      -download garten of banban 1 pc android emulator
      -download garten of banban 1 pc gameplay
      -download garten of banban 1 pc walkthrough
      -download garten of banban 1 pc review
      -download garten of banban 1 pc trailer
      -download garten of banban 1 pc system requirements
      -download garten of banban 1 pc cheats
      -download garten of banban 1 pc mods
      -download garten of banban 1 pc tips and tricks
      -download garten of banban 1 pc online multiplayer
      -download garten of banban 1 pc offline mode
      -download garten of banban 1 pc windows 10
      -download garten of banban 1 pc mac os
      -download garten of banban 1 pc linux
      -download garten of banban 1 pc update
      -download garten of banban 1 pc patch notes
      -download garten of banban 1 pc bug fixes
      -download garten of banban 1 pc new features
      -download garten of banban 1 pc bonus levels
      -download garten of banban 1 pc secrets and easter eggs
      -download garten of banban 1 pc soundtrack
      -download garten of banban 1 pc voice actors
      -download garten of banban 1 pc characters and story
      -download garten of banban 1 pc kindergarten theme
      -download garten of banban 1 pc flying companion
      -download garten of banban 1 pc official mobile game
      -download garten of banban 1 pc fan art and memes
      -download garten of banban 1 pc community and forums
      -download garten of banban 1 pc news and events
      -download garten of banban 1 pc guides and tutorials
      -download garten of banban 1 pc faq and support
      -download garten of banban 1 pc ratings and awards

      -

      Frequently Asked Questions

      -

      What are the system requirements for Garten of Banban 1 PC?

      -

      The minimum system requirements for Garten of Banban 1 PC are:

      -
• OS: Windows 7/8/10 (64-bit)
• Processor: Intel Core i3-4160 or equivalent
• Memory: 4 GB RAM
• Graphics: NVIDIA GeForce GTX 660 or equivalent
• DirectX: Version 11
• Storage: 2 GB available space
• Sound Card: DirectX compatible sound card
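A few of these requirements (OS, RAM, free disk space) can be checked from a short script; the GPU and DirectX entries are easier to verify through dxdiag. A minimal sketch in Python using the standard library plus the third-party psutil package, with the 4 GB and 2 GB thresholds taken from the list above:

```python
# Minimal sketch: check OS, installed RAM, and free disk space against the listed minimums.
# Assumes `pip install psutil`; GPU/DirectX checks are out of scope here.
import platform
import shutil

import psutil

print("OS:", platform.system(), platform.release(), platform.machine())

ram_gb = psutil.virtual_memory().total / (1024 ** 3)
print(f"RAM: {ram_gb:.1f} GB ({'OK' if ram_gb >= 4 else 'below the 4 GB minimum'})")

root = "C:\\" if platform.system() == "Windows" else "/"
free_gb = shutil.disk_usage(root).free / (1024 ** 3)
print(f"Free disk: {free_gb:.1f} GB ({'OK' if free_gb >= 2 else 'below the 2 GB minimum'})")
```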

      How long is Garten of Banban 1 PC?

      -

      Garten of Banban 1 PC is estimated to take about 3 hours to complete, depending on your playstyle and difficulty level.

      -

      Is Garten of Banban 1 PC scary?

      -

      Garten of Banban 1 PC is a horror game that uses jump scares, creepy sounds, dark atmosphere, and disturbing imagery to scare the player. If you are easily frightened or have a weak heart, you might want to avoid playing this game.

      -

      Is Garten of Banban 1 PC multiplayer?

      -

      No, Garten of Banban 1 PC is a single-player game that does not support multiplayer mode.

      -

      Is Garten of Banban 1 PC available for other platforms?

      -

      Garten of Banban 1 PC is also available for Android devices. You can download it from the Google Play Store https://play.google.com/store/apps/details?id=com.banbantech.gartenofbanban&hl=en_US&gl=US.

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Outfit7 Presents Talking Tom Gold Run Mod APK The Ultimate Running Adventure with All Characters Unlocked.md b/spaces/congsaPfin/Manga-OCR/logs/Outfit7 Presents Talking Tom Gold Run Mod APK The Ultimate Running Adventure with All Characters Unlocked.md deleted file mode 100644 index 94f805304a93d650e08a66b2965ea071efc80848..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Outfit7 Presents Talking Tom Gold Run Mod APK The Ultimate Running Adventure with All Characters Unlocked.md +++ /dev/null @@ -1,116 +0,0 @@ -
      -

      Talking Tom Gold Run Mod APK Outfit7: A Fun and Exciting Game for All Ages

      -

      If you are looking for a fun and exciting game that will keep you entertained for hours, then you should try Talking Tom Gold Run Mod APK Outfit7. This is a modified version of the popular game Talking Tom Gold Run, which is developed by Outfit7 Limited. In this game, you can join Talking Tom and his friends as they chase after a robber who stole their gold. You can run, jump, slide, and dodge obstacles along the way, while collecting gold bars, gems, dynamites, and other items. You can also use your gold to build your dream home and unlock new worlds and characters.

      -

      talking tom gold run mod apk outfit7


      Download File →→→ https://urlca.com/2uO6Ep



      -

      In this article, we will tell you everything you need to know about Talking Tom Gold Run Mod APK Outfit7, including what it is, what are its benefits, how to download and install it, and whether it is safe and legal to use. We will also answer some frequently asked questions about the game and the mod apk. So, let's get started!

      -

      What is Talking Tom Gold Run?

      -

      Talking Tom Gold Run is a 3D endless runner game that is inspired by games like Subway Surfers and Temple Run. The game features Talking Tom and his friends as they chase after a robber who stole their gold from their vault. The game has different worlds, such as city streets, tropical beaches, snowy mountains, Chinese temples, etc., where you can run and explore. The game also has different characters, such as Talking Angela, Talking Hank, Talking Ginger, etc., each with their own outfits and abilities.

      -

      The gameplay of Talking Tom Gold Run is simple but addictive. You have to swipe left or right to change lanes, swipe up to jump, and swipe down to slide. You can also tap the screen to use tools, such as magnets, helmets, planes, etc., that can help you collect more gold and avoid obstacles. You have to be careful not to crash into cars, buses, trains, walls, or other obstacles, or you will lose the chase. You can also collect boosters, such as chili peppers, rockets, etc., that can make you run faster and catch up with the robber.

      -

      talking tom gold run unlimited money mod apk outfit7
      -talking tom gold run hack mod apk download outfit7
      -talking tom gold run latest version mod apk outfit7
      -talking tom gold run mod apk android 1 outfit7
      -talking tom gold run mod apk revdl outfit7
      -talking tom gold run mod apk rexdl outfit7
      -talking tom gold run mod apk free shopping outfit7
      -talking tom gold run mod apk all characters unlocked outfit7
      -talking tom gold run mod apk unlimited gems and dynamite outfit7
      -talking tom gold run mod apk no ads outfit7
      -talking tom gold run 3d game mod apk outfit7
      -talking tom gold run offline mod apk outfit7
      -talking tom gold run new update mod apk outfit7
      -talking tom gold run 2.9.6.127 mod apk outfit7
      -talking tom gold run 2.7.1.77 mod apk outfit7
      -talking tom gold run 2.5.3.58 mod apk outfit7
      -talking tom gold run 2.4.0.19 mod apk outfit7
      -talking tom gold run 2.3.3.1625 mod apk outfit7
      -talking tom gold run 2.2.0.1519 mod apk outfit7
      -talking tom gold run 2.1.1.1402 mod apk outfit7
      -talking tom gold run 1.9.0.1134 mod apk outfit7
      -talking tom gold run 1.8.0.1056 mod apk outfit7
      -talking tom gold run 1.6.0.46 mod apk outfit7
      -talking tom gold run 1.5.2.668 mod apk outfit7
      -talking tom gold run 1.4.3.683 mod apk outfit7
      -talking tom gold run 1.3.3.443 mod apk outfit7
      -talking tom gold run 1.2.2.360 mod apk outfit7
      -talking tom gold run 1.1.1.116 mod apk outfit7
      -talking tom gold run 1.0.12.892 mod apk outfit7
      -download game talking tom gold run mod apk outfit7
      -download aplikasi talking tom gold run mod apk outfit7
      -cara download talking tom gold run mod apk outfit7
      -como descargar talking tom gold run mod apk outfit7
      -como baixar talking tom gold run mod apk outfit7
      -como instalar talking tom gold run mod apk outfit7
      -how to play talking tom gold run mod apk outfit7
      -how to install talking tom gold run mod apk outfit7
      -how to update talking tom gold run mod apk outfit7
      -how to hack talking tom gold run with lucky patcher no root no survey no human verification no password no offers no jailbreak no computer no generator no activation code no online no app no email no phone number no verification no registration no download required just watch video tutorial on youtube and follow steps carefully and enjoy your free resources in your account instantly and permanently working on all devices and platforms and browsers and operating systems and versions and regions and countries and languages and servers and networks and connections and locations and ip addresses and vpn and proxy and tor browser and incognito mode and private browsing mode and safe mode and guest mode and incognito mode with keyboard shortcuts for windows mac linux android ios iphone ipad ipod touch samsung lg htc sony huawei xiaomi oppo vivo nokia motorola lenovo asus acer dell hp toshiba google pixel oneplus realme honor meizu zte blackberry alcatel blu lava micromax karbonn spice intex panasonic coolpad gionee leeco leeco letv lyf infinix tecno itel innjoo umidigi cubot doogee elephone homtom oukitel ulefone vernee vkworld zopo agm blackview bluboo cat comio energizer essential fairphone fero fly gome hafury hisense hotwav iball infocus inoi jivi jolla kazam keecoo kenxinda kodak koobee kyocera landvo leagoo lephone m-horse mafe maze meitu mobiistar mobistel mtn neffos nomu nubia nuu obi worldphone okwu onn oukitel plum posh mobile prestigio qiku qmobile razer sharp smartisan swipe tcl tecno telefunken timmy turing ulefone unihertz unnecto verykool vodafone wiko wileyfox xgody yezz yota yu zen ziox zte nubia zuk etc.

      -

      The game has a lot of challenges and missions that you can complete to earn rewards and achievements. You can also compete with other players around the world in leaderboards and events. The game is free to play, but you can also purchase in-game currency and items with real money.

      -

      What is Outfit7?

      -

      Outfit7 Limited is the developer of Talking Tom Gold Run and other popular games featuring Talking Tom and his friends. Outfit7 is one of the world's leading mobile game developers and publishers, with over 10 billion downloads and 400 million monthly active users. Outfit7 was founded in 2009 by a group of entrepreneurs who wanted to create fun and engaging apps for people of all ages. Outfit7 is based in Cyprus, with offices in Slovenia, China, UK, and USA.

      -

      Some of the other games that Outfit7 has created are Talking Tom Cat, Talking Angela, My Talking Tom, My Talking Angela, Talking Tom Hero Dash, Talking Tom Candy Run, etc. Outfit7 also produces animated series and videos featuring Talking Tom and his friends on YouTube and other platforms.

      -

      What is a Mod APK?

      -

      A mod apk is a modified version of an original apk file that has been altered by someone to add or remove some features from the original app. An apk file is an Android application package file that contains all the files and code needed to install and run an app on an Android device. A mod apk file can be downloaded from third-party sources that are not affiliated with the original developer or Google Play Store.

      -

      People use mod apk files for various reasons, such as to access premium features for free, to unlock more content, to remove ads or restrictions, to cheat or hack the game, etc. However, using mod apk files also comes with some risks and drawbacks, such as malware infection, compatibility issues, legal issues, etc. Therefore, it is important to be careful and cautious when downloading and installing mod apk files on your device.
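Because an apk file is really just a ZIP archive with a fixed layout, you can peek inside one to see the "files and code" described above. The sketch below only checks that a download is a structurally normal Android package; it says nothing about whether the app is safe or legitimate, and the file name is a placeholder:

```python
# Minimal sketch: check that a downloaded .apk is a structurally normal Android package.
# Every ordinary APK contains AndroidManifest.xml and at least one classes*.dex file.
# The file name below is a placeholder.
import zipfile

APK_PATH = "talking-tom-gold-run-mod.apk"

with zipfile.ZipFile(APK_PATH) as apk:
    names = apk.namelist()
    has_manifest = "AndroidManifest.xml" in names
    has_code = any(n.startswith("classes") and n.endswith(".dex") for n in names)
    print("AndroidManifest.xml present:", has_manifest)
    print("Dalvik bytecode (classes*.dex) present:", has_code)
    if not (has_manifest and has_code):
        print("WARNING: this does not look like a normal APK.")
```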

      -

      What are the Benefits of Using Talking Tom Gold Run Mod APK Outfit7?

      -

      Unlimited Gold, Gems, and Dynamites

      -

      One of the main benefits of using Talking Tom Gold Run Mod APK Outfit7 is that it gives you unlimited gold bars, gems, and dynamites in the game. These are the main resources that you need to build your dream home and unlock new worlds and items in the game. Normally, you have to collect these resources by running and chasing the robber in the game. However, with the mod apk file, you can get them for free without any limit. This means you can build your house faster and easier, and customize it with different styles and decorations. You can also unlock new characters, outfits, tools, boosters, etc., without spending any money.

      -

      All Characters Unlocked

      -

      Another benefit of using Talking Tom Gold Run Mod APK Outfit7 is that it gives you access to all the characters in the game. Normally, you have to unlock new characters by completing missions, collecting cards, or buying them with gems. However, with the mod apk file, you can play with any character you want, without any restriction. You can choose from Talking Tom, Talking Angela, Talking Hank, Talking Ginger, Talking Ben, and many more. Each character has their own personality, voice, and style. You can also change their outfits and accessories to suit your mood and preference.

      -

      Highest Level of Tools Duration

      -

      A third benefit of using Talking Tom Gold Run Mod APK Outfit7 is that it gives you the highest level of tools duration in the game. Tools are items that you can use to help you run faster and longer in the game. They include magnets, helmets, planes, skateboards, etc. Normally, you have to upgrade your tools with gold bars to increase their duration and effectiveness. However, with the mod apk file, you can get the maximum level of tools duration without spending any gold. This means you can use your tools for a longer time and collect more gold and items along the way.

      -

      No Ads

      -

      A fourth benefit of using Talking Tom Gold Run Mod APK Outfit7 is that it removes all the ads from the game. Ads are annoying and distracting interruptions that can ruin your gaming experience. They can also consume your data and battery life. Normally, you have to watch ads to get some rewards or bonuses in the game. However, with the mod apk file, you can enjoy the game without any ads. This makes the game more enjoyable and less frustrating.

      -

      How to Download and Install Talking Tom Gold Run Mod APK Outfit7?

      -

      Step 1: Download the Mod APK File

      -

      The first step to download and install Talking Tom Gold Run Mod APK Outfit7 is to download the mod apk file from a trusted source. You can use this link to download the latest version of the mod apk file: Talking Tom Gold Run Mod APK Outfit7 Download. The file size is about 90 MB, so make sure you have enough space on your device.

      -

      Step 2: Enable Unknown Sources on Your Device

      -

      The second step to download and install Talking Tom Gold Run Mod APK Outfit7 is to enable unknown sources on your device. This is necessary to allow your device to install apps from sources other than Google Play Store. To do this, follow these steps:

      -
• Go to your device's settings and look for security or privacy options.
• Find the option that says "Unknown sources" or "Install unknown apps" and toggle it on.
• A warning message will appear, telling you that installing apps from unknown sources can harm your device. Tap OK or Allow to proceed.

      Step 3: Locate and Install the Mod APK File

      -

      The third step to download and install Talking Tom Gold Run Mod APK Outfit7 is to locate and install the mod apk file on your device. To do this, follow these steps:

      -
• Go to your device's file manager and look for the folder where you downloaded the mod apk file.
• Tap on the mod apk file and a pop-up window will appear, asking you if you want to install the app.
• Tap Install and wait for the installation process to finish.
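If you prefer to sideload from a computer instead of tapping through the file manager, the same install can be done with adb (Android Debug Bridge). A minimal sketch that shells out to adb from Python, assuming the Android platform-tools are installed and USB debugging is enabled on the phone; the APK path is a placeholder:

```python
# Minimal sketch: sideload an APK from a computer with adb instead of the on-device file manager.
# Assumes the Android platform-tools (adb) are installed and USB debugging is enabled.
import subprocess

APK_PATH = "talking-tom-gold-run-mod.apk"  # placeholder path to the downloaded file

subprocess.run(["adb", "devices"], check=True)               # confirm the phone is listed
result = subprocess.run(["adb", "install", "-r", APK_PATH],  # -r replaces an existing install
                        capture_output=True, text=True)
print(result.stdout or result.stderr)
```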

      Step 4: Launch the Game and Enjoy

      -

      The fourth and final step to download and install Talking Tom Gold Run Mod APK Outfit7 is to launch the game and enjoy its features. To do this, follow these steps:

      -
• Go to your device's app drawer and look for the icon of Talking Tom Gold Run.
• Tap on the icon and the game will start.
• You will see a message that says "Modded by Outfit7" on the loading screen. This means that the mod apk file is working properly.
• You can now enjoy unlimited gold, gems, dynamites, all characters unlocked, highest level of tools duration, and no ads in the game.

      Is Talking Tom Gold Run Mod APK Outfit7 Safe and Legal?

      -

      Many people wonder if using Talking Tom Gold Run Mod APK Outfit7 is safe and legal. The answer is not so simple, as there are some risks and drawbacks involved in using mod apk files. Here are some of them:

      -
• MALWARE INFECTION: One of the biggest risks of using mod apk files is that they may contain malware or viruses that can harm your device or steal your personal information. This can happen if you download mod apk files from untrusted or malicious sources. Therefore, it is important to download mod apk files from trusted and verified sources, such as the link we provided above.
• COMPATIBILITY ISSUES: Another risk of using mod apk files is that they may not be compatible with your device or the latest version of the game. This can happen if the mod apk file is outdated or not optimized for your device. Therefore, it is important to check the compatibility and requirements of the mod apk file before installing it on your device.
• LEGAL ISSUES: A third risk of using mod apk files is that they may violate the terms and conditions of the original developer or Google Play Store. This can happen if the mod apk file infringes the intellectual property rights or modifies the original content of the game. Therefore, it is important to respect the rights and wishes of the original developer and use the mod apk file at your own risk and responsibility.

      As you can see, using Talking Tom Gold Run Mod APK Outfit7 is not entirely safe and legal. However, if you are careful and cautious, you can still enjoy the game with its enhanced features and benefits. Just make sure you download the mod apk file from a trusted source, check its compatibility and requirements, and respect the rights and wishes of the original developer.

      -

      Conclusion

      -

      Talking Tom Gold Run Mod APK Outfit7 is a modified version of the popular game Talking Tom Gold Run, which is developed by Outfit7 Limited. In this game, you can join Talking Tom and his friends as they chase after a robber who stole their gold. You can run, jump, slide, and dodge obstacles along the way, while collecting gold bars, gems, dynamites, and other items. You can also use your gold to build your dream home and unlock new worlds and characters.

      -

      Talking Tom Gold Run Mod APK Outfit7 gives you unlimited gold, gems, dynamites, all characters unlocked, highest level of tools duration, and no ads in the game. This makes the game more fun and exciting for all ages. However, using Talking Tom Gold Run Mod APK Outfit7 also comes with some risks and drawbacks, such as malware infection, compatibility issues, legal issues, etc. Therefore, you should be careful and cautious when downloading and installing the mod apk file on your device.

      -

      If you want to try Talking Tom Gold Run Mod APK Outfit7, you can follow the steps we provided above to download and install it on your device. You can also use this link to download the latest version of the mod apk file: Talking Tom Gold Run Mod APK Outfit7 Download. We hope you enjoy the game and share your feedback with us in the comments section below.

      -

      FAQs

      -

      Here are some frequently asked questions and their answers about Talking Tom Gold Run Mod APK Outfit7:

      -
        -
      • Q: Is Talking Tom Gold Run Mod APK Outfit7 free?
      • -
      • A: Yes, Talking Tom Gold Run Mod APK Outfit7 is free to download and use. However, you may need to pay for some in-game items or features if you want to use them.
      • -
      • Q: Can I play Talking Tom Gold Run Mod APK Outfit7 offline?
      • -
      • A: Yes, you can play Talking Tom Gold Run Mod APK Outfit7 offline without an internet connection. However, you may need to connect to the internet to access some features or updates.
      • -
      • Q: Can I play Talking Tom Gold Run Mod APK Outfit7 with my friends?
      • -
      • A: Yes, you can play Talking Tom Gold Run Mod APK Outfit7 with your friends online or locally. You can also compete with other players around the world in leaderboards and events.
      • -
      • Q: How can I update Talking Tom Gold Run Mod APK Outfit7?
      • -
      • A: You can update Talking Tom Gold Run Mod APK Outfit7 by downloading and installing the latest version of the mod apk file from a trusted source. You can also check for updates in the game settings or notifications.
      • -
      • Q: How can I uninstall Talking Tom Gold Run Mod APK Outfit7?
      • -
      • A: You can uninstall Talking Tom Gold Run Mod APK Outfit7 by going to your device's settings and looking for apps or applications. Then, find Talking Tom Gold Run, tap on it, tap Uninstall, and confirm your action.
      • -

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Play 8 Ball Pool on PC with Stunning Graphics and Realistic Physics.md b/spaces/congsaPfin/Manga-OCR/logs/Play 8 Ball Pool on PC with Stunning Graphics and Realistic Physics.md deleted file mode 100644 index 613eb288b33c654a3b8a635973b6a3ff2dc6a674..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Play 8 Ball Pool on PC with Stunning Graphics and Realistic Physics.md +++ /dev/null @@ -1,142 +0,0 @@ -
      -

      How to Download Free 8 Ball Pool for PC

      -

      If you are a fan of pool games, you might have heard of or played 8 Ball Pool, one of the most popular and addictive pool games available online. But did you know that you can also play this game on your PC for free? In this article, we will show you how to download free 8 ball pool for PC using different methods, as well as the benefits of playing the game on a larger screen and with better controls. We will also share some tips and tricks for improving your skills and winning more matches in the game. So, let's get started!

      -

      What is 8 Ball Pool?

      -

      8 Ball Pool is a pool billiard game that is played on a table with six pockets, cue sticks, and sixteen balls (a cue ball and fifteen object balls). The object balls include seven solid-colored balls numbered 1 through 7, seven striped balls numbered 9 through 15, and the black 8 ball. The game is also known as solids and stripes, spots and stripes, highs and lows, etc.

      -

      download free 8 ball pool for pc


      Download ★★★★★ https://urlca.com/2uOaia



      -

      The objective of the game is to score the 8 ball in a "called" pocket after scoring all the balls from your assigned group (either solids or stripes) in any order. The game can be played in single-player or multiplayer mode, where you can challenge your friends or other players online. You can also customize your cue and table, participate in tournaments, join clubs, earn coins and cash, and rank up on the leaderboard.

      -

      8 Ball Pool is developed by Miniclip, a leading online gaming company that offers hundreds of games across various genres. The game was released in 2010 and has since become one of the most downloaded and played games on mobile devices. The game has over 500 million downloads on the Google Play Store and has received positive reviews from critics and players alike. The game is also available on iOS devices, Facebook, and web browsers.

      -

      Why Play 8 Ball Pool on PC?

      -

      While playing 8 Ball Pool on your mobile device is fun and convenient, there are some advantages of playing the game on your PC as well. Here are some of them:

      -
        -
      • You can enjoy a larger screen size that gives you a better view of the table, the balls, and the cues. This can help you aim more accurately and see more details.
      • -
      • You can use a mouse, keyboard, or gamepad to control your cue instead of tapping or swiping on your touchscreen. This can give you more precision and comfort while playing.
      • You can enjoy faster and smoother gameplay with less lag and glitches. You can also adjust the graphics and sound settings to suit your preferences. -
      • You can save your battery life and storage space on your mobile device by playing the game on your PC instead.
      • -
      -

      As you can see, playing 8 Ball Pool on PC can enhance your gaming experience and make it more enjoyable. So, how can you download free 8 ball pool for PC? Let's find out!

      -

      How to Download and Install 8 Ball Pool on PC?

      -

      There are two main methods of downloading and installing 8 Ball Pool on PC: using an Android emulator or using the web/PC version. We will explain both methods in detail below.

      -

      Method 1: Using an Android Emulator

      -

      An Android emulator is a software program that allows you to run Android apps and games on your PC. It creates a virtual Android environment on your PC that mimics the features and functions of an Android device. By using an Android emulator, you can access the Google Play Store and download 8 Ball Pool on your PC just like you would on your mobile device.
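      As a small illustration of how ordinary Android tooling sees an emulator, the Python sketch below asks adb to list the devices it can reach; with the Android platform-tools installed and ADB debugging enabled in the emulator's settings, the emulator's virtual device may show up in this list just like a physical phone. This is only a side note for the curious; you do not need it to install the game.

```python
import subprocess


def list_android_devices() -> None:
    """Print the devices and emulators that adb can currently see."""
    result = subprocess.run(["adb", "devices", "-l"],
                            capture_output=True, text=True)
    print(result.stdout.strip() or result.stderr.strip())


if __name__ == "__main__":
    list_android_devices()
```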

      -

      There are many Android emulators available online, but not all of them are compatible with 8 Ball Pool or offer the same performance and quality. Therefore, we recommend using one of the following three emulators, which have been tested and proven to work well with 8 Ball Pool: BlueStacks, MEmu, and NoxPlayer. Here are the reviews and instructions for each emulator:

      -

      How to download and play 8 ball pool on PC with BlueStacks emulator
      -8 ball pool game: the world's #1 pool game for free online
      -Miniclip 8 ball pool: play with friends and legends on your PC
      -Best tips and tricks for becoming a master pooler in 8 ball pool on PC
      -How to customize your cues and pool tables in 8 ball pool game
      -How to earn coins fast and easy in 8 ball pool on PC with BlueStacks
      -How to join tournaments and win trophies in 8 ball pool game
      -How to use the macro and multi-instance features in BlueStacks for 8 ball pool on PC
      -How to improve your skills and ranking in 8 ball pool game
      -How to challenge your friends or the world in 8 ball pool game
      -How to install and update 8 ball pool on PC with BlueStacks
      -How to play 8 ball pool offline on your PC
      -How to use the smart controls and native gamepad support in BlueStacks for 8 ball pool on PC
      -How to stream your 8 ball pool gameplay on Twitch or YouTube with BlueStacks
      -How to access thousands of productivity apps and tools on your PC with BlueStacks
      -How to play 8 ball pool on Mac with BlueStacks emulator
      -How to fix common issues and errors in 8 ball pool on PC with BlueStacks
      -How to get free gifts and rewards in 8 ball pool game
      -How to use the real-time translation feature in BlueStacks for 8 ball pool on PC
      -How to play different modes and levels in 8 ball pool game
      -How to download and play other sports games on PC with BlueStacks emulator
      -How to create your own club or join an existing one in 8 ball pool game
      -How to chat and communicate with other players in 8 ball pool game
      -How to use the high FPS and high definition graphics features in BlueStacks for 8 ball pool on PC
      -How to transfer your progress and data from mobile to PC or vice versa in 8 ball pool game
      -How to play 8 ball pool with a VPN on your PC with BlueStacks
      -How to use the rerolling feature in BlueStacks for 8 ball pool on PC
      -How to play 8 ball pool with friends on Facebook or Google+
      -How to use the MOBA mode feature in BlueStacks for 8 ball pool on PC
      -How to get unlimited coins and cash in 8 ball pool game (not recommended)
      -How to download and play other Miniclip games on PC with BlueStacks emulator
      -How to use the UTC converter feature in BlueStacks for 8 ball pool on PC
      -How to play 9 ball or snooker mode in 8 ball pool game
      -How to get exclusive cues and items in 8 ball pool game
      -How to use the advanced keymapping feature in BlueStacks for 8 ball pool on PC
      -How to play 8 ball pool on Windows XP, Vista, 7, 8, or 10 with BlueStacks emulator
      -How to get free spins and scratch cards in 8 ball pool game
      -How to use the screen recorder feature in BlueStacks for 8 ball pool on PC
      -How to play mini-games and quests in 8 ball pool game
      -How to download and play other popular games on PC with BlueStacks emulator

      -

      BlueStacks

      -

      BlueStacks is one of the most popular and widely used Android emulators in the world. It has over 400 million users and supports thousands of apps and games, including 8 Ball Pool. It offers a user-friendly interface, high compatibility, fast speed, and advanced features such as keyboard mapping, multi-instance, macro recorder, etc. It also has a dedicated app center where you can find and download the latest and most popular games.

      -

      To download and install 8 Ball Pool using BlueStacks, follow these steps:

      -
        -
      1. Download BlueStacks from its official website and install it on your PC.
      2. -
      3. Launch BlueStacks and sign in with your Google account (or create one if you don't have one).
      4. -
      5. Go to the app center and search for 8 Ball Pool in the search bar.
      6. -
      7. Select 8 Ball Pool from the search results and click on the install button.
      8. -
      9. Wait for the installation to complete and then click on the open button to launch the game.
      10. -
      11. Enjoy playing 8 Ball Pool on your PC with BlueStacks!
      12. -
      -

      MEmu

      -

      MEmu is another excellent Android emulator that is designed for gaming. It has over 100 million users and supports a wide range of apps and games, including 8 Ball Pool. It offers smooth and stable gameplay, high performance, low CPU usage, and multiple features such as keyboard mapping, gamepad support, screen recording, etc. It also has a built-in app store where you can find and download various games.

      -

      To download and install 8 Ball Pool using MEmu, follow these steps:

      -
        -
      1. Download MEmu from its official website and install it on your PC.
      2. -
      3. Launch MEmu and sign in with your Google account (or create one if you don't have one).
      4. -
      5. Go to the app store and search for 8 Ball Pool in the search bar.
      6. -
      7. Select 8 Ball Pool from the search results and click on the install button.
      8. -
      9. Wait for the installation to complete and then click on the icon to launch the game.
      10. -
      11. Enjoy playing 8 Ball Pool on your PC with MEmu!
      12. -
      -

      NoxPlayer

      -

      NoxPlayer is another great Android emulator that is optimized for gaming. It has over 150 million users and supports a large number of apps and games, including 8 Ball Pool. It offers fast and smooth gameplay, high compatibility, low memory usage, and various features such as keyboard mapping, gamepad support, screen capture, etc. It also has a built-in Google Play Store where you can find and download any game you want.

      -

      To download and install 8 Ball Pool using NoxPlayer, follow these steps:

      -
        -
      1. Download NoxPlayer from its official website and install it on your PC.
      2. Launch NoxPlayer and sign in with your Google account (or create one if you don't have one). -
      3. Go to the Google Play Store and search for 8 Ball Pool in the search bar.
      4. -
      5. Select 8 Ball Pool from the search results and click on the install button.
      6. -
      7. Wait for the installation to complete and then click on the icon to launch the game.
      8. -
      9. Enjoy playing 8 Ball Pool on your PC with NoxPlayer!
      10. -
      -

      Method 2: Using the Web/PC Version

      -

      If you don't want to use an Android emulator, you can also play 8 Ball Pool on your PC using the web/PC version. This is a browser-based version of the game that you can access from any PC without downloading or installing anything. You can play the game using your mouse or keyboard, and you can also log in with your Facebook account or Miniclip account to sync your progress and stats.

      -

      There are two websites where you can play 8 Ball Pool on your PC: Miniclip.com and 8ballpool.com. Here are the reviews and instructions for each website:

      -

      Miniclip.com

      -

      Miniclip.com is the official website of Miniclip, the developer of 8 Ball Pool. It offers hundreds of free online games across various genres, including sports, action, puzzle, strategy, etc. You can play 8 Ball Pool on Miniclip.com along with other popular games such as Agar.io, Soccer Stars, Tanki Online, etc. You can also chat with other players, join clubs, participate in tournaments, and earn achievements.

      -

      To play 8 Ball Pool on Miniclip.com, follow these steps:

      -
        -
      1. Go to Miniclip.com from your web browser and click on the games tab.
      2. -
      3. Scroll down and find 8 Ball Pool from the list of games and click on it.
      4. -
      5. Wait for the game to load and then click on the play button.
      6. -
      7. Select your preferred game mode (1-on-1, tournament, practice, etc.) and start playing.
      8. -
      9. Enjoy playing 8 Ball Pool on Miniclip.com!
      10. -
      -

      8ballpool.com

      -

      8ballpool.com is another website where you can play 8 Ball Pool on your PC. It is a dedicated website for the game that offers a simple and easy-to-use interface. You can play the game using your mouse or keyboard, and you can also log in with your Facebook account or Miniclip account to sync your progress and stats. You can also chat with other players, join clubs, participate in tournaments, and earn achievements.

      -

      To play 8 Ball Pool on 8ballpool.com, follow these steps:

      -
        -
      1. Go to 8ballpool.com from your web browser and click on the play button.
      2. -
      3. Select your preferred game mode (1-on-1, tournament, practice, etc.) and start playing.
      4. -
      5. Enjoy playing 8 Ball Pool on 8ballpool.com!
      6. -
      -

      Tips and Tricks for Playing 8 Ball Pool on PC

      -

      Now that you know how to download free 8 ball pool for PC using different methods, you might want to learn some tips and tricks for playing the game better and winning more matches. Here are some of them:

      -
        -
      • Aim carefully and use the guideline to help you line up your shots. You can also adjust the power of your shots by dragging the power bar up or down.
      • Plan your shots ahead and try to clear the balls in a logical order. You can also use the spin feature to control the cue ball and avoid scratching or getting stuck. -
      • Learn the rules and etiquette of the game and follow them. For example, you should always call your pocket before shooting the 8 ball, and you should not quit or forfeit a match unless you have a valid reason.
      • -
      • Practice regularly and play against different opponents with different skill levels. You can also watch replays of your matches or other players' matches to learn from your mistakes or their strategies.
      • -
      • Upgrade your cue and table as you progress in the game. You can use coins and cash to buy new cues and tables that have better attributes such as aim, power, spin, time, etc.
      • -
      • Join a club and play with your friends or other club members. You can also chat with them, share tips, and participate in club tournaments and events.
      • -
      -

      By following these tips and tricks, you can improve your skills and have more fun playing 8 Ball Pool on PC.
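      To make the first tip a little more concrete: a classic way to think about aiming is the "ghost ball" idea, where you aim the cue ball at the spot one ball diameter behind the object ball along the line from the pocket through that ball. The Python sketch below works out that spot in plain 2-D coordinates; it is only a toy illustration of the geometry, not how 8 Ball Pool draws its in-game guideline, and the coordinates in the example are made up.

```python
import math

BALL_DIAMETER = 2.0  # arbitrary units; only the ratios matter here


def ghost_ball_point(object_ball, pocket, diameter=BALL_DIAMETER):
    """Point to aim the cue ball's centre at so the object ball heads toward the pocket."""
    ox, oy = object_ball
    px, py = pocket
    dx, dy = ox - px, oy - py          # direction from the pocket back through the ball
    dist = math.hypot(dx, dy)
    if dist == 0:
        raise ValueError("Object ball is already at the pocket")
    return (ox + dx / dist * diameter, oy + dy / dist * diameter)


if __name__ == "__main__":
    # Hypothetical table coordinates: object ball at (50, 30), corner pocket at (100, 50)
    print(ghost_ball_point((50.0, 30.0), (100.0, 50.0)))
```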

      -

      Conclusion

      -

      8 Ball Pool is one of the best pool games that you can play online. It offers a realistic and addictive gameplay, a variety of game modes, a large community of players, and many customization options. You can also play this game on your PC for free using an Android emulator or the web/PC version. Playing 8 Ball Pool on PC can give you a better gaming experience and more advantages than playing on your mobile device.

      -

      So, what are you waiting for? Download free 8 ball pool for PC today and enjoy the thrill of playing pool with millions of players around the world. Whether you are a beginner or a pro, you will find something to love about this game. And don't forget to share this article with your friends who might also be interested in playing 8 Ball Pool on PC!

      -

      FAQs

      -

      Here are some frequently asked questions and answers about 8 Ball Pool and its PC version:

      -
        -
      • Q: Is 8 Ball Pool free to play?
      • -
      • A: Yes, 8 Ball Pool is free to play on any device or platform. However, you can also make in-app purchases to buy coins, cash, cues, tables, etc.
      • -
      • Q: Is 8 Ball Pool safe to play?
      • -
      • A: Yes, 8 Ball Pool is safe to play as long as you download it from a trusted source such as Google Play Store, App Store, Facebook, Miniclip.com, 8ballpool.com, or an official Android emulator. You should also avoid clicking on any suspicious links or ads that might appear in the game or on the websites.
      • -
      • Q: How can I contact the support team of 8 Ball Pool?
      • A: You can contact the support team of 8 Ball Pool by filling out a form on their website or by sending an email to support@miniclip.com. You can also visit their help center or their community forum for more information and assistance. -
      • Q: How can I play 8 Ball Pool offline?
      • -
      • A: You can play 8 Ball Pool offline by using the practice mode or the offline tournament mode. You can access these modes from the main menu of the game. However, you will not be able to earn coins, cash, or achievements while playing offline.
      • -
      • Q: How can I get more coins and cash in 8 Ball Pool?
      • -
      • A: You can get more coins and cash in 8 Ball Pool by winning matches, participating in tournaments, completing missions, spinning the wheel, watching videos, opening boxes, etc. You can also buy coins and cash with real money or use some hacks or cheats, but we do not recommend that as it might get you banned or scammed.
      • -

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Spirit Bubble Shooter Explore Five Magical Worlds and Pop Bubbles.md b/spaces/congsaPfin/Manga-OCR/logs/Spirit Bubble Shooter Explore Five Magical Worlds and Pop Bubbles.md deleted file mode 100644 index 28f9509d275e931b23e48ccfb0a3355d9e9cff49..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Spirit Bubble Shooter Explore Five Magical Worlds and Pop Bubbles.md +++ /dev/null @@ -1,123 +0,0 @@ - -

      Spirit Bubble Shooter Game Download: How to Play and Enjoy this Fun and Relaxing Game

      -

      Are you looking for a new game to play on your mobile phone or tablet? Do you love popping bubbles and solving puzzles? Do you want to explore mystical worlds and have a blast? If you answered yes to any of these questions, then you should try Spirit Bubble Shooter, a free match 3 bubble game that will keep you entertained for hours. In this article, we will tell you everything you need to know about this game, how to download it, how to play it, and why you should play it. Let's get started!

      -

      What is Spirit Bubble Shooter?

      -

      Spirit Bubble Shooter is a free match 3 bubble game developed by Famobi, a company that specializes in casual games for mobile devices. The game has 50 levels and 5 mystical worlds to explore, each with its own theme and challenges. The game is easy to play, but hard to master, as you will need to use your strategy and logic skills to complete the missions and clear the board.

      -

      spirit bubble shooter game download


      Download File »»» https://urlca.com/2uOfsn



      -

      A free match 3 bubble game with 50 levels and 5 mystical worlds

      -

      The game is based on the classic bubble shooter concept, where you have to combine at least 3 bubbles of the same color to make them explode. The game has 50 levels, each with a different goal and layout. Some levels require you to pop a certain number of bubbles, others require you to free trapped animals or collect special items. The game also has 5 mystical worlds, each with its own theme and background music. You will travel through forests, deserts, oceans, mountains, and space, encountering different creatures and obstacles along the way.

      -

      A game for the whole family and puzzle lovers alike

      -

      Spirit Bubble Shooter is a game that can be enjoyed by anyone, regardless of their age or skill level. The game is suitable for kids, as it has cute graphics, simple controls, and positive feedback. The game is also suitable for adults, as it has challenging puzzles, strategic gameplay, and relaxing sound effects. The game is perfect for puzzle lovers, as it will test your brain and your matching skills while playing this addictive game.

      -

      spirit bubble shooter game free download
      -spirit bubble shooter game online
      -spirit bubble shooter game for android
      -spirit bubble shooter game for pc
      -spirit bubble shooter game apk
      -spirit bubble shooter game app
      -spirit bubble shooter game play store
      -spirit bubble shooter game famobi
      -spirit bubble shooter game mod apk
      -spirit bubble shooter game hack
      -spirit bubble shooter game cheats
      -spirit bubble shooter game tips
      -spirit bubble shooter game tricks
      -spirit bubble shooter game guide
      -spirit bubble shooter game walkthrough
      -spirit bubble shooter game review
      -spirit bubble shooter game rating
      -spirit bubble shooter game levels
      -spirit bubble shooter game worlds
      -spirit bubble shooter game stars
      -spirit bubble shooter game challenges
      -spirit bubble shooter game puzzles
      -spirit bubble shooter game strategy
      -spirit bubble shooter game fun
      -spirit bubble shooter game addictive
      -spirit bubble shooter game relaxing
      -spirit bubble shooter game colorful
      -spirit bubble shooter game mystical
      -spirit bubble shooter game magic
      -spirit bubble shooter game firework
      -spirit bubble shooter game confetti
      -spirit bubble shooter game bonus
      -spirit bubble shooter game coins
      -spirit bubble shooter game boosters
      -spirit bubble shooter game bubbles
      -spirit bubble shooter game balls
      -spirit bubble shooter game pop
      -spirit bubble shooter game blast
      -spirit bubble shooter game match 3
      -spirit bubble shooter game aim and shoot
      -spirit bubble shooter game drag and drop
      -spirit bubble shooter game train your brain
      -spirit bubble shooter game family friendly
      -spirit bubble shooter game puzzle lovers
      -spirit bubble shooter game strategy fans
      -spirit bubble shooter game retro mode
      -spirit bubble shooter game arcade mode
      -spirit bubble shooter game classic mode

      -

      A game that challenges your strategy and logic skills

      -

      Spirit Bubble Shooter is not just a mindless bubble popping game. It is also a game that requires you to think ahead and plan your moves carefully. You will need to aim accurately, shoot strategically, and drop large clusters of bubbles to earn bonus points. You will also need to use your logic and puzzle-solving skills to overcome the obstacles and puzzles that the game presents. Some levels have bubbles that change color, others have bubbles that are frozen or locked, and others have bubbles that are surrounded by rocks or spikes. You will need to use special bubbles and boosters to clear these obstacles and complete the levels.

      -

      How to download Spirit Bubble Shooter?

      -

      Spirit Bubble Shooter is available on Google Play for Android devices. You can download it for free by following these steps:

      -
        -
      1. Open the Google Play app on your device.
      2. -
      3. Search for "Spirit Bubble Shooter" (the game is listed on Google Play as "Bubble Spirit").
      4. -
      5. Tap on the "Install" button and wait for the download to finish.
      6. -
      7. Tap on the "Open" button or find the game icon on your home screen or app drawer and tap on it to launch the game.
      8. -
      -

      The game is easy to install and play, and it does not require a lot of storage space or internet connection. You can play the game offline or online, depending on your preference. However, you should be aware that the game does not provide any information about its data safety or privacy policy, so you should be careful about what information you share with the game or its developers.

      -

      Available on Google Play for Android devices

      -

      Spirit Bubble Shooter is compatible with Android devices that have Android 4.4 or higher. The game has a rating of 4.2 out of 5 stars on Google Play, based on over 2,000 reviews. The game has been downloaded over 100,000 times and has a size of 23 MB. The game is updated regularly with new levels and features, so you can expect to have a fresh and exciting experience every time you play.

      -

      Easy to install and play offline or online

      -

      Spirit Bubble Shooter is a user-friendly game that does not require a lot of technical skills or knowledge to play. The game has simple and intuitive controls, where you just need to tap on the screen to aim and shoot the bubbles. The game also has a tutorial mode that explains the basics of the game and the different types of bubbles and boosters. You can play the game offline or online, depending on your preference. If you play offline, you can still enjoy the game without any interruptions or ads. If you play online, you can connect with your Facebook account and compete with your friends and other players around the world.

      -

      No information about data safety or privacy policy

      -

      One thing that you should be aware of before downloading Spirit Bubble Shooter is that the game does not provide any information about its data safety or privacy policy. The game does not have a link to its terms of service or privacy policy on its Google Play page or within the game itself. The game also does not ask for your permission to access any of your personal data or device features, such as your contacts, camera, microphone, location, etc. This means that you do not know what information the game collects from you, how it uses it, or who it shares it with. Therefore, you should be careful about what information you share with the game or its developers, and avoid clicking on any suspicious links or ads that may appear in the game.

      -

      How to play Spirit Bubble Shooter?

      -

      Spirit Bubble Shooter is a fun and easy game to play, but it also has some challenges and surprises that will keep you hooked. The goal of the game is to clear all the bubbles from the board by matching at least 3 bubbles of the same color. To do this, you need to aim, match and pop bubbles of the same color, complete the level's goal and earn stars and coins, and use special bubbles and boosters to overcome obstacles and puzzles.

      -

      Aim, match and pop bubbles of the same color

      -

      The main mechanic of the game is to aim, match and pop bubbles of the same color. To aim, you need to tap on the screen where you want to shoot the bubble. You will see a dotted line that shows the trajectory of the bubble. You can also bounce the bubble off the walls to reach tricky spots. To match, you need to shoot the bubble towards at least 2 other bubbles of the same color. When you do this, the bubbles will pop and disappear from the board. To pop, you need to clear all the bubbles from the board or reach a certain score within a limited number of moves or time.
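      The "match at least 3 bubbles of the same color" rule is, at heart, a connected-cluster search on the board. The Python sketch below shows the idea on a plain square grid: starting from the cell where the shot lands, it gathers all adjacent bubbles of the same color and pops them if the cluster has three or more members. The real game uses its own engine and a staggered bubble layout, so treat this purely as an illustration of the matching rule.

```python
from collections import deque


def find_cluster(grid, start):
    """Return the set of cells connected to `start` that share its colour."""
    rows, cols = len(grid), len(grid[0])
    colour = grid[start[0]][start[1]]
    if colour is None:
        return set()
    seen, queue = {start}, deque([start])
    while queue:
        r, c = queue.popleft()
        for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
            if 0 <= nr < rows and 0 <= nc < cols \
                    and (nr, nc) not in seen and grid[nr][nc] == colour:
                seen.add((nr, nc))
                queue.append((nr, nc))
    return seen


def pop_if_match(grid, landing_cell, minimum=3):
    """Remove the cluster containing `landing_cell` when it has at least `minimum` bubbles."""
    cluster = find_cluster(grid, landing_cell)
    if len(cluster) >= minimum:
        for r, c in cluster:
            grid[r][c] = None          # None marks an empty cell
    return len(cluster) >= minimum


if __name__ == "__main__":
    board = [["R", "R", "G"],
             ["R", "B", "G"],
             ["B", "B", "G"]]
    print(pop_if_match(board, (0, 0)))  # True: three connected "R" bubbles pop
    print(board)
```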

      -

      Complete the level's goal and earn stars and coins

      -

      Each level in Spirit Bubble Shooter has a different goal that you need to complete in order to pass it. Some levels require you to pop a certain number of bubbles of a specific color, others require you to free trapped animals or collect special items that are hidden inside some bubbles. You will see the goal at the top of the screen, along with the number of moves or time you have left. You will also see the number of stars you can earn based on your performance. The more bubbles you pop and the fewer moves or time you use, the more stars you will get. You will also earn coins for every level you complete, which you can use to buy more boosters or lives.
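      The star rule described here — pop more bubbles while using fewer moves — can be sketched as a tiny scoring function. The thresholds in the Python example below are invented purely for illustration; the game does not publish its actual scoring formula.

```python
def stars_earned(bubbles_popped, moves_used, move_limit):
    """Toy 1-3 star rating: reward popping a lot while staying well under the move limit."""
    if move_limit <= 0:
        raise ValueError("move_limit must be positive")
    efficiency = 1.0 - moves_used / move_limit   # 1.0 = used no moves, 0.0 = used them all
    score = bubbles_popped * (1.0 + efficiency)
    if score >= 120:        # hypothetical thresholds, for illustration only
        return 3
    if score >= 80:
        return 2
    return 1


if __name__ == "__main__":
    print(stars_earned(bubbles_popped=90, moves_used=10, move_limit=30))  # 3 under these toy numbers
```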

      -

      Use special bubbles and boosters to overcome obstacles and puzzles

      -

      Spirit Bubble Shooter is not just a simple bubble popping game. It also has some obstacles and puzzles that will make the game more challenging and interesting. Some levels have bubbles that change color, others have bubbles that are frozen or locked, and others have bubbles that are surrounded by rocks or spikes. You will need to use special bubbles and boosters to clear these obstacles and complete the levels. Some of the special bubbles are:

      -
        -
      • The rainbow bubble, which can match with any color.
      • -
      • The bomb bubble, which can explode and pop all the bubbles around it.
      • -
      • The fire bubble, which can burn and pop all the bubbles in a row.
      • -
      • The lightning bubble, which can zap and pop all the bubbles in a column.
      • -
      -

      Some of the boosters are:

      -
        -
      • The aim booster, which can help you aim more precisely.
      • -
      • The color booster, which can change the color of the next bubble.
      • -
      • The swap booster, which can swap the current bubble with the next one.
      • -
      • The extra booster, which can give you extra moves or time.
      • -
      -

      You can get these special bubbles and boosters by popping them on the board, by earning them as rewards, or by buying them with coins.
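      The effects listed above map naturally onto simple board operations, which the Python sketch below illustrates on the same kind of square grid used earlier: the bomb clears its neighbors, the fire clears a row, the lightning clears a column, and the rainbow matches any color. This is only a toy model of the described behavior, not the game's actual implementation.

```python
def pop_bomb(grid, r, c):
    """Bomb bubble: clear the cell and its eight neighbours."""
    rows, cols = len(grid), len(grid[0])
    for dr in (-1, 0, 1):
        for dc in (-1, 0, 1):
            nr, nc = r + dr, c + dc
            if 0 <= nr < rows and 0 <= nc < cols:
                grid[nr][nc] = None


def pop_fire(grid, r, c):
    """Fire bubble: burn every bubble in the row."""
    grid[r] = [None] * len(grid[r])


def pop_lightning(grid, r, c):
    """Lightning bubble: zap every bubble in the column."""
    for row in grid:
        row[c] = None


def matches(colour_on_board, shot_colour):
    """Rainbow bubbles ('*') match any colour."""
    return shot_colour == "*" or colour_on_board == shot_colour


if __name__ == "__main__":
    board = [["R", "G", "B"],
             ["G", "B", "R"],
             ["B", "R", "G"]]
    pop_lightning(board, 0, 1)
    print(board)   # middle column cleared
```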

      -

      Why play Spirit Bubble Shooter?

      -

      Spirit Bubble Shooter is a game that has many benefits and advantages for its players. Here are some of the reasons why you should play this game:

      -

      It's fun, relaxing and addictive

      -

      Spirit Bubble Shooter is a game that will help you have fun and relax while playing it. The game has simple and enjoyable gameplay that will keep you hooked for hours. It also has soothing and calming sound effects and music that will help you relax and unwind. The game is addictive, as you will want to play more and more levels and see what surprises await you in each world.

      -

      It's colorful, vibrant and mystical

      -

      Spirit Bubble Shooter is a game that will delight your eyes and your imagination with its colorful, vibrant and mystical graphics. The game has a beautiful and detailed design that will make you feel like you are in a fantasy world. The game also has a variety of themes and scenarios that will make you travel through different environments and atmospheres. The game is mystical, as you will encounter different creatures and elements that will add magic and mystery to your adventure.

      -

      It's rewarding, challenging and satisfying

      -

      Spirit Bubble Shooter is a game that will reward you for your efforts and skills while playing it. The game has a star system that will measure your performance and give you feedback on how well you did. The game also has coins that you can earn and use to buy more boosters or lives. The game is challenging, as it will test your strategy and logic skills with its obstacles and puzzles. The game is satisfying, as it will make you feel proud and happy when you complete a level or clear a board.

      -

      Conclusion

      -

      Spirit Bubble Shooter is a free match 3 bubble game that will provide you with hours of entertainment and relaxation. The game has 50 levels and 5 mystical worlds to explore, each with its own theme and challenges. The game is easy to play, but hard to master, as you will need to use your strategy and logic skills to complete the missions and clear the board. The game is fun, relaxing and addictive, as it will make you pop bubbles and solve puzzles while enjoying the soothing sound effects and music. The game is colorful, vibrant and mystical, as it will make you travel through different environments and atmospheres while encountering different creatures and elements. The game is rewarding, challenging and satisfying, as it will give you feedback, coins, and boosters for your performance and skills. If you are looking for a new game to play on your mobile device, you should download Spirit Bubble Shooter today and have a blast!

      -

      FAQs

      -

      Here are some of the frequently asked questions about Spirit Bubble Shooter:

      -
        -
      1. How can I get more lives in Spirit Bubble Shooter?
      2. -

        You can get more lives in Spirit Bubble Shooter by waiting for them to refill over time, by buying them with coins, or by asking your friends on Facebook to send you some.

        -
      3. How can I get more coins in Spirit Bubble Shooter?
      4. -

        You can get more coins in Spirit Bubble Shooter by completing levels, by watching ads, or by buying them with real money.

        -
      5. How can I get more boosters in Spirit Bubble Shooter?
      6. -

        You can get more boosters in Spirit Bubble Shooter by popping them on the board, by earning them as rewards, or by buying them with coins.

        -
      7. How can I unlock more worlds in Spirit Bubble Shooter?
      8. -

        You can unlock more worlds in Spirit Bubble Shooter by completing levels and earning stars. You need a certain number of stars to unlock each world.

        -
      9. How can I contact the developers of Spirit Bubble Shooter?
      10. -

        You can contact the developers of Spirit Bubble Shooter by sending them an email at support@famobi.com or by visiting their website at https://famobi.com/.

        -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/coraKong/WorldSimulation/README.md b/spaces/coraKong/WorldSimulation/README.md deleted file mode 100644 index c883e1de9e021eff3194ad220342c82f1badf97c..0000000000000000000000000000000000000000 --- a/spaces/coraKong/WorldSimulation/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: WorldSimulation -emoji: 🚀 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp deleted file mode 100644 index c843487b5fa4e8077dd27402ec99009266ddda8d..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -#include "box_iou_rotated.h" -#include "box_iou_rotated_utils.h" - -namespace detectron2 { - -template -void box_iou_rotated_cpu_kernel( - const at::Tensor& boxes1, - const at::Tensor& boxes2, - at::Tensor& ious) { - auto num_boxes1 = boxes1.size(0); - auto num_boxes2 = boxes2.size(0); - - for (int i = 0; i < num_boxes1; i++) { - for (int j = 0; j < num_boxes2; j++) { - ious[i * num_boxes2 + j] = single_box_iou_rotated( - boxes1[i].data_ptr(), boxes2[j].data_ptr()); - } - } -} - -at::Tensor box_iou_rotated_cpu( - // input must be contiguous: - const at::Tensor& boxes1, - const at::Tensor& boxes2) { - auto num_boxes1 = boxes1.size(0); - auto num_boxes2 = boxes2.size(0); - at::Tensor ious = - at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); - - box_iou_rotated_cpu_kernel(boxes1, boxes2, ious); - - // reshape from 1d array to 2d array - auto shape = std::vector{num_boxes1, num_boxes2}; - return ious.reshape(shape); -} - -} // namespace detectron2 diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py deleted file mode 100644 index f451e08ad2eb0732dcb806b1851eb978d4acf136..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py +++ /dev/null @@ -1,44 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='PSPHead', - in_channels=2048, - in_index=3, - channels=512, - pool_scales=(1, 2, 3, 6), - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - 
align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/cownclown/Image-and-3D-Model-Creator/PIFu/lib/ext_transform.py b/spaces/cownclown/Image-and-3D-Model-Creator/PIFu/lib/ext_transform.py deleted file mode 100644 index 7e1104bd7b1a24303370c066d1487f83a9bfece0..0000000000000000000000000000000000000000 --- a/spaces/cownclown/Image-and-3D-Model-Creator/PIFu/lib/ext_transform.py +++ /dev/null @@ -1,78 +0,0 @@ -import random - -import numpy as np -from skimage.filters import gaussian -import torch -from PIL import Image, ImageFilter - - -class RandomVerticalFlip(object): - def __call__(self, img): - if random.random() < 0.5: - return img.transpose(Image.FLIP_TOP_BOTTOM) - return img - - -class DeNormalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, tensor): - for t, m, s in zip(tensor, self.mean, self.std): - t.mul_(s).add_(m) - return tensor - - -class MaskToTensor(object): - def __call__(self, img): - return torch.from_numpy(np.array(img, dtype=np.int32)).long() - - -class FreeScale(object): - def __init__(self, size, interpolation=Image.BILINEAR): - self.size = tuple(reversed(size)) # size: (h, w) - self.interpolation = interpolation - - def __call__(self, img): - return img.resize(self.size, self.interpolation) - - -class FlipChannels(object): - def __call__(self, img): - img = np.array(img)[:, :, ::-1] - return Image.fromarray(img.astype(np.uint8)) - - -class RandomGaussianBlur(object): - def __call__(self, img): - sigma = 0.15 + random.random() * 1.15 - blurred_img = gaussian(np.array(img), sigma=sigma, multichannel=True) - blurred_img *= 255 - return Image.fromarray(blurred_img.astype(np.uint8)) - -# Lighting data augmentation take from here - https://github.com/eladhoffer/convNet.pytorch/blob/master/preprocess.py - - -class Lighting(object): - """Lighting noise(AlexNet - style PCA - based noise)""" - - def __init__(self, alphastd, - eigval=(0.2175, 0.0188, 0.0045), - eigvec=((-0.5675, 0.7192, 0.4009), - (-0.5808, -0.0045, -0.8140), - (-0.5836, -0.6948, 0.4203))): - self.alphastd = alphastd - self.eigval = torch.Tensor(eigval) - self.eigvec = torch.Tensor(eigvec) - - def __call__(self, img): - if self.alphastd == 0: - return img - - alpha = img.new().resize_(3).normal_(0, self.alphastd) - rgb = self.eigvec.type_as(img).clone()\ - .mul(alpha.view(1, 3).expand(3, 3))\ - .mul(self.eigval.view(1, 3).expand(3, 3))\ - .sum(1).squeeze() - return img.add(rgb.view(3, 1, 1).expand_as(img)) diff --git a/spaces/crawly/White-box-Cartoonization/README.md b/spaces/crawly/White-box-Cartoonization/README.md deleted file mode 100644 index 9860239cf42c94e385faaaa75a85311e010d64f7..0000000000000000000000000000000000000000 --- a/spaces/crawly/White-box-Cartoonization/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -python_version: 3.7 -title: White Box Cartoonization -emoji: 📚 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: hylee/White-box-Cartoonization ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/crawly/White-box-Cartoonization/wbc/network.py b/spaces/crawly/White-box-Cartoonization/wbc/network.py deleted file mode 100644 index 6f16cee1aa1994d0a78c524f459764de5164e637..0000000000000000000000000000000000000000 --- 
a/spaces/crawly/White-box-Cartoonization/wbc/network.py +++ /dev/null @@ -1,62 +0,0 @@ -import tensorflow as tf -import numpy as np -import tensorflow.contrib.slim as slim - - - -def resblock(inputs, out_channel=32, name='resblock'): - - with tf.variable_scope(name): - - x = slim.convolution2d(inputs, out_channel, [3, 3], - activation_fn=None, scope='conv1') - x = tf.nn.leaky_relu(x) - x = slim.convolution2d(x, out_channel, [3, 3], - activation_fn=None, scope='conv2') - - return x + inputs - - - - -def unet_generator(inputs, channel=32, num_blocks=4, name='generator', reuse=False): - with tf.variable_scope(name, reuse=reuse): - - x0 = slim.convolution2d(inputs, channel, [7, 7], activation_fn=None) - x0 = tf.nn.leaky_relu(x0) - - x1 = slim.convolution2d(x0, channel, [3, 3], stride=2, activation_fn=None) - x1 = tf.nn.leaky_relu(x1) - x1 = slim.convolution2d(x1, channel*2, [3, 3], activation_fn=None) - x1 = tf.nn.leaky_relu(x1) - - x2 = slim.convolution2d(x1, channel*2, [3, 3], stride=2, activation_fn=None) - x2 = tf.nn.leaky_relu(x2) - x2 = slim.convolution2d(x2, channel*4, [3, 3], activation_fn=None) - x2 = tf.nn.leaky_relu(x2) - - for idx in range(num_blocks): - x2 = resblock(x2, out_channel=channel*4, name='block_{}'.format(idx)) - - x2 = slim.convolution2d(x2, channel*2, [3, 3], activation_fn=None) - x2 = tf.nn.leaky_relu(x2) - - h1, w1 = tf.shape(x2)[1], tf.shape(x2)[2] - x3 = tf.image.resize_bilinear(x2, (h1*2, w1*2)) - x3 = slim.convolution2d(x3+x1, channel*2, [3, 3], activation_fn=None) - x3 = tf.nn.leaky_relu(x3) - x3 = slim.convolution2d(x3, channel, [3, 3], activation_fn=None) - x3 = tf.nn.leaky_relu(x3) - - h2, w2 = tf.shape(x3)[1], tf.shape(x3)[2] - x4 = tf.image.resize_bilinear(x3, (h2*2, w2*2)) - x4 = slim.convolution2d(x4+x0, channel, [3, 3], activation_fn=None) - x4 = tf.nn.leaky_relu(x4) - x4 = slim.convolution2d(x4, 3, [7, 7], activation_fn=None) - - return x4 - -if __name__ == '__main__': - - - pass \ No newline at end of file diff --git a/spaces/cymic/Talking_Head_Anime_3/tha3/mocap/__init__.py b/spaces/cymic/Talking_Head_Anime_3/tha3/mocap/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/cymic/Talking_Head_Anime_3/tha3/poser/modes/standard_float.py b/spaces/cymic/Talking_Head_Anime_3/tha3/poser/modes/standard_float.py deleted file mode 100644 index ec96ace7e1c4aced4b5de3d7878f78542d8add1d..0000000000000000000000000000000000000000 --- a/spaces/cymic/Talking_Head_Anime_3/tha3/poser/modes/standard_float.py +++ /dev/null @@ -1,362 +0,0 @@ -from enum import Enum -from typing import List, Dict, Optional - -import torch -from torch import Tensor -from torch.nn import Module -from torch.nn.functional import interpolate - -from tha3.nn.eyebrow_decomposer.eyebrow_decomposer_00 import EyebrowDecomposer00, \ - EyebrowDecomposer00Factory, EyebrowDecomposer00Args -from tha3.nn.eyebrow_morphing_combiner.eyebrow_morphing_combiner_00 import \ - EyebrowMorphingCombiner00Factory, EyebrowMorphingCombiner00Args, EyebrowMorphingCombiner00 -from tha3.nn.face_morpher.face_morpher_08 import FaceMorpher08Args, FaceMorpher08Factory -from tha3.poser.general_poser_02 import GeneralPoser02 -from tha3.poser.poser import PoseParameterCategory, PoseParameters -from tha3.nn.editor.editor_07 import Editor07, Editor07Args -from tha3.nn.two_algo_body_rotator.two_algo_face_body_rotator_05 import TwoAlgoFaceBodyRotator05, \ - TwoAlgoFaceBodyRotator05Args -from tha3.util import torch_load -from 
tha3.compute.cached_computation_func import TensorListCachedComputationFunc -from tha3.compute.cached_computation_protocol import CachedComputationProtocol -from tha3.nn.nonlinearity_factory import ReLUFactory, LeakyReLUFactory -from tha3.nn.normalization import InstanceNorm2dFactory -from tha3.nn.util import BlockArgs - - -class Network(Enum): - eyebrow_decomposer = 1 - eyebrow_morphing_combiner = 2 - face_morpher = 3 - two_algo_face_body_rotator = 4 - editor = 5 - - @property - def outputs_key(self): - return f"{self.name}_outputs" - - -class Branch(Enum): - face_morphed_half = 1 - face_morphed_full = 2 - all_outputs = 3 - - -NUM_EYEBROW_PARAMS = 12 -NUM_FACE_PARAMS = 27 -NUM_ROTATION_PARAMS = 6 - - -class FiveStepPoserComputationProtocol(CachedComputationProtocol): - def __init__(self, eyebrow_morphed_image_index: int): - super().__init__() - self.eyebrow_morphed_image_index = eyebrow_morphed_image_index - self.cached_batch_0 = None - self.cached_eyebrow_decomposer_output = None - - def compute_func(self) -> TensorListCachedComputationFunc: - def func(modules: Dict[str, Module], - batch: List[Tensor], - outputs: Dict[str, List[Tensor]]): - if self.cached_batch_0 is None: - new_batch_0 = True - elif batch[0].shape[0] != self.cached_batch_0.shape[0]: - new_batch_0 = True - else: - new_batch_0 = torch.max((batch[0] - self.cached_batch_0).abs()).item() > 0 - if not new_batch_0: - outputs[Network.eyebrow_decomposer.outputs_key] = self.cached_eyebrow_decomposer_output - output = self.get_output(Branch.all_outputs.name, modules, batch, outputs) - if new_batch_0: - self.cached_batch_0 = batch[0] - self.cached_eyebrow_decomposer_output = outputs[Network.eyebrow_decomposer.outputs_key] - return output - - return func - - def compute_output(self, key: str, modules: Dict[str, Module], batch: List[Tensor], - outputs: Dict[str, List[Tensor]]) -> List[Tensor]: - if key == Network.eyebrow_decomposer.outputs_key: - input_image = batch[0][:, :, 64:192, 64 + 128:192 + 128] - return modules[Network.eyebrow_decomposer.name].forward(input_image) - elif key == Network.eyebrow_morphing_combiner.outputs_key: - eyebrow_decomposer_output = self.get_output(Network.eyebrow_decomposer.outputs_key, modules, batch, outputs) - background_layer = eyebrow_decomposer_output[EyebrowDecomposer00.BACKGROUND_LAYER_INDEX] - eyebrow_layer = eyebrow_decomposer_output[EyebrowDecomposer00.EYEBROW_LAYER_INDEX] - eyebrow_pose = batch[1][:, :NUM_EYEBROW_PARAMS] - return modules[Network.eyebrow_morphing_combiner.name].forward( - background_layer, - eyebrow_layer, - eyebrow_pose) - elif key == Network.face_morpher.outputs_key: - eyebrow_morphing_combiner_output = self.get_output( - Network.eyebrow_morphing_combiner.outputs_key, modules, batch, outputs) - eyebrow_morphed_image = eyebrow_morphing_combiner_output[self.eyebrow_morphed_image_index] - input_image = batch[0][:, :, 32:32 + 192, (32 + 128):(32 + 192 + 128)].clone() - input_image[:, :, 32:32 + 128, 32:32 + 128] = eyebrow_morphed_image - face_pose = batch[1][:, NUM_EYEBROW_PARAMS:NUM_EYEBROW_PARAMS + NUM_FACE_PARAMS] - return modules[Network.face_morpher.name].forward(input_image, face_pose) - elif key == Branch.face_morphed_full.name: - face_morpher_output = self.get_output(Network.face_morpher.outputs_key, modules, batch, outputs) - face_morphed_image = face_morpher_output[0] - input_image = batch[0].clone() - input_image[:, :, 32:32 + 192, 32 + 128:32 + 192 + 128] = face_morphed_image - return [input_image] - elif key == Branch.face_morphed_half.name: - face_morphed_full = 
self.get_output(Branch.face_morphed_full.name, modules, batch, outputs)[0] - return [ - interpolate(face_morphed_full, size=(256, 256), mode='bilinear', align_corners=False) - ] - elif key == Network.two_algo_face_body_rotator.outputs_key: - face_morphed_half = self.get_output(Branch.face_morphed_half.name, modules, batch, outputs)[0] - rotation_pose = batch[1][:, NUM_EYEBROW_PARAMS + NUM_FACE_PARAMS:] - return modules[Network.two_algo_face_body_rotator.name].forward(face_morphed_half, rotation_pose) - elif key == Network.editor.outputs_key: - input_original_image = self.get_output(Branch.face_morphed_full.name, modules, batch, outputs)[0] - rotator_outputs = self.get_output( - Network.two_algo_face_body_rotator.outputs_key, modules, batch, outputs) - half_warped_image = rotator_outputs[TwoAlgoFaceBodyRotator05.WARPED_IMAGE_INDEX] - full_warped_image = interpolate( - half_warped_image, size=(512, 512), mode='bilinear', align_corners=False) - half_grid_change = rotator_outputs[TwoAlgoFaceBodyRotator05.GRID_CHANGE_INDEX] - full_grid_change = interpolate( - half_grid_change, size=(512, 512), mode='bilinear', align_corners=False) - rotation_pose = batch[1][:, NUM_EYEBROW_PARAMS + NUM_FACE_PARAMS:] - return modules[Network.editor.name].forward( - input_original_image, full_warped_image, full_grid_change, rotation_pose) - elif key == Branch.all_outputs.name: - editor_output = self.get_output(Network.editor.outputs_key, modules, batch, outputs) - rotater_output = self.get_output(Network.two_algo_face_body_rotator.outputs_key, modules, batch, outputs) - face_morpher_output = self.get_output(Network.face_morpher.outputs_key, modules, batch, outputs) - eyebrow_morphing_combiner_output = self.get_output( - Network.eyebrow_morphing_combiner.outputs_key, modules, batch, outputs) - eyebrow_decomposer_output = self.get_output( - Network.eyebrow_decomposer.outputs_key, modules, batch, outputs) - output = editor_output \ - + rotater_output \ - + face_morpher_output \ - + eyebrow_morphing_combiner_output \ - + eyebrow_decomposer_output - return output - else: - raise RuntimeError("Unsupported key: " + key) - - -def load_eyebrow_decomposer(file_name: str): - factory = EyebrowDecomposer00Factory( - EyebrowDecomposer00Args( - image_size=128, - image_channels=4, - start_channels=64, - bottleneck_image_size=16, - num_bottleneck_blocks=6, - max_channels=512, - block_args=BlockArgs( - initialization_method='he', - use_spectral_norm=False, - normalization_layer_factory=InstanceNorm2dFactory(), - nonlinearity_factory=ReLUFactory(inplace=True)))) - print("Loading the eyebrow decomposer ... ", end="") - module = factory.create() - module.load_state_dict(torch_load(file_name)) - print("DONE!!!") - return module - - -def load_eyebrow_morphing_combiner(file_name: str): - factory = EyebrowMorphingCombiner00Factory( - EyebrowMorphingCombiner00Args( - image_size=128, - image_channels=4, - start_channels=64, - num_pose_params=12, - bottleneck_image_size=16, - num_bottleneck_blocks=6, - max_channels=512, - block_args=BlockArgs( - initialization_method='he', - use_spectral_norm=False, - normalization_layer_factory=InstanceNorm2dFactory(), - nonlinearity_factory=ReLUFactory(inplace=True)))) - print("Loading the eyebrow morphing conbiner ... 
", end="") - module = factory.create() - module.load_state_dict(torch_load(file_name)) - print("DONE!!!") - return module - - -def load_face_morpher(file_name: str): - factory = FaceMorpher08Factory( - FaceMorpher08Args( - image_size=192, - image_channels=4, - num_expression_params=27, - start_channels=64, - bottleneck_image_size=24, - num_bottleneck_blocks=6, - max_channels=512, - block_args=BlockArgs( - initialization_method='he', - use_spectral_norm=False, - normalization_layer_factory=InstanceNorm2dFactory(), - nonlinearity_factory=ReLUFactory(inplace=False)))) - print("Loading the face morpher ... ", end="") - module = factory.create() - module.load_state_dict(torch_load(file_name)) - print("DONE!!!") - return module - - -def load_two_algo_generator(file_name) -> Module: - module = TwoAlgoFaceBodyRotator05( - TwoAlgoFaceBodyRotator05Args( - image_size=256, - image_channels=4, - start_channels=64, - num_pose_params=6, - bottleneck_image_size=32, - num_bottleneck_blocks=6, - max_channels=512, - upsample_mode='nearest', - block_args=BlockArgs( - initialization_method='he', - use_spectral_norm=False, - normalization_layer_factory=InstanceNorm2dFactory(), - nonlinearity_factory=LeakyReLUFactory(inplace=False, negative_slope=0.1)))) - print("Loading the face-body rotator ... ", end="") - module.load_state_dict(torch_load(file_name)) - print("DONE!!!") - return module - - -def load_editor(file_name) -> Module: - module = Editor07( - Editor07Args( - image_size=512, - image_channels=4, - num_pose_params=6, - start_channels=32, - bottleneck_image_size=64, - num_bottleneck_blocks=6, - max_channels=512, - upsampling_mode='nearest', - block_args=BlockArgs( - initialization_method='he', - use_spectral_norm=False, - normalization_layer_factory=InstanceNorm2dFactory(), - nonlinearity_factory=LeakyReLUFactory(inplace=False, negative_slope=0.1)))) - print("Loading the combiner ... 
", end="") - module.load_state_dict(torch_load(file_name)) - print("DONE!!!") - return module - - -def get_pose_parameters(): - return PoseParameters.Builder() \ - .add_parameter_group("eyebrow_troubled", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eyebrow_angry", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eyebrow_lowered", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eyebrow_raised", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eyebrow_happy", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eyebrow_serious", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eye_wink", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("eye_happy_wink", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("eye_surprised", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("eye_relaxed", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("eye_unimpressed", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("eye_raised_lower_eyelid", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("iris_small", PoseParameterCategory.IRIS_MORPH, arity=2) \ - .add_parameter_group("mouth_aaa", PoseParameterCategory.MOUTH, arity=1, default_value=1.0) \ - .add_parameter_group("mouth_iii", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("mouth_uuu", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("mouth_eee", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("mouth_ooo", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("mouth_delta", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("mouth_lowered_corner", PoseParameterCategory.MOUTH, arity=2) \ - .add_parameter_group("mouth_raised_corner", PoseParameterCategory.MOUTH, arity=2) \ - .add_parameter_group("mouth_smirk", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("iris_rotation_x", PoseParameterCategory.IRIS_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("iris_rotation_y", PoseParameterCategory.IRIS_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("head_x", PoseParameterCategory.FACE_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("head_y", PoseParameterCategory.FACE_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("neck_z", PoseParameterCategory.FACE_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("body_y", PoseParameterCategory.BODY_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("body_z", PoseParameterCategory.BODY_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("breathing", PoseParameterCategory.BREATHING, arity=1, range=(0.0, 1.0)) \ - .build() - - -def create_poser( - device: torch.device, - module_file_names: Optional[Dict[str, str]] = None, - eyebrow_morphed_image_index: int = EyebrowMorphingCombiner00.EYEBROW_IMAGE_NO_COMBINE_ALPHA_INDEX, - default_output_index: int = 0) -> GeneralPoser02: - if module_file_names is None: - module_file_names = {} - if Network.eyebrow_decomposer.name not in module_file_names: - dir = "data/models/standard_float" - file_name = dir + "/eyebrow_decomposer.pt" - module_file_names[Network.eyebrow_decomposer.name] = file_name - if Network.eyebrow_morphing_combiner.name not in module_file_names: - dir = "data/models/standard_float" - file_name = dir + "/eyebrow_morphing_combiner.pt" - module_file_names[Network.eyebrow_morphing_combiner.name] = file_name - if 
Network.face_morpher.name not in module_file_names: - dir = "data/models/standard_float" - file_name = dir + "/face_morpher.pt" - module_file_names[Network.face_morpher.name] = file_name - if Network.two_algo_face_body_rotator.name not in module_file_names: - dir = "data/models/standard_float" - file_name = dir + "/two_algo_face_body_rotator.pt" - module_file_names[Network.two_algo_face_body_rotator.name] = file_name - if Network.editor.name not in module_file_names: - dir = "data/models/standard_float" - file_name = dir + "/editor.pt" - module_file_names[Network.editor.name] = file_name - - loaders = { - Network.eyebrow_decomposer.name: - lambda: load_eyebrow_decomposer(module_file_names[Network.eyebrow_decomposer.name]), - Network.eyebrow_morphing_combiner.name: - lambda: load_eyebrow_morphing_combiner(module_file_names[Network.eyebrow_morphing_combiner.name]), - Network.face_morpher.name: - lambda: load_face_morpher(module_file_names[Network.face_morpher.name]), - Network.two_algo_face_body_rotator.name: - lambda: load_two_algo_generator(module_file_names[Network.two_algo_face_body_rotator.name]), - Network.editor.name: - lambda: load_editor(module_file_names[Network.editor.name]), - } - return GeneralPoser02( - image_size=512, - module_loaders=loaders, - pose_parameters=get_pose_parameters().get_pose_parameter_groups(), - output_list_func=FiveStepPoserComputationProtocol(eyebrow_morphed_image_index).compute_func(), - subrect=None, - device=device, - output_length=29, - default_output_index=default_output_index) - - -if __name__ == "__main__": - device = torch.device('cuda') - poser = create_poser(device) - - image = torch.zeros(1, 4, 512, 512, device=device) - pose = torch.zeros(1, 45, device=device) - - repeat = 100 - acc = 0.0 - for i in range(repeat + 2): - start = torch.cuda.Event(enable_timing=True) - end = torch.cuda.Event(enable_timing=True) - - start.record() - poser.pose(image, pose) - end.record() - torch.cuda.synchronize() - if i >= 2: - elapsed_time = start.elapsed_time(end) - print("%d:" % i, elapsed_time) - acc = acc + elapsed_time - - print("average:", acc / repeat) diff --git a/spaces/dandan4272/hand_gesture_rec/app.py b/spaces/dandan4272/hand_gesture_rec/app.py deleted file mode 100644 index ef446dee0c5df5c77bc2c5c5a234122bc3381472..0000000000000000000000000000000000000000 --- a/spaces/dandan4272/hand_gesture_rec/app.py +++ /dev/null @@ -1,110 +0,0 @@ - -import argparse - -import gradio as gr -import numpy as np -from predict_st_gcn import det_image - - - - -def get_args(): - parser = argparse.ArgumentParser() - - parser.add_argument("--device", type=int, default=0) - parser.add_argument("--width", help='cap width', type=int, default=640) - parser.add_argument("--height", help='cap height', type=int, default=480) - - parser.add_argument('--use_static_image_mode', action='store_true') - parser.add_argument("--min_detection_confidence", - help='min_detection_confidence', - type=float, - default=0.7) - parser.add_argument("--min_tracking_confidence", - help='min_tracking_confidence', - type=int, - default=0.5) - parser.add_argument("--action_frames", - help='action_frames', - type=int, - default=15) - args = parser.parse_args() - - return (args) - -if __name__ == "__main__": - - model_cls_name = ['shake_hand', 'palm', 'fist', 'clock_wise', 'anti_clockwise', 'ok', 'thumb', 'v', 'heart', 'no_gesture'] - # ------------------- 视频模式输入组件 ------------------- - inputs_video = gr.Video(source='webcam', mirror_webcam=True, label="原始视频") # webcam - - - # ------------------- 视频模式输出组件 
------------------- - outputs_video = gr.Video(label="原始视频") - # outputs_video = gr.Video(format='mp4', label="检测视频") - - # ------------------- 图片模式输出参数 ------------------- - - # ------------------- 视频模式输出参数 ------------------- - # outputs_video_list = [outputs_video] - - title = '手势识别演示' - - - load_iters = 3000 # 加载之前训练的模型(指定迭代次数) - # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - threshold = 0.996 - # graph_args = {'strategy': 'spatial'} - # - # model = TwoStreamSpatialTemporalGraph(graph_args, 10) - # model.eval() - # pre = torch.load('../weights/data_aug2/_hand_stgcn_fps15_21node_sgd_40.pth') - # model.load_state_dict(pre, False) - # model = torch.nn.DataParallel(model).cuda() - numclass = 10 - - # # ------------------- 视频模式输入参数 ------------------- - # inputs_video_list = [ - # inputs_video # 输入图片 - # ] - - gyd_video = gr.Interface( - fn=det_image, - inputs=inputs_video, - outputs=outputs_video, - title=title, - - # cache_examples=False, - # theme="seafoam", - # live=True, # 实时变更输出 - flagging_dir="run", # 输出目录 - allow_flagging="manual", - flagging_options=["good", "generally", "bad"], - ) - gr.close_all() - # gyd_video.launch(server_name='127.0.0.1',server_port=7788) - - gyd_video.launch(share=False) - - - -# import gradio -# -# topk = 3 -# with towhee.api() as api: -# action_classification_function = ( -# api.video_decode.ffmpeg( -# sample_type='uniform_temporal_subsample', args={'num_samples': 32}) -# .action_classification.pytorchvideo(model_name='mvit_base_32x3', skip_preprocess=True, topk=topk) -# .runas_op(func=lambda res: {res[0][i]: res[1][i] for i in range(len(res[0]))}) -# .as_function() -# ) -# -# interface = gradio.Interface(action_classification_function, -# inputs=gradio.Video(source='upload'), -# outputs=[gradio.Label(num_top_classes=topk)] -# ) -# interface.launch(inline=True, share=True) - - diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/svgPathPen.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/svgPathPen.py deleted file mode 100644 index ae6ebfbd5333a10f665d0b879d976294b2b9993e..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/svgPathPen.py +++ /dev/null @@ -1,293 +0,0 @@ -from typing import Callable -from fontTools.pens.basePen import BasePen - - -def pointToString(pt, ntos=str): - return " ".join(ntos(i) for i in pt) - - -class SVGPathPen(BasePen): - """Pen to draw SVG path d commands. - - Example:: - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((1, 1)) - >>> pen.curveTo((2, 2), (3, 3), (4, 4)) - >>> pen.closePath() - >>> pen.getCommands() - 'M0 0 1 1C2 2 3 3 4 4Z' - - Args: - glyphSet: a dictionary of drawable glyph objects keyed by name - used to resolve component references in composite glyphs. - ntos: a callable that takes a number and returns a string, to - customize how numbers are formatted (default: str). - - Note: - Fonts have a coordinate system where Y grows up, whereas in SVG, - Y grows down. As such, rendering path data from this pen in - SVG typically results in upside-down glyphs. You can fix this - by wrapping the data from this pen in an SVG group element with - transform, or wrap this pen in a transform pen. 
For example: - - spen = svgPathPen.SVGPathPen(glyphset) - pen= TransformPen(spen , (1, 0, 0, -1, 0, 0)) - glyphset[glyphname].draw(pen) - print(tpen.getCommands()) - """ - - def __init__(self, glyphSet, ntos: Callable[[float], str] = str): - BasePen.__init__(self, glyphSet) - self._commands = [] - self._lastCommand = None - self._lastX = None - self._lastY = None - self._ntos = ntos - - def _handleAnchor(self): - """ - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen.moveTo((10, 10)) - >>> pen._commands - ['M10 10'] - """ - if self._lastCommand == "M": - self._commands.pop(-1) - - def _moveTo(self, pt): - """ - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen._commands - ['M0 0'] - - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 0)) - >>> pen._commands - ['M10 0'] - - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 10)) - >>> pen._commands - ['M0 10'] - """ - self._handleAnchor() - t = "M%s" % (pointToString(pt, self._ntos)) - self._commands.append(t) - self._lastCommand = "M" - self._lastX, self._lastY = pt - - def _lineTo(self, pt): - """ - # duplicate point - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 10)) - >>> pen.lineTo((10, 10)) - >>> pen._commands - ['M10 10'] - - # vertical line - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 10)) - >>> pen.lineTo((10, 0)) - >>> pen._commands - ['M10 10', 'V0'] - - # horizontal line - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 10)) - >>> pen.lineTo((0, 10)) - >>> pen._commands - ['M10 10', 'H0'] - - # basic - >>> pen = SVGPathPen(None) - >>> pen.lineTo((70, 80)) - >>> pen._commands - ['L70 80'] - - # basic following a moveto - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((10, 10)) - >>> pen._commands - ['M0 0', ' 10 10'] - """ - x, y = pt - # duplicate point - if x == self._lastX and y == self._lastY: - return - # vertical line - elif x == self._lastX: - cmd = "V" - pts = self._ntos(y) - # horizontal line - elif y == self._lastY: - cmd = "H" - pts = self._ntos(x) - # previous was a moveto - elif self._lastCommand == "M": - cmd = None - pts = " " + pointToString(pt, self._ntos) - # basic - else: - cmd = "L" - pts = pointToString(pt, self._ntos) - # write the string - t = "" - if cmd: - t += cmd - self._lastCommand = cmd - t += pts - self._commands.append(t) - # store for future reference - self._lastX, self._lastY = pt - - def _curveToOne(self, pt1, pt2, pt3): - """ - >>> pen = SVGPathPen(None) - >>> pen.curveTo((10, 20), (30, 40), (50, 60)) - >>> pen._commands - ['C10 20 30 40 50 60'] - """ - t = "C" - t += pointToString(pt1, self._ntos) + " " - t += pointToString(pt2, self._ntos) + " " - t += pointToString(pt3, self._ntos) - self._commands.append(t) - self._lastCommand = "C" - self._lastX, self._lastY = pt3 - - def _qCurveToOne(self, pt1, pt2): - """ - >>> pen = SVGPathPen(None) - >>> pen.qCurveTo((10, 20), (30, 40)) - >>> pen._commands - ['Q10 20 30 40'] - >>> from fontTools.misc.roundTools import otRound - >>> pen = SVGPathPen(None, ntos=lambda v: str(otRound(v))) - >>> pen.qCurveTo((3, 3), (7, 5), (11, 4)) - >>> pen._commands - ['Q3 3 5 4', 'Q7 5 11 4'] - """ - assert pt2 is not None - t = "Q" - t += pointToString(pt1, self._ntos) + " " - t += pointToString(pt2, self._ntos) - self._commands.append(t) - self._lastCommand = "Q" - self._lastX, self._lastY = pt2 - - def _closePath(self): - """ - >>> pen = SVGPathPen(None) - >>> pen.closePath() - >>> pen._commands - ['Z'] - """ - self._commands.append("Z") - self._lastCommand = "Z" - self._lastX = self._lastY = None - - def 
_endPath(self): - """ - >>> pen = SVGPathPen(None) - >>> pen.endPath() - >>> pen._commands - [] - """ - self._lastCommand = None - self._lastX = self._lastY = None - - def getCommands(self): - return "".join(self._commands) - - -def main(args=None): - """Generate per-character SVG from font and text""" - - if args is None: - import sys - - args = sys.argv[1:] - - from fontTools.ttLib import TTFont - import argparse - - parser = argparse.ArgumentParser( - "fonttools pens.svgPathPen", description="Generate SVG from text" - ) - parser.add_argument("font", metavar="font.ttf", help="Font file.") - parser.add_argument("text", metavar="text", help="Text string.") - parser.add_argument( - "-y", - metavar="", - help="Face index into a collection to open. Zero based.", - ) - parser.add_argument( - "--variations", - metavar="AXIS=LOC", - default="", - help="List of space separated locations. A location consist in " - "the name of a variation axis, followed by '=' and a number. E.g.: " - "wght=700 wdth=80. The default is the location of the base master.", - ) - - options = parser.parse_args(args) - - fontNumber = int(options.y) if options.y is not None else 0 - - font = TTFont(options.font, fontNumber=fontNumber) - text = options.text - - location = {} - for tag_v in options.variations.split(): - fields = tag_v.split("=") - tag = fields[0].strip() - v = int(fields[1]) - location[tag] = v - - hhea = font["hhea"] - ascent, descent = hhea.ascent, hhea.descent - - glyphset = font.getGlyphSet(location=location) - cmap = font["cmap"].getBestCmap() - - s = "" - width = 0 - for u in text: - g = cmap[ord(u)] - glyph = glyphset[g] - - pen = SVGPathPen(glyphset) - glyph.draw(pen) - commands = pen.getCommands() - - s += '\n' % ( - width, - ascent, - commands, - ) - - width += glyph.width - - print('') - print( - '' - % (width, ascent - descent) - ) - print(s, end="") - print("") - - -if __name__ == "__main__": - import sys - - if len(sys.argv) == 1: - import doctest - - sys.exit(doctest.testmod().failed) - - sys.exit(main()) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ModifyUpload-0461fcb6.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ModifyUpload-0461fcb6.js deleted file mode 100644 index dab5baf5c2c33d3e99ed9e3b2e61c968e77bd046..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ModifyUpload-0461fcb6.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as g,e as w,s as _,f as p,g as o,K as i,h as k,j as m,n as u,k as b,m as L,o as j,F as $,Y as v,G as C,w as d,r as z,u as h,v as B,H as M,C as E}from"./index-9e76ffee.js";import"./Button-30a08c0b.js";import{I}from"./IconButton-307018b3.js";import"./ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js";function S(a){let e,s,t,r;return{c(){e=p("svg"),s=p("g"),t=p("path"),r=p("path"),o(t,"d","M18,6L6.087,17.913"),i(t,"fill","none"),i(t,"fill-rule","nonzero"),i(t,"stroke-width","2px"),o(s,"transform","matrix(1.14096,-0.140958,-0.140958,1.14096,-0.0559523,0.0559523)"),o(r,"d","M4.364,4.364L19.636,19.636"),i(r,"fill","none"),i(r,"fill-rule","nonzero"),i(r,"stroke-width","2px"),o(e,"width","100%"),o(e,"height","100%"),o(e,"viewBox","0 0 24 
24"),o(e,"version","1.1"),o(e,"xmlns","http://www.w3.org/2000/svg"),o(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),o(e,"xml:space","preserve"),o(e,"stroke","currentColor"),i(e,"fill-rule","evenodd"),i(e,"clip-rule","evenodd"),i(e,"stroke-linecap","round"),i(e,"stroke-linejoin","round")},m(n,l){k(n,e,l),m(e,s),m(s,t),m(e,r)},p:u,i:u,o:u,d(n){n&&b(e)}}}class q extends g{constructor(e){super(),w(this,e,null,S,_,{})}}function D(a){let e,s;return{c(){e=p("svg"),s=p("path"),o(s,"d","M17 3a2.828 2.828 0 1 1 4 4L7.5 20.5 2 22l1.5-5.5L17 3z"),o(e,"xmlns","http://www.w3.org/2000/svg"),o(e,"width","100%"),o(e,"height","100%"),o(e,"viewBox","0 0 24 24"),o(e,"fill","none"),o(e,"stroke","currentColor"),o(e,"stroke-width","1.5"),o(e,"stroke-linecap","round"),o(e,"stroke-linejoin","round"),o(e,"class","feather feather-edit-2")},m(t,r){k(t,e,r),m(e,s)},p:u,i:u,o:u,d(t){t&&b(e)}}}class F extends g{constructor(e){super(),w(this,e,null,D,_,{})}}function x(a){let e,s;return e=new I({props:{Icon:F,label:"Edit"}}),e.$on("click",a[3]),{c(){$(e.$$.fragment)},m(t,r){C(e,t,r),s=!0},p:u,i(t){s||(d(e.$$.fragment,t),s=!0)},o(t){h(e.$$.fragment,t),s=!1},d(t){M(e,t)}}}function G(a){let e,s,t,r,n=a[0]&&x(a);return t=new I({props:{Icon:q,label:"Clear"}}),t.$on("click",a[4]),{c(){e=L("div"),n&&n.c(),s=j(),$(t.$$.fragment),o(e,"class","svelte-19sk1im"),v(e,"not-absolute",!a[1]),i(e,"position",a[1]?"absolute":"static")},m(l,c){k(l,e,c),n&&n.m(e,null),m(e,s),C(t,e,null),r=!0},p(l,[c]){l[0]?n?(n.p(l,c),c&1&&d(n,1)):(n=x(l),n.c(),d(n,1),n.m(e,s)):n&&(z(),h(n,1,1,()=>{n=null}),B()),(!r||c&2)&&v(e,"not-absolute",!l[1]),c&2&&i(e,"position",l[1]?"absolute":"static")},i(l){r||(d(n),d(t.$$.fragment,l),r=!0)},o(l){h(n),h(t.$$.fragment,l),r=!1},d(l){l&&b(e),n&&n.d(),M(t)}}}function H(a,e,s){let{editable:t=!1}=e,{absolute:r=!0}=e;const n=E(),l=()=>n("edit"),c=f=>{n("clear"),f.stopPropagation()};return a.$$set=f=>{"editable"in f&&s(0,t=f.editable),"absolute"in f&&s(1,r=f.absolute)},[t,r,n,l,c]}class y extends g{constructor(e){super(),w(this,e,H,G,_,{editable:0,absolute:1})}}export{q as C,y as M}; -//# sourceMappingURL=ModifyUpload-0461fcb6.js.map diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/jinja2/parser.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/jinja2/parser.py deleted file mode 100644 index cefce2dfa1d2a4171838b0d0135af8ea3ff7d62c..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/jinja2/parser.py +++ /dev/null @@ -1,1032 +0,0 @@ -"""Parse tokens from the lexer into nodes for the compiler.""" -import typing -import typing as t - -from . 
import nodes -from .exceptions import TemplateAssertionError -from .exceptions import TemplateSyntaxError -from .lexer import describe_token -from .lexer import describe_token_expr - -if t.TYPE_CHECKING: - import typing_extensions as te - from .environment import Environment - -_ImportInclude = t.TypeVar("_ImportInclude", nodes.Import, nodes.Include) -_MacroCall = t.TypeVar("_MacroCall", nodes.Macro, nodes.CallBlock) - -_statement_keywords = frozenset( - [ - "for", - "if", - "block", - "extends", - "print", - "macro", - "include", - "from", - "import", - "set", - "with", - "autoescape", - ] -) -_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"]) - -_math_nodes: t.Dict[str, t.Type[nodes.Expr]] = { - "add": nodes.Add, - "sub": nodes.Sub, - "mul": nodes.Mul, - "div": nodes.Div, - "floordiv": nodes.FloorDiv, - "mod": nodes.Mod, -} - - -class Parser: - """This is the central parsing class Jinja uses. It's passed to - extensions and can be used to parse expressions or statements. - """ - - def __init__( - self, - environment: "Environment", - source: str, - name: t.Optional[str] = None, - filename: t.Optional[str] = None, - state: t.Optional[str] = None, - ) -> None: - self.environment = environment - self.stream = environment._tokenize(source, name, filename, state) - self.name = name - self.filename = filename - self.closed = False - self.extensions: t.Dict[ - str, t.Callable[["Parser"], t.Union[nodes.Node, t.List[nodes.Node]]] - ] = {} - for extension in environment.iter_extensions(): - for tag in extension.tags: - self.extensions[tag] = extension.parse - self._last_identifier = 0 - self._tag_stack: t.List[str] = [] - self._end_token_stack: t.List[t.Tuple[str, ...]] = [] - - def fail( - self, - msg: str, - lineno: t.Optional[int] = None, - exc: t.Type[TemplateSyntaxError] = TemplateSyntaxError, - ) -> "te.NoReturn": - """Convenience method that raises `exc` with the message, passed - line number or last line number as well as the current name and - filename. - """ - if lineno is None: - lineno = self.stream.current.lineno - raise exc(msg, lineno, self.name, self.filename) - - def _fail_ut_eof( - self, - name: t.Optional[str], - end_token_stack: t.List[t.Tuple[str, ...]], - lineno: t.Optional[int], - ) -> "te.NoReturn": - expected: t.Set[str] = set() - for exprs in end_token_stack: - expected.update(map(describe_token_expr, exprs)) - if end_token_stack: - currently_looking: t.Optional[str] = " or ".join( - map(repr, map(describe_token_expr, end_token_stack[-1])) - ) - else: - currently_looking = None - - if name is None: - message = ["Unexpected end of template."] - else: - message = [f"Encountered unknown tag {name!r}."] - - if currently_looking: - if name is not None and name in expected: - message.append( - "You probably made a nesting mistake. Jinja is expecting this tag," - f" but currently looking for {currently_looking}." - ) - else: - message.append( - f"Jinja was looking for the following tags: {currently_looking}." - ) - - if self._tag_stack: - message.append( - "The innermost block that needs to be closed is" - f" {self._tag_stack[-1]!r}." - ) - - self.fail(" ".join(message), lineno) - - def fail_unknown_tag( - self, name: str, lineno: t.Optional[int] = None - ) -> "te.NoReturn": - """Called if the parser encounters an unknown tag. Tries to fail - with a human readable error message that could help to identify - the problem. 
- """ - self._fail_ut_eof(name, self._end_token_stack, lineno) - - def fail_eof( - self, - end_tokens: t.Optional[t.Tuple[str, ...]] = None, - lineno: t.Optional[int] = None, - ) -> "te.NoReturn": - """Like fail_unknown_tag but for end of template situations.""" - stack = list(self._end_token_stack) - if end_tokens is not None: - stack.append(end_tokens) - self._fail_ut_eof(None, stack, lineno) - - def is_tuple_end( - self, extra_end_rules: t.Optional[t.Tuple[str, ...]] = None - ) -> bool: - """Are we at the end of a tuple?""" - if self.stream.current.type in ("variable_end", "block_end", "rparen"): - return True - elif extra_end_rules is not None: - return self.stream.current.test_any(extra_end_rules) # type: ignore - return False - - def free_identifier(self, lineno: t.Optional[int] = None) -> nodes.InternalName: - """Return a new free identifier as :class:`~jinja2.nodes.InternalName`.""" - self._last_identifier += 1 - rv = object.__new__(nodes.InternalName) - nodes.Node.__init__(rv, f"fi{self._last_identifier}", lineno=lineno) - return rv - - def parse_statement(self) -> t.Union[nodes.Node, t.List[nodes.Node]]: - """Parse a single statement.""" - token = self.stream.current - if token.type != "name": - self.fail("tag name expected", token.lineno) - self._tag_stack.append(token.value) - pop_tag = True - try: - if token.value in _statement_keywords: - f = getattr(self, f"parse_{self.stream.current.value}") - return f() # type: ignore - if token.value == "call": - return self.parse_call_block() - if token.value == "filter": - return self.parse_filter_block() - ext = self.extensions.get(token.value) - if ext is not None: - return ext(self) - - # did not work out, remove the token we pushed by accident - # from the stack so that the unknown tag fail function can - # produce a proper error message. - self._tag_stack.pop() - pop_tag = False - self.fail_unknown_tag(token.value, token.lineno) - finally: - if pop_tag: - self._tag_stack.pop() - - def parse_statements( - self, end_tokens: t.Tuple[str, ...], drop_needle: bool = False - ) -> t.List[nodes.Node]: - """Parse multiple statements into a list until one of the end tokens - is reached. This is used to parse the body of statements as it also - parses template data if appropriate. The parser checks first if the - current token is a colon and skips it if there is one. Then it checks - for the block end and parses until if one of the `end_tokens` is - reached. Per default the active token in the stream at the end of - the call is the matched end token. If this is not wanted `drop_needle` - can be set to `True` and the end token is removed. - """ - # the first token may be a colon for python compatibility - self.stream.skip_if("colon") - - # in the future it would be possible to add whole code sections - # by adding some sort of end of statement token and parsing those here. 
- self.stream.expect("block_end") - result = self.subparse(end_tokens) - - # we reached the end of the template too early, the subparser - # does not check for this, so we do that now - if self.stream.current.type == "eof": - self.fail_eof(end_tokens) - - if drop_needle: - next(self.stream) - return result - - def parse_set(self) -> t.Union[nodes.Assign, nodes.AssignBlock]: - """Parse an assign statement.""" - lineno = next(self.stream).lineno - target = self.parse_assign_target(with_namespace=True) - if self.stream.skip_if("assign"): - expr = self.parse_tuple() - return nodes.Assign(target, expr, lineno=lineno) - filter_node = self.parse_filter(None) - body = self.parse_statements(("name:endset",), drop_needle=True) - return nodes.AssignBlock(target, filter_node, body, lineno=lineno) - - def parse_for(self) -> nodes.For: - """Parse a for loop.""" - lineno = self.stream.expect("name:for").lineno - target = self.parse_assign_target(extra_end_rules=("name:in",)) - self.stream.expect("name:in") - iter = self.parse_tuple( - with_condexpr=False, extra_end_rules=("name:recursive",) - ) - test = None - if self.stream.skip_if("name:if"): - test = self.parse_expression() - recursive = self.stream.skip_if("name:recursive") - body = self.parse_statements(("name:endfor", "name:else")) - if next(self.stream).value == "endfor": - else_ = [] - else: - else_ = self.parse_statements(("name:endfor",), drop_needle=True) - return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno) - - def parse_if(self) -> nodes.If: - """Parse an if construct.""" - node = result = nodes.If(lineno=self.stream.expect("name:if").lineno) - while True: - node.test = self.parse_tuple(with_condexpr=False) - node.body = self.parse_statements(("name:elif", "name:else", "name:endif")) - node.elif_ = [] - node.else_ = [] - token = next(self.stream) - if token.test("name:elif"): - node = nodes.If(lineno=self.stream.current.lineno) - result.elif_.append(node) - continue - elif token.test("name:else"): - result.else_ = self.parse_statements(("name:endif",), drop_needle=True) - break - return result - - def parse_with(self) -> nodes.With: - node = nodes.With(lineno=next(self.stream).lineno) - targets: t.List[nodes.Expr] = [] - values: t.List[nodes.Expr] = [] - while self.stream.current.type != "block_end": - if targets: - self.stream.expect("comma") - target = self.parse_assign_target() - target.set_ctx("param") - targets.append(target) - self.stream.expect("assign") - values.append(self.parse_expression()) - node.targets = targets - node.values = values - node.body = self.parse_statements(("name:endwith",), drop_needle=True) - return node - - def parse_autoescape(self) -> nodes.Scope: - node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno) - node.options = [nodes.Keyword("autoescape", self.parse_expression())] - node.body = self.parse_statements(("name:endautoescape",), drop_needle=True) - return nodes.Scope([node]) - - def parse_block(self) -> nodes.Block: - node = nodes.Block(lineno=next(self.stream).lineno) - node.name = self.stream.expect("name").value - node.scoped = self.stream.skip_if("name:scoped") - node.required = self.stream.skip_if("name:required") - - # common problem people encounter when switching from django - # to jinja. we do not support hyphens in block names, so let's - # raise a nicer error message in that case. - if self.stream.current.type == "sub": - self.fail( - "Block names in Jinja have to be valid Python identifiers and may not" - " contain hyphens, use an underscore instead." 
- ) - - node.body = self.parse_statements(("name:endblock",), drop_needle=True) - - # enforce that required blocks only contain whitespace or comments - # by asserting that the body, if not empty, is just TemplateData nodes - # with whitespace data - if node.required and not all( - isinstance(child, nodes.TemplateData) and child.data.isspace() - for body in node.body - for child in body.nodes # type: ignore - ): - self.fail("Required blocks can only contain comments or whitespace") - - self.stream.skip_if("name:" + node.name) - return node - - def parse_extends(self) -> nodes.Extends: - node = nodes.Extends(lineno=next(self.stream).lineno) - node.template = self.parse_expression() - return node - - def parse_import_context( - self, node: _ImportInclude, default: bool - ) -> _ImportInclude: - if self.stream.current.test_any( - "name:with", "name:without" - ) and self.stream.look().test("name:context"): - node.with_context = next(self.stream).value == "with" - self.stream.skip() - else: - node.with_context = default - return node - - def parse_include(self) -> nodes.Include: - node = nodes.Include(lineno=next(self.stream).lineno) - node.template = self.parse_expression() - if self.stream.current.test("name:ignore") and self.stream.look().test( - "name:missing" - ): - node.ignore_missing = True - self.stream.skip(2) - else: - node.ignore_missing = False - return self.parse_import_context(node, True) - - def parse_import(self) -> nodes.Import: - node = nodes.Import(lineno=next(self.stream).lineno) - node.template = self.parse_expression() - self.stream.expect("name:as") - node.target = self.parse_assign_target(name_only=True).name - return self.parse_import_context(node, False) - - def parse_from(self) -> nodes.FromImport: - node = nodes.FromImport(lineno=next(self.stream).lineno) - node.template = self.parse_expression() - self.stream.expect("name:import") - node.names = [] - - def parse_context() -> bool: - if self.stream.current.value in { - "with", - "without", - } and self.stream.look().test("name:context"): - node.with_context = next(self.stream).value == "with" - self.stream.skip() - return True - return False - - while True: - if node.names: - self.stream.expect("comma") - if self.stream.current.type == "name": - if parse_context(): - break - target = self.parse_assign_target(name_only=True) - if target.name.startswith("_"): - self.fail( - "names starting with an underline can not be imported", - target.lineno, - exc=TemplateAssertionError, - ) - if self.stream.skip_if("name:as"): - alias = self.parse_assign_target(name_only=True) - node.names.append((target.name, alias.name)) - else: - node.names.append(target.name) - if parse_context() or self.stream.current.type != "comma": - break - else: - self.stream.expect("name") - if not hasattr(node, "with_context"): - node.with_context = False - return node - - def parse_signature(self, node: _MacroCall) -> None: - args = node.args = [] - defaults = node.defaults = [] - self.stream.expect("lparen") - while self.stream.current.type != "rparen": - if args: - self.stream.expect("comma") - arg = self.parse_assign_target(name_only=True) - arg.set_ctx("param") - if self.stream.skip_if("assign"): - defaults.append(self.parse_expression()) - elif defaults: - self.fail("non-default argument follows default argument") - args.append(arg) - self.stream.expect("rparen") - - def parse_call_block(self) -> nodes.CallBlock: - node = nodes.CallBlock(lineno=next(self.stream).lineno) - if self.stream.current.type == "lparen": - self.parse_signature(node) - 
else: - node.args = [] - node.defaults = [] - - call_node = self.parse_expression() - if not isinstance(call_node, nodes.Call): - self.fail("expected call", node.lineno) - node.call = call_node - node.body = self.parse_statements(("name:endcall",), drop_needle=True) - return node - - def parse_filter_block(self) -> nodes.FilterBlock: - node = nodes.FilterBlock(lineno=next(self.stream).lineno) - node.filter = self.parse_filter(None, start_inline=True) # type: ignore - node.body = self.parse_statements(("name:endfilter",), drop_needle=True) - return node - - def parse_macro(self) -> nodes.Macro: - node = nodes.Macro(lineno=next(self.stream).lineno) - node.name = self.parse_assign_target(name_only=True).name - self.parse_signature(node) - node.body = self.parse_statements(("name:endmacro",), drop_needle=True) - return node - - def parse_print(self) -> nodes.Output: - node = nodes.Output(lineno=next(self.stream).lineno) - node.nodes = [] - while self.stream.current.type != "block_end": - if node.nodes: - self.stream.expect("comma") - node.nodes.append(self.parse_expression()) - return node - - @typing.overload - def parse_assign_target( - self, with_tuple: bool = ..., name_only: "te.Literal[True]" = ... - ) -> nodes.Name: - ... - - @typing.overload - def parse_assign_target( - self, - with_tuple: bool = True, - name_only: bool = False, - extra_end_rules: t.Optional[t.Tuple[str, ...]] = None, - with_namespace: bool = False, - ) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]: - ... - - def parse_assign_target( - self, - with_tuple: bool = True, - name_only: bool = False, - extra_end_rules: t.Optional[t.Tuple[str, ...]] = None, - with_namespace: bool = False, - ) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]: - """Parse an assignment target. As Jinja allows assignments to - tuples, this function can parse all allowed assignment targets. Per - default assignments to tuples are parsed, that can be disable however - by setting `with_tuple` to `False`. If only assignments to names are - wanted `name_only` can be set to `True`. The `extra_end_rules` - parameter is forwarded to the tuple parsing function. If - `with_namespace` is enabled, a namespace assignment may be parsed. - """ - target: nodes.Expr - - if with_namespace and self.stream.look().type == "dot": - token = self.stream.expect("name") - next(self.stream) # dot - attr = self.stream.expect("name") - target = nodes.NSRef(token.value, attr.value, lineno=token.lineno) - elif name_only: - token = self.stream.expect("name") - target = nodes.Name(token.value, "store", lineno=token.lineno) - else: - if with_tuple: - target = self.parse_tuple( - simplified=True, extra_end_rules=extra_end_rules - ) - else: - target = self.parse_primary() - - target.set_ctx("store") - - if not target.can_assign(): - self.fail( - f"can't assign to {type(target).__name__.lower()!r}", target.lineno - ) - - return target # type: ignore - - def parse_expression(self, with_condexpr: bool = True) -> nodes.Expr: - """Parse an expression. Per default all expressions are parsed, if - the optional `with_condexpr` parameter is set to `False` conditional - expressions are not parsed. 
- """ - if with_condexpr: - return self.parse_condexpr() - return self.parse_or() - - def parse_condexpr(self) -> nodes.Expr: - lineno = self.stream.current.lineno - expr1 = self.parse_or() - expr3: t.Optional[nodes.Expr] - - while self.stream.skip_if("name:if"): - expr2 = self.parse_or() - if self.stream.skip_if("name:else"): - expr3 = self.parse_condexpr() - else: - expr3 = None - expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno) - lineno = self.stream.current.lineno - return expr1 - - def parse_or(self) -> nodes.Expr: - lineno = self.stream.current.lineno - left = self.parse_and() - while self.stream.skip_if("name:or"): - right = self.parse_and() - left = nodes.Or(left, right, lineno=lineno) - lineno = self.stream.current.lineno - return left - - def parse_and(self) -> nodes.Expr: - lineno = self.stream.current.lineno - left = self.parse_not() - while self.stream.skip_if("name:and"): - right = self.parse_not() - left = nodes.And(left, right, lineno=lineno) - lineno = self.stream.current.lineno - return left - - def parse_not(self) -> nodes.Expr: - if self.stream.current.test("name:not"): - lineno = next(self.stream).lineno - return nodes.Not(self.parse_not(), lineno=lineno) - return self.parse_compare() - - def parse_compare(self) -> nodes.Expr: - lineno = self.stream.current.lineno - expr = self.parse_math1() - ops = [] - while True: - token_type = self.stream.current.type - if token_type in _compare_operators: - next(self.stream) - ops.append(nodes.Operand(token_type, self.parse_math1())) - elif self.stream.skip_if("name:in"): - ops.append(nodes.Operand("in", self.parse_math1())) - elif self.stream.current.test("name:not") and self.stream.look().test( - "name:in" - ): - self.stream.skip(2) - ops.append(nodes.Operand("notin", self.parse_math1())) - else: - break - lineno = self.stream.current.lineno - if not ops: - return expr - return nodes.Compare(expr, ops, lineno=lineno) - - def parse_math1(self) -> nodes.Expr: - lineno = self.stream.current.lineno - left = self.parse_concat() - while self.stream.current.type in ("add", "sub"): - cls = _math_nodes[self.stream.current.type] - next(self.stream) - right = self.parse_concat() - left = cls(left, right, lineno=lineno) - lineno = self.stream.current.lineno - return left - - def parse_concat(self) -> nodes.Expr: - lineno = self.stream.current.lineno - args = [self.parse_math2()] - while self.stream.current.type == "tilde": - next(self.stream) - args.append(self.parse_math2()) - if len(args) == 1: - return args[0] - return nodes.Concat(args, lineno=lineno) - - def parse_math2(self) -> nodes.Expr: - lineno = self.stream.current.lineno - left = self.parse_pow() - while self.stream.current.type in ("mul", "div", "floordiv", "mod"): - cls = _math_nodes[self.stream.current.type] - next(self.stream) - right = self.parse_pow() - left = cls(left, right, lineno=lineno) - lineno = self.stream.current.lineno - return left - - def parse_pow(self) -> nodes.Expr: - lineno = self.stream.current.lineno - left = self.parse_unary() - while self.stream.current.type == "pow": - next(self.stream) - right = self.parse_unary() - left = nodes.Pow(left, right, lineno=lineno) - lineno = self.stream.current.lineno - return left - - def parse_unary(self, with_filter: bool = True) -> nodes.Expr: - token_type = self.stream.current.type - lineno = self.stream.current.lineno - node: nodes.Expr - - if token_type == "sub": - next(self.stream) - node = nodes.Neg(self.parse_unary(False), lineno=lineno) - elif token_type == "add": - next(self.stream) - node = 
nodes.Pos(self.parse_unary(False), lineno=lineno) - else: - node = self.parse_primary() - node = self.parse_postfix(node) - if with_filter: - node = self.parse_filter_expr(node) - return node - - def parse_primary(self) -> nodes.Expr: - token = self.stream.current - node: nodes.Expr - if token.type == "name": - if token.value in ("true", "false", "True", "False"): - node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno) - elif token.value in ("none", "None"): - node = nodes.Const(None, lineno=token.lineno) - else: - node = nodes.Name(token.value, "load", lineno=token.lineno) - next(self.stream) - elif token.type == "string": - next(self.stream) - buf = [token.value] - lineno = token.lineno - while self.stream.current.type == "string": - buf.append(self.stream.current.value) - next(self.stream) - node = nodes.Const("".join(buf), lineno=lineno) - elif token.type in ("integer", "float"): - next(self.stream) - node = nodes.Const(token.value, lineno=token.lineno) - elif token.type == "lparen": - next(self.stream) - node = self.parse_tuple(explicit_parentheses=True) - self.stream.expect("rparen") - elif token.type == "lbracket": - node = self.parse_list() - elif token.type == "lbrace": - node = self.parse_dict() - else: - self.fail(f"unexpected {describe_token(token)!r}", token.lineno) - return node - - def parse_tuple( - self, - simplified: bool = False, - with_condexpr: bool = True, - extra_end_rules: t.Optional[t.Tuple[str, ...]] = None, - explicit_parentheses: bool = False, - ) -> t.Union[nodes.Tuple, nodes.Expr]: - """Works like `parse_expression` but if multiple expressions are - delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created. - This method could also return a regular expression instead of a tuple - if no commas where found. - - The default parsing mode is a full tuple. If `simplified` is `True` - only names and literals are parsed. The `no_condexpr` parameter is - forwarded to :meth:`parse_expression`. - - Because tuples do not require delimiters and may end in a bogus comma - an extra hint is needed that marks the end of a tuple. For example - for loops support tuples between `for` and `in`. In that case the - `extra_end_rules` is set to ``['name:in']``. - - `explicit_parentheses` is true if the parsing was triggered by an - expression in parentheses. This is used to figure out if an empty - tuple is a valid expression or not. - """ - lineno = self.stream.current.lineno - if simplified: - parse = self.parse_primary - elif with_condexpr: - parse = self.parse_expression - else: - - def parse() -> nodes.Expr: - return self.parse_expression(with_condexpr=False) - - args: t.List[nodes.Expr] = [] - is_tuple = False - - while True: - if args: - self.stream.expect("comma") - if self.is_tuple_end(extra_end_rules): - break - args.append(parse()) - if self.stream.current.type == "comma": - is_tuple = True - else: - break - lineno = self.stream.current.lineno - - if not is_tuple: - if args: - return args[0] - - # if we don't have explicit parentheses, an empty tuple is - # not a valid expression. This would mean nothing (literally - # nothing) in the spot of an expression would be an empty - # tuple. 
- if not explicit_parentheses: - self.fail( - "Expected an expression," - f" got {describe_token(self.stream.current)!r}" - ) - - return nodes.Tuple(args, "load", lineno=lineno) - - def parse_list(self) -> nodes.List: - token = self.stream.expect("lbracket") - items: t.List[nodes.Expr] = [] - while self.stream.current.type != "rbracket": - if items: - self.stream.expect("comma") - if self.stream.current.type == "rbracket": - break - items.append(self.parse_expression()) - self.stream.expect("rbracket") - return nodes.List(items, lineno=token.lineno) - - def parse_dict(self) -> nodes.Dict: - token = self.stream.expect("lbrace") - items: t.List[nodes.Pair] = [] - while self.stream.current.type != "rbrace": - if items: - self.stream.expect("comma") - if self.stream.current.type == "rbrace": - break - key = self.parse_expression() - self.stream.expect("colon") - value = self.parse_expression() - items.append(nodes.Pair(key, value, lineno=key.lineno)) - self.stream.expect("rbrace") - return nodes.Dict(items, lineno=token.lineno) - - def parse_postfix(self, node: nodes.Expr) -> nodes.Expr: - while True: - token_type = self.stream.current.type - if token_type == "dot" or token_type == "lbracket": - node = self.parse_subscript(node) - # calls are valid both after postfix expressions (getattr - # and getitem) as well as filters and tests - elif token_type == "lparen": - node = self.parse_call(node) - else: - break - return node - - def parse_filter_expr(self, node: nodes.Expr) -> nodes.Expr: - while True: - token_type = self.stream.current.type - if token_type == "pipe": - node = self.parse_filter(node) # type: ignore - elif token_type == "name" and self.stream.current.value == "is": - node = self.parse_test(node) - # calls are valid both after postfix expressions (getattr - # and getitem) as well as filters and tests - elif token_type == "lparen": - node = self.parse_call(node) - else: - break - return node - - def parse_subscript( - self, node: nodes.Expr - ) -> t.Union[nodes.Getattr, nodes.Getitem]: - token = next(self.stream) - arg: nodes.Expr - - if token.type == "dot": - attr_token = self.stream.current - next(self.stream) - if attr_token.type == "name": - return nodes.Getattr( - node, attr_token.value, "load", lineno=token.lineno - ) - elif attr_token.type != "integer": - self.fail("expected name or number", attr_token.lineno) - arg = nodes.Const(attr_token.value, lineno=attr_token.lineno) - return nodes.Getitem(node, arg, "load", lineno=token.lineno) - if token.type == "lbracket": - args: t.List[nodes.Expr] = [] - while self.stream.current.type != "rbracket": - if args: - self.stream.expect("comma") - args.append(self.parse_subscribed()) - self.stream.expect("rbracket") - if len(args) == 1: - arg = args[0] - else: - arg = nodes.Tuple(args, "load", lineno=token.lineno) - return nodes.Getitem(node, arg, "load", lineno=token.lineno) - self.fail("expected subscript expression", token.lineno) - - def parse_subscribed(self) -> nodes.Expr: - lineno = self.stream.current.lineno - args: t.List[t.Optional[nodes.Expr]] - - if self.stream.current.type == "colon": - next(self.stream) - args = [None] - else: - node = self.parse_expression() - if self.stream.current.type != "colon": - return node - next(self.stream) - args = [node] - - if self.stream.current.type == "colon": - args.append(None) - elif self.stream.current.type not in ("rbracket", "comma"): - args.append(self.parse_expression()) - else: - args.append(None) - - if self.stream.current.type == "colon": - next(self.stream) - if 
self.stream.current.type not in ("rbracket", "comma"): - args.append(self.parse_expression()) - else: - args.append(None) - else: - args.append(None) - - return nodes.Slice(lineno=lineno, *args) - - def parse_call_args(self) -> t.Tuple: - token = self.stream.expect("lparen") - args = [] - kwargs = [] - dyn_args = None - dyn_kwargs = None - require_comma = False - - def ensure(expr: bool) -> None: - if not expr: - self.fail("invalid syntax for function call expression", token.lineno) - - while self.stream.current.type != "rparen": - if require_comma: - self.stream.expect("comma") - - # support for trailing comma - if self.stream.current.type == "rparen": - break - - if self.stream.current.type == "mul": - ensure(dyn_args is None and dyn_kwargs is None) - next(self.stream) - dyn_args = self.parse_expression() - elif self.stream.current.type == "pow": - ensure(dyn_kwargs is None) - next(self.stream) - dyn_kwargs = self.parse_expression() - else: - if ( - self.stream.current.type == "name" - and self.stream.look().type == "assign" - ): - # Parsing a kwarg - ensure(dyn_kwargs is None) - key = self.stream.current.value - self.stream.skip(2) - value = self.parse_expression() - kwargs.append(nodes.Keyword(key, value, lineno=value.lineno)) - else: - # Parsing an arg - ensure(dyn_args is None and dyn_kwargs is None and not kwargs) - args.append(self.parse_expression()) - - require_comma = True - - self.stream.expect("rparen") - return args, kwargs, dyn_args, dyn_kwargs - - def parse_call(self, node: nodes.Expr) -> nodes.Call: - # The lparen will be expected in parse_call_args, but the lineno - # needs to be recorded before the stream is advanced. - token = self.stream.current - args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args() - return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) - - def parse_filter( - self, node: t.Optional[nodes.Expr], start_inline: bool = False - ) -> t.Optional[nodes.Expr]: - while self.stream.current.type == "pipe" or start_inline: - if not start_inline: - next(self.stream) - token = self.stream.expect("name") - name = token.value - while self.stream.current.type == "dot": - next(self.stream) - name += "." + self.stream.expect("name").value - if self.stream.current.type == "lparen": - args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args() - else: - args = [] - kwargs = [] - dyn_args = dyn_kwargs = None - node = nodes.Filter( - node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno - ) - start_inline = False - return node - - def parse_test(self, node: nodes.Expr) -> nodes.Expr: - token = next(self.stream) - if self.stream.current.test("name:not"): - next(self.stream) - negated = True - else: - negated = False - name = self.stream.expect("name").value - while self.stream.current.type == "dot": - next(self.stream) - name += "." 
+ self.stream.expect("name").value - dyn_args = dyn_kwargs = None - kwargs = [] - if self.stream.current.type == "lparen": - args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args() - elif self.stream.current.type in { - "name", - "string", - "integer", - "float", - "lparen", - "lbracket", - "lbrace", - } and not self.stream.current.test_any("name:else", "name:or", "name:and"): - if self.stream.current.test("name:is"): - self.fail("You cannot chain multiple tests with is") - arg_node = self.parse_primary() - arg_node = self.parse_postfix(arg_node) - args = [arg_node] - else: - args = [] - node = nodes.Test( - node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno - ) - if negated: - node = nodes.Not(node, lineno=token.lineno) - return node - - def subparse( - self, end_tokens: t.Optional[t.Tuple[str, ...]] = None - ) -> t.List[nodes.Node]: - body: t.List[nodes.Node] = [] - data_buffer: t.List[nodes.Node] = [] - add_data = data_buffer.append - - if end_tokens is not None: - self._end_token_stack.append(end_tokens) - - def flush_data() -> None: - if data_buffer: - lineno = data_buffer[0].lineno - body.append(nodes.Output(data_buffer[:], lineno=lineno)) - del data_buffer[:] - - try: - while self.stream: - token = self.stream.current - if token.type == "data": - if token.value: - add_data(nodes.TemplateData(token.value, lineno=token.lineno)) - next(self.stream) - elif token.type == "variable_begin": - next(self.stream) - add_data(self.parse_tuple(with_condexpr=True)) - self.stream.expect("variable_end") - elif token.type == "block_begin": - flush_data() - next(self.stream) - if end_tokens is not None and self.stream.current.test_any( - *end_tokens - ): - return body - rv = self.parse_statement() - if isinstance(rv, list): - body.extend(rv) - else: - body.append(rv) - self.stream.expect("block_end") - else: - raise AssertionError("internal parsing error") - - flush_data() - finally: - if end_tokens is not None: - self._end_token_stack.pop() - return body - - def parse(self) -> nodes.Template: - """Parse the whole template into a `Template` node.""" - result = nodes.Template(self.subparse(), lineno=1) - result.set_environment(self.environment) - return result diff --git a/spaces/diacanFperku/AutoGPT/127 Hours Full Movie Download In Hindi 720p.md b/spaces/diacanFperku/AutoGPT/127 Hours Full Movie Download In Hindi 720p.md deleted file mode 100644 index b0c9f98110cf1e9c808ea669ae8f504e9268f84b..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/127 Hours Full Movie Download In Hindi 720p.md +++ /dev/null @@ -1,93 +0,0 @@ -
      -

      127 Hours Full Movie Download in Hindi 720p: A Gripping Tale of Survival

      - -

      127 Hours is a 2010 biographical drama film directed by Danny Boyle and starring James Franco as Aron Ralston, a mountain climber who gets trapped by a boulder in a remote canyon in Utah and has to resort to desperate measures to free himself. The film is based on Ralston's memoir Between a Rock and a Hard Place, which recounts his harrowing ordeal in April 2003.

      - -

The film received critical acclaim for its direction, screenplay, cinematography, editing, music, and Franco's performance. It was nominated for six Academy Awards, including Best Picture and Best Actor for Franco, and earned numerous nominations at the BAFTA, Golden Globe, and Critics' Choice Awards.

      -






      - -

      How to Watch 127 Hours Full Movie Online in Hindi 720p

      - -

      If you are looking for a way to watch 127 Hours full movie online in Hindi 720p, you have several options. Here are some of them:

      - -
        -
      • You can rent or buy the movie from various online platforms, such as Amazon Prime Video, iTunes, Google Play, YouTube, or Vudu. The rental price ranges from $2.99 to $3.99, while the purchase price ranges from $9.99 to $14.99.
      • -
• You can stream the movie on Netflix if you have a subscription. The movie is available in a Hindi-dubbed version on Netflix India.
      • -
      • You can download the movie from torrent sites, such as The Pirate Bay, Kickass Torrents, or 1337x. However, this method is illegal and risky, as you may face legal consequences or malware infections.
      • -
      - -

      Why You Should Watch 127 Hours Full Movie in Hindi 720p

      - -

      127 Hours is a movie that will keep you on the edge of your seat with its intense and realistic portrayal of a man's struggle to survive against all odds. The movie is not only a thrilling adventure but also a profound exploration of human willpower, courage, and resilience. Here are some reasons why you should watch 127 Hours full movie in Hindi 720p:

      - -
        -
      • The movie showcases James Franco's brilliant acting skills as he carries the film almost entirely by himself. He delivers a captivating and nuanced performance that conveys the physical and emotional pain, frustration, fear, hope, and determination of his character.
      • -
      • The movie uses innovative cinematography and editing techniques to create a visceral and immersive experience for the viewers. The movie employs split screens, flashbacks, hallucinations, video diaries, and sound effects to convey the different aspects of Ralston's situation and psyche.
      • -
• The movie features a powerful and uplifting soundtrack composed by A.R. Rahman, whose score earned Golden Globe and Academy Award nominations. The soundtrack includes original songs by Rahman and Dido, as well as tracks by Sigur Rós, Bill Withers, Free Blood, and Plastic Bertrand.
      • -
      • The movie delivers a message of hope and inspiration that resonates with anyone who faces challenges or difficulties in life. The movie shows how Ralston overcomes his ordeal by drawing strength from his memories, dreams, and love for his family and friends.
      • -
      - -

      Conclusion

      - -

      127 Hours is a movie that you should not miss if you are looking for a gripping and inspiring story of survival. The movie is available to watch online in Hindi 720p on various platforms or torrent sites. However, we recommend that you watch it legally and safely from authorized sources. You will not regret watching this masterpiece that will leave you breathless and amazed.

      -
      What is the Plot of 127 Hours Full Movie in Hindi 720p
      - -

      127 Hours full movie in Hindi 720p tells the true story of Aron Ralston, a mountain climber who gets trapped by a boulder in a remote canyon in Utah and has to resort to desperate measures to free himself. The movie follows his ordeal over five days, as he tries to move the boulder, call for help, ration his water and food, and keep his sanity. He also reflects on his life, his relationships, and his mistakes. He eventually decides to amputate his own arm with a dull knife and a tourniquet, and then hikes out of the canyon to find help.

      - -
      What are the Themes of 127 Hours Full Movie in Hindi 720p
      - -

      127 Hours full movie in Hindi 720p explores various themes, such as survival, courage, willpower, isolation, regret, and redemption. The movie shows how Ralston faces his mortality and his loneliness, and how he overcomes his fear and pain. The movie also shows how Ralston learns from his experience and changes his attitude towards life. He realizes that he needs to be more responsible, more grateful, and more connected to others. He also realizes that he has a purpose and a destiny in life.

      - -What are the Reviews of 127 Hours Full Movie in Hindi 720p - -

127 Hours full movie in Hindi 720p received positive reviews from critics and audiences alike. Critics praised its direction, screenplay, cinematography, editing, music, and Franco's performance, and lauded its realism, intensity, and emotional impact. Reviewers described it as a "masterpiece", a "tour de force", and a "triumph", though some criticized the graphic depiction of the amputation scene, which certain viewers found disturbing or unbearable.

      -

      -What are the Facts of 127 Hours Full Movie in Hindi 720p - -

      127 Hours full movie in Hindi 720p is based on a true story that happened in April 2003. Here are some facts about the movie and the real-life incident:

      - -
        -
• The movie was shot partly on location in Bluejohn Canyon in Utah, where Ralston was trapped, with the tight canyon interiors recreated on a purpose-built set. The filmmakers had to get permission from the National Park Service and the Bureau of Land Management to film there.
      • -
      • The movie used a replica of the boulder that trapped Ralston's arm, which weighed about 800 pounds. The boulder was made of foam and steel, and was controlled by a hydraulic system.
      • -
• The movie took 13 months to edit, as the filmmakers had to sift through more than 140 hours of footage. The movie also recreated the video diaries that Ralston recorded during his ordeal; Ralston showed the real tapes to the filmmakers, but they have been kept private.
      • -
• The movie's amputation scene lasted for about six minutes, but in reality, it took Ralston about 40 minutes to cut off his arm. He used a cheap multi-tool with a dull blade and a pair of pliers.
      • -
      • The movie's ending showed Ralston being rescued by a helicopter, but in reality, he had to hike for another six miles before he found help. He also met a family of hikers who gave him water and alerted the authorities.
      • -
      - -What are the Benefits of Watching 127 Hours Full Movie in Hindi 720p - -

      127 Hours full movie in Hindi 720p is not only an entertaining and inspiring movie, but also a beneficial one. Here are some benefits of watching the movie:

      - -
        -
      • The movie can motivate you to overcome your challenges and difficulties in life. The movie shows how Ralston faced his situation with courage, determination, and optimism. He did not give up on his life or his dreams, and he found a way to survive.
      • -
      • The movie can teach you to appreciate your life and your loved ones more. The movie shows how Ralston realized his mistakes and regrets, and how he wished he had been more responsible, grateful, and connected to others. He also realized how much he loved his family and friends, and how much they loved him back.
      • -
      • The movie can inspire you to pursue your passions and adventures more. The movie shows how Ralston was an avid mountaineer who loved exploring nature and experiencing new things. He also had a sense of humor and a zest for life.
      • -
      -What are the Awards of 127 Hours Full Movie in Hindi 720p - -

      127 Hours full movie in Hindi 720p is not only a critically acclaimed movie, but also a commercially successful one. The movie earned over $60 million worldwide on a budget of $18 million. The movie also received numerous awards and nominations from various prestigious organizations and festivals. Here are some of them:

      - -
        -
      • The movie was nominated for six Academy Awards, including Best Picture, Best Actor for Franco, Best Adapted Screenplay, Best Original Score, Best Original Song, and Best Film Editing.
      • -
      • The movie won three BAFTA Awards, including Best British Film, Best Editing, and Best Music. It was also nominated for six more BAFTA Awards, including Best Film, Best Director, Best Actor for Franco, Best Adapted Screenplay, Best Cinematography, and Best Sound.
      • -
      • The movie won two Golden Globe Awards, including Best Original Score and Best Original Song. It was also nominated for two more Golden Globe Awards, including Best Motion Picture - Drama and Best Actor - Drama for Franco.
      • -
      • The movie won four Critics' Choice Awards, including Best Picture, Best Actor for Franco, Best Editing, and Best Composer. It was also nominated for two more Critics' Choice Awards, including Best Director and Best Adapted Screenplay.
      • -
      • The movie won the Audience Award at the Toronto International Film Festival and the Mill Valley Film Festival. It was also nominated for the People's Choice Award at the Sundance Film Festival.
      • -
      - -What are the Tips for Watching 127 Hours Full Movie in Hindi 720p - -

      127 Hours full movie in Hindi 720p is a movie that will make you feel a range of emotions, from excitement to horror to awe. The movie is not for the faint of heart or the squeamish, as it depicts a graphic and realistic amputation scene that may shock or disturb some viewers. Here are some tips for watching the movie:

      - -
        -
      • Prepare yourself mentally and emotionally before watching the movie. Know what to expect and brace yourself for the intense scenes.
      • -
      • Watch the movie with someone else or in a group. This way, you can share your reactions and feelings with others and get support if you feel uncomfortable or scared.
      • -
      • Have some snacks and drinks handy while watching the movie. This will help you stay hydrated and energized during the movie.
      • -
      • Take breaks if you need to. You can pause the movie or look away during the disturbing scenes if you feel overwhelmed or nauseous.
      • -
      • Enjoy the movie as a cinematic masterpiece and a true story of survival. Appreciate the direction, screenplay, cinematography, editing, music, and performance of the movie. Learn from the message and themes of the movie.
      • -
      -Conclusion - -

127 Hours full movie in Hindi 720p is a movie you should not miss if you are looking for a gripping and inspiring story of survival. It is based on the true story of Aron Ralston, a mountain climber who gets trapped by a boulder in a remote canyon in Utah and has to resort to desperate measures to free himself. The film is a masterpiece of direction, screenplay, cinematography, editing, music, and performance, and a lesson in survival, courage, willpower, isolation, regret, and redemption. It is available to watch online in Hindi 720p on various platforms or torrent sites; however, we recommend that you watch it legally and safely from authorized sources. You will not regret watching this movie that will leave you breathless and amazed.

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/ACDSee Photo Studio Professional 2020 EXCLUSIVE Crack.md b/spaces/diacanFperku/AutoGPT/ACDSee Photo Studio Professional 2020 EXCLUSIVE Crack.md deleted file mode 100644 index c5e44f838e9838b6b3cd4c79aafe41e90c85c2dd..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/ACDSee Photo Studio Professional 2020 EXCLUSIVE Crack.md +++ /dev/null @@ -1,84 +0,0 @@ -
      -

      ACDSee Photo Studio Professional 2020 Crack: A Review

      -

If you are looking for powerful photo editing and digital asset management software, you might be interested in ACDSee Photo Studio Professional 2020. This software offers a comprehensive set of tools to enhance your images, organize your photos, and unleash your creativity. However, you might also be tempted to download an ACDSee Photo Studio Professional 2020 crack from unofficial sources, hoping to get the full version for free. In this article, we will review the features of ACDSee Photo Studio Professional 2020 and explain why you should avoid using cracked software.

      -

      Features of ACDSee Photo Studio Professional 2020

      -

ACDSee Photo Studio Professional 2020 is software that combines RAW editing and digital asset management in one package. It allows you to import, browse, edit, and export your photos with ease and speed. Some of its key features are:

      -

      ACDSee Photo Studio Professional 2020 Crack


      Download Ziphttps://gohhs.com/2uFV2V



      -
        -
      • Combined Clone Tool: This tool lets you copy pixels from one area of an image to another, blending them seamlessly for a natural-looking result. You can use it to remove unwanted objects, blemishes, or distractions from your photos.
      • -
• Color LUTs: Color LUTs are files that instruct ACDSee to map specific RGB values to other colors, creating different effects and moods for your images. You can import and apply color LUTs in develop mode for flexible, non-destructive color grading. (A generic sketch of how a LUT remaps pixel values follows this list.)
      • -
      • Face Detection and Facial Recognition: This feature helps you to identify and name the people in your photos, making it easier to search and organize them. ACDSee will learn which names belong to which faces, and even suggest possible matches. You can also embed face data in your photos for safe keeping.
      • -
      • GPU Acceleration: ACDSee Photo Studio Professional 2020 utilizes the power of your graphics card to speed up the processing of your images, saving you time and improving your workflow.
      • -
      -
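To make the Color LUT idea concrete, here is a minimal sketch of how a per-channel look-up table remaps pixel values. It is a generic Python/NumPy illustration only; the apply_1d_lut function and the example "warm" table are invented for this demonstration and have nothing to do with ACDSee's own LUT file format or code.

```python
# Generic 1D colour LUT sketch (hypothetical names, not ACDSee code):
# each input channel value 0-255 is replaced by the value stored at that
# index in a 256-entry table.
import numpy as np

def apply_1d_lut(image: np.ndarray, lut: np.ndarray) -> np.ndarray:
    """image: uint8 array of shape (H, W, 3); lut: uint8 array of shape (256, 3)."""
    out = np.empty_like(image)
    for c in range(3):                       # remap R, G and B independently
        out[..., c] = lut[image[..., c], c]
    return out

# Build a simple "warm" grade: lift red, keep green, pull down blue.
ramp = np.arange(256, dtype=np.float32)
lut = np.stack([
    np.clip(ramp * 1.10, 0, 255),   # red boosted
    ramp,                           # green unchanged
    np.clip(ramp * 0.90, 0, 255),   # blue reduced
], axis=1).astype(np.uint8)

image = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
graded = apply_1d_lut(image, lut)
```

Real LUT files (such as .cube files) usually describe a 3D table over combined RGB values rather than one table per channel, but the principle is the same: the table, not a formula, defines the colour mapping.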

      Why You Should Avoid ACDSee Photo Studio Professional 2020 Crack

      -

      While it might seem tempting to download ACDSee Photo Studio Professional 2020 crack from unofficial sources, you should know that there are many risks and disadvantages associated with it. Here are some of the reasons why you should avoid using cracked software:

      -
        -
      • Legal Issues: Downloading and using cracked software is illegal and violates the copyright laws. You could face fines or even jail time if you are caught using pirated software.
      • -
      • Virus Infection: Cracked software often comes with malware or viruses that can harm your computer or steal your personal information. You could lose your data, compromise your security, or damage your system by using cracked software.
      • -
      • No Updates or Support: Cracked software does not receive any updates or support from the official developers. You could miss out on new features, bug fixes, or compatibility improvements that are available for the legitimate version of the software.
      • -
      • Poor Performance: Cracked software often has errors or glitches that affect its performance and functionality. You could experience crashes, freezes, or corrupted files by using cracked software.
      • -
      -

      How to Get ACDSee Photo Studio Professional 2020 Legally

      -

      If you want to enjoy the benefits of ACDSee Photo Studio Professional 2020 without risking any legal or technical problems, you should buy the legal version from the official ACDSee website. You can choose from different subscription plans or purchase a lifetime license for a one-time fee. By buying the legal version of ACDSee Photo Studio Professional 2020, you will get:

      -
        -
      • Full Access to All Features: You will be able to use all the features and tools that ACDSee Photo Studio Professional 2020 has to offer, without any limitations or restrictions.
      • -
      • Regular Updates and Support: You will receive regular updates and support from the official developers, ensuring that your software is always up-to-date and running smoothly.
      • -
      • Virus-Free Software: You will download the software from a trusted source, guaranteeing that it is free from any malware or viruses that could harm your computer.
      • -
      • Satisfaction Guarantee: You will have a 30-day money-back guarantee if you are not satisfied with the software for any reason.
      • -
      -

      Conclusion

      -

ACDSee Photo Studio Professional 2020 is a great tool for photo editing and digital asset management. It offers a wide range of features and tools to help you enhance your images, organize your photos, and unleash your creativity. However, you should avoid downloading an ACDSee Photo Studio Professional 2020 crack from unofficial sources, as it could expose you to legal and technical problems. Instead, you should buy the legal version from the official ACDSee website, where you get full access to all features, regular updates and support, virus-free software, and a satisfaction guarantee.

      -

      How to Download and Install ACDSee Photo Studio Professional 2020

      -

      If you want to download and install ACDSee Photo Studio Professional 2020, you need to follow these steps:

      -
        -
      1. Go to the official ACDSee website and choose the subscription plan or the lifetime license that suits your needs.
      2. -
      3. Click on the "Buy Now" button and complete the payment process.
      4. -
      5. After the payment is confirmed, you will receive an email with a download link and a license key.
      6. -
      7. Click on the download link and save the installation file on your computer.
      8. -
      9. Run the installation file and follow the instructions on the screen.
      10. -
      11. When prompted, enter your license key to activate the software.
      12. -
      13. Enjoy using ACDSee Photo Studio Professional 2020!
      14. -
      -

      Tips and Tricks for Using ACDSee Photo Studio Professional 2020

      -

ACDSee Photo Studio Professional 2020 offers many features and tools to help you improve your photos. Here are some tips and tricks for getting the most out of it:

      -
        -
      • Use Presets: Presets are predefined settings that you can apply to your photos with one click. You can use presets to quickly adjust the exposure, contrast, color, sharpness, and other parameters of your photos. You can also create your own presets or import presets from other sources.
      • -
      • Use Layers: Layers are a way of editing your photos non-destructively, meaning that you can make changes without affecting the original image. You can use layers to apply adjustments, effects, masks, text, watermarks, and other elements to your photos. You can also blend layers with different modes and opacity levels.
      • -
• Use Batch Processing: Batch processing lets you apply the same actions to multiple photos at once. You can use it to resize, rename, convert, watermark, or edit many photos in a few clicks, and you can create your own batch presets or use the built-in ones. (A short generic script illustrating the idea follows this list.)
      • -
      • Use Keywords and Categories: Keywords and categories are a way of organizing your photos by adding tags or labels to them. You can use keywords and categories to sort, filter, search, or group your photos by different criteria. You can also use keywords and categories to create smart collections or albums.
      • -
      -
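As a rough illustration of what a batch operation does, the sketch below resizes, renames, and re-exports every JPEG in a folder using the Pillow library. The folder names and the renaming pattern are invented for this example; it is not ACDSee's batch engine, just the general idea of applying one set of actions to many files.

```python
# Generic batch-processing sketch (hypothetical paths, not ACDSee code):
# apply the same resize + rename + re-export step to every photo in a folder.
from pathlib import Path
from PIL import Image

SRC = Path("photos/originals")   # input folder (made up for this example)
DST = Path("photos/web")         # output folder
DST.mkdir(parents=True, exist_ok=True)

for i, path in enumerate(sorted(SRC.glob("*.jpg")), start=1):
    with Image.open(path) as im:
        im.thumbnail((1920, 1920))                         # resize, keep aspect ratio
        im.save(DST / f"holiday_{i:03d}.jpg", quality=85)  # rename and save as JPEG
```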

      Conclusion

      -

ACDSee Photo Studio Professional 2020 is software that combines RAW editing and digital asset management in one package. It allows you to import, browse, edit, and export your photos with ease and speed, and offers a wide range of features and tools to help you enhance your images, organize your photos, and unleash your creativity. However, you should avoid downloading an ACDSee Photo Studio Professional 2020 crack from unofficial sources, as it could expose you to legal and technical problems. Instead, you should buy the legal version from the official ACDSee website, where you get full access to all features, regular updates and support, virus-free software, and a satisfaction guarantee.

      -

      Alternatives to ACDSee Photo Studio Professional 2020

      -

If you are looking for alternatives to ACDSee Photo Studio Professional 2020, you might want to consider these programs:

      -

      -
        -
      • Adobe Photoshop Lightroom: This software is a popular choice for photo editing and digital asset management. It offers a comprehensive set of tools to import, organize, edit, and share your photos. It also integrates with other Adobe products and cloud services.
      • -
      • Corel PaintShop Pro: This software is a versatile and affordable option for photo editing and digital asset management. It offers a user-friendly interface and a rich set of features to enhance your photos, create graphic designs, and manage your photo collection.
      • -
      • Affinity Photo: This software is a powerful and professional option for photo editing and digital asset management. It offers a fast and smooth performance and a high-quality output. It also supports RAW editing, HDR merging, panorama stitching, and other advanced features.
      • -
      -

      Frequently Asked Questions about ACDSee Photo Studio Professional 2020

      -

      Here are some of the frequently asked questions about ACDSee Photo Studio Professional 2020:

      -
      -
      What are the system requirements for ACDSee Photo Studio Professional 2020?
      -
      The system requirements for ACDSee Photo Studio Professional 2020 are:
      -
      - Microsoft Windows 7 (SP1), Windows 8, Windows 8.1, or Windows 10 (64-bit editions only)
      -
      - Microsoft Internet Explorer 9 or higher
      -
      - Microsoft DirectX 10 or higher
      -
      - Windows Media Player 9.0 or higher
      -
      - Intel or AMD processor with 64-bit support
      -
      - Intel i3 or better processor recommended
      -
      - 2 GB RAM (6 GB RAM or more recommended)
      -
      - 512 MB Video RAM (VRAM)
      -
      - DirectX 10 compatible graphics adapter
      -
      - 1024 x 768 display resolution (1920 x 1080 recommended)
      -
      - 2 GB of available hard disk space
      -
      How much does ACDSee Photo Studio Professional 2020 cost?
      -
      ACDSee Photo Studio Professional 2020 costs $89.95 for a lifetime license or $69/year for a subscription plan. You can also get a free trial for 30 days.
      -
      How can I contact ACDSee support?
      -
      You can contact ACDSee support by visiting their website and choosing the option that suits your needs. You can also access their online help, user guide, tutorials, forums, and blog.
      -
      -

      Conclusion

      -

ACDSee Photo Studio Professional 2020 is software that combines RAW editing and digital asset management in one package. It allows you to import, browse, edit, and export your photos with ease and speed, and offers a wide range of features and tools to help you enhance your images, organize your photos, and unleash your creativity. However, you should avoid downloading an ACDSee Photo Studio Professional 2020 crack from unofficial sources, as it could expose you to legal and technical problems. Instead, you should buy the legal version from the official ACDSee website, where you get full access to all features, regular updates and support, virus-free software, and a satisfaction guarantee. Alternatively, you can try one of the other programs mentioned in this article, such as Adobe Photoshop Lightroom, Corel PaintShop Pro, or Affinity Photo. We hope that this article has helped you learn more about ACDSee Photo Studio Professional 2020 and how to use it effectively.

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/digitalxingtong/Eileen-Bert-Vits2/attentions.py b/spaces/digitalxingtong/Eileen-Bert-Vits2/attentions.py deleted file mode 100644 index ecbdbc8be941a962046fc11fd6739b093112123e..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Eileen-Bert-Vits2/attentions.py +++ /dev/null @@ -1,343 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from torch.nn.utils import weight_norm, remove_weight_norm -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - if isflow: - cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1) - self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1) - self.cond_layer = weight_norm(cond_layer, name='weight') - self.gin_channels = 256 - self.cond_layer_idx = self.n_layers - if 'gin_channels' in kwargs: - self.gin_channels = kwargs['gin_channels'] - if self.gin_channels != 0: - self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels) - # vits2 says 3rd block, so idx is 2 by default - self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2 - print(self.gin_channels, self.cond_layer_idx) - assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers' - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - def forward(self, x, x_mask, g=None): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - if i == self.cond_layer_idx and g is not None: - g = self.spk_emb_linear(g.transpose(1, 2)) - g = g.transpose(1, 2) - x = x + g - x = x * x_mask - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def 
__init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, 
self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). 
- x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/digitalxingtong/Nailv-Bert-Vits2/text/tone_sandhi.py b/spaces/digitalxingtong/Nailv-Bert-Vits2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nailv-Bert-Vits2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. - # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 
奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 
一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/text/tone_sandhi.py b/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', 
'哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. - # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 
看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/train_ms.py b/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/train_ms.py deleted file mode 100644 index 5d109003d40497ea4493e7c73f47c1eb7370a81e..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/train_ms.py +++ /dev/null @@ -1,402 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -import shutil -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler -from tqdm import tqdm -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -import commons -import utils -from data_utils import ( - TextAudioSpeakerLoader, - TextAudioSpeakerCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, - DurationDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - -torch.backends.cudnn.benchmark = True -torch.backends.cuda.matmul.allow_tf32 = True -torch.backends.cudnn.allow_tf32 = True -torch.set_float32_matmul_precision('medium') -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." 
- - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '65280' - - hps = utils.get_hparams() - if not hps.cont: - shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth') - shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth') - shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth') - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32, 300, 400, 500, 600, 700, 800, 900, 1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioSpeakerCollate() - train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, - batch_size=1, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: - print("Using noise scaled MAS for VITS2") - use_noise_scaled_mas = True - mas_noise_scale_initial = 0.01 - noise_scale_delta = 2e-6 - else: - print("Using normal MAS for VITS1") - use_noise_scaled_mas = False - mas_noise_scale_initial = 0.0 - noise_scale_delta = 0.0 - if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: - print("Using duration discriminator for VITS2") - use_duration_discriminator = True - net_dur_disc = DurationDiscriminator( - hps.model.hidden_channels, - hps.model.hidden_channels, - 3, - 0.1, - gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, - ).cuda(rank) - if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: - if hps.data.n_speakers == 0: - raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") - use_spk_conditioned_encoder = True - else: - print("Using normal encoder for VITS1") - use_spk_conditioned_encoder = False - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - mas_noise_scale_initial = mas_noise_scale_initial, - noise_scale_delta = noise_scale_delta, - **hps.model).cuda(rank) - - freeze_enc = getattr(hps.model, "freeze_enc", False) - if freeze_enc: - print("freeze encoder !!!") - for param in net_g.enc_p.parameters(): - param.requires_grad = False - - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - filter(lambda p: p.requires_grad, net_g.parameters()), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - 
eps=hps.train.eps) - if net_dur_disc is not None: - optim_dur_disc = torch.optim.AdamW( - net_dur_disc.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - else: - optim_dur_disc = None - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - if net_dur_disc is not None: - net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) - - pretrain_dir = None - if pretrain_dir is None: - try: - if net_dur_disc is not None: - _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) - _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g, skip_optimizer=not hps.cont) - _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d, skip_optimizer=not hps.cont) - - epoch_str = max(epoch_str, 1) - global_step = (epoch_str - 1) * len(train_loader) - except Exception as e: - print(e) - epoch_str = 1 - global_step = 0 - else: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, - optim_g, True) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, - optim_d, True) - - - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - if net_dur_disc is not None: - scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - else: - scheduler_dur_disc = None - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - if net_dur_disc is not None: - scheduler_dur_disc.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d, net_dur_disc = nets - optim_g, optim_d, optim_dur_disc = optims - scheduler_g, scheduler_d, scheduler_dur_disc = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - if net_dur_disc is not None: - net_dur_disc.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): - if net_g.module.use_noise_scaled_mas: - current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step - net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, spec_lengths = spec.cuda(rank, non_blocking=True), 
spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - speakers = speakers.cuda(rank, non_blocking=True) - tone = tone.cuda(rank, non_blocking=True) - language = language.cuda(rank, non_blocking=True) - bert = bert.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) - with autocast(enabled=False): - # TODO: I think need to mean using the mask, but for now, just mean all - loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) - loss_dur_disc_all = loss_dur_disc - optim_dur_disc.zero_grad() - scaler.scale(loss_dur_disc_all).backward() - scaler.unscale_(optim_dur_disc) - grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) - scaler.step(optim_dur_disc) - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - if net_dur_disc is not None: - loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g) - loss_gen_all += loss_dur_gen - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. 
* batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update( - {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - if net_dur_disc is not None: - utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step))) - keep_ckpts = getattr(hps.train, 'keep_ckpts', 5) - if keep_ckpts > 0: - utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True) - - - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - print("Evaluating ...") - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in enumerate(eval_loader): - x, x_lengths = x.cuda(), x_lengths.cuda() - spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - speakers = speakers.cuda() - bert = bert.cuda() - tone = tone.cuda() - language = language.cuda() - for use_sdp in [True, False]: - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0) - y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - image_dict.update({ - f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - }) - audio_dict.update({ - f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]] - }) - image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - 
audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - -if __name__ == "__main__": - main() diff --git a/spaces/dineshreddy/WALT/mmdet/models/backbones/__init__.py b/spaces/dineshreddy/WALT/mmdet/models/backbones/__init__.py deleted file mode 100644 index 11d7de7543b04e7040facb4472121e5c0f02ecaa..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/backbones/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .swin_transformer import SwinTransformer -from .resnet import ResNet, ResNetV1d -__all__ = ['SwinTransformer', 'ResNet', 'ResNetV1d'] diff --git a/spaces/dmeck/RVC-Speakers/speakers/load/__init__.py b/spaces/dmeck/RVC-Speakers/speakers/load/__init__.py deleted file mode 100644 index e1c042ba376692a50f00cf5e1ae1a1481d5d08c7..0000000000000000000000000000000000000000 --- a/spaces/dmeck/RVC-Speakers/speakers/load/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Serialization and deserialization.""" diff --git a/spaces/docs-demos/t5-base/README.md b/spaces/docs-demos/t5-base/README.md deleted file mode 100644 index fc49dc34740f47a48c572136bdea91defe26718a..0000000000000000000000000000000000000000 --- a/spaces/docs-demos/t5-base/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: T5 -emoji: 👁 -colorFrom: yellow -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/dorkai/ChatUIPro/app/components/base/tooltip/index.tsx b/spaces/dorkai/ChatUIPro/app/components/base/tooltip/index.tsx deleted file mode 100644 index 610f17b9d68e43d8eafbf674bc85645ce66332e8..0000000000000000000000000000000000000000 --- a/spaces/dorkai/ChatUIPro/app/components/base/tooltip/index.tsx +++ /dev/null @@ -1,46 +0,0 @@ -'use client' -import classNames from 'classnames' -import type { FC } from 'react' -import React from 'react' -import { Tooltip as ReactTooltip } from 'react-tooltip' // fixed version to 5.8.3 https://github.com/ReactTooltip/react-tooltip/issues/972 -import 'react-tooltip/dist/react-tooltip.css' - -type TooltipProps = { - selector: string - content?: string - htmlContent?: React.ReactNode - className?: string // This should use !impornant to override the default styles eg: '!bg-white' - position?: 'top' | 'right' | 'bottom' | 'left' - clickable?: boolean - children: React.ReactNode -} - -const Tooltip: FC = ({ - selector, - content, - position = 'top', - children, - htmlContent, - className, - clickable, -}) => { - return ( -
      - {React.cloneElement(children as React.ReactElement, { - 'data-tooltip-id': selector, - }) - } - - {htmlContent && htmlContent} - -
      - ) -} - -export default Tooltip diff --git a/spaces/dukai289/scripts/scripts/logger_decorator.py b/spaces/dukai289/scripts/scripts/logger_decorator.py deleted file mode 100644 index 96696daf88bf0909707689fc74835d1417686e89..0000000000000000000000000000000000000000 --- a/spaces/dukai289/scripts/scripts/logger_decorator.py +++ /dev/null @@ -1,30 +0,0 @@ -# from logger_decorator import logger - -import logging -import functools -import time - -# 创建全局的日志记录器 -# logger -logger = logging.getLogger('global_logger') -logger.setLevel(logging.DEBUG) -# handler -handler = logging.FileHandler('log.txt') -# formatter -formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') -# 关联 -handler.setFormatter(formatter) -logger.addHandler(handler) - -def logger(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - start_time = time.time() # 记录函数开始执行的时间 - logger.info("函数调用开始\t{}".format(func.__name__)) - result = func(*args, **kwargs) - end_time = time.time() # 记录函数执行结束的时间 - execution_time = end_time - start_time # 计算函数执行时长 - logger.info("函数调用完成\t{}".format(func.__name__)) - logger.info("函数调用耗时\t{:.6f}秒\n".format(execution_time)) - return result - return wrapper diff --git a/spaces/dylanebert/igf/viewer/README.md b/spaces/dylanebert/igf/viewer/README.md deleted file mode 100644 index 5c91169b0ca6508bb24301c957a9edea5abf2b01..0000000000000000000000000000000000000000 --- a/spaces/dylanebert/igf/viewer/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# create-svelte - -Everything you need to build a Svelte project, powered by [`create-svelte`](https://github.com/sveltejs/kit/tree/master/packages/create-svelte). - -## Creating a project - -If you're seeing this, you've probably already done this step. Congrats! - -```bash -# create a new project in the current directory -npm create svelte@latest - -# create a new project in my-app -npm create svelte@latest my-app -``` - -## Developing - -Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server: - -```bash -npm run dev - -# or start the server and open the app in a new browser tab -npm run dev -- --open -``` - -## Building - -To create a production version of your app: - -```bash -npm run build -``` - -You can preview the production build with `npm run preview`. - -> To deploy your app, you may need to install an [adapter](https://kit.svelte.dev/docs/adapters) for your target environment. 
diff --git a/spaces/edugp/perplexity-lenses/perplexity_lenses/perplexity.py b/spaces/edugp/perplexity-lenses/perplexity_lenses/perplexity.py deleted file mode 100644 index 915729d6a4c7b84a8488e71ea5050650579ea13a..0000000000000000000000000000000000000000 --- a/spaces/edugp/perplexity-lenses/perplexity_lenses/perplexity.py +++ /dev/null @@ -1,195 +0,0 @@ -import os -import re -import unicodedata -from typing import Dict -from requests.exceptions import HTTPError - -import kenlm -import sentencepiece -from huggingface_hub import cached_download, hf_hub_url - -KENLM_MODEL_REPO = "edugp/kenlm" - - -class SentencePiece: - def __init__( - self, - model: str, - ): - super().__init__() - self.sp = sentencepiece.SentencePieceProcessor() - self.sp.load(str(model)) - - def do(self, text: dict) -> dict: - tokenized = self.sp.encode_as_pieces(text) - return " ".join(tokenized) - - -class KenlmModel: - digit_re: re.Pattern = re.compile(r"\d") - unicode_punct: Dict[str, str] = { - ",": ",", - "。": ".", - "、": ",", - "„": '"', - "”": '"', - "“": '"', - "«": '"', - "»": '"', - "1": '"', - "」": '"', - "「": '"', - "《": '"', - "》": '"', - "´": "'", - "∶": ":", - ":": ":", - "?": "?", - "!": "!", - "(": "(", - ")": ")", - ";": ";", - "–": "-", - "—": " - ", - ".": ". ", - "~": "~", - "’": "'", - "…": "...", - "━": "-", - "〈": "<", - "〉": ">", - "【": "[", - "】": "]", - "%": "%", - "►": "-", - } - unicode_punct_re = re.compile(f"[{''.join(unicode_punct.keys())}]") - non_printing_chars_re = re.compile( - f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]" - ) - kenlm_model_dir = None - sentence_piece_model_dir = None - - def __init__( - self, - model_dataset: str, - language: str, - lower_case: bool = False, - remove_accents: bool = False, - normalize_numbers: bool = True, - punctuation: int = 1, - ): - self.download_kenlm_model(model_dataset, language) - try: - self.model = kenlm.Model(self.kenlm_model_dir) - self.tokenizer = SentencePiece(self.sentence_piece_model_dir) - except OSError: - os.remove(self.kenlm_model_dir) - if os.path.exists(self.sentence_piece_model_dir): - os.remove(self.sentence_piece_model_dir) - raise OSError( - "File was corrupt and should have been removed. Please, retry." 
- ) - self.accent = remove_accents - self.case = lower_case - self.numbers = normalize_numbers - self.punct = punctuation - - @classmethod - def from_pretrained( - cls, - model_dataset: str, - language: str, - lower_case: bool, - remove_accents: bool, - normalize_numbers: bool, - punctuation: int, - ): - return cls( - model_dataset, - language, - lower_case, - remove_accents, - normalize_numbers, - punctuation, - ) - - def pp(self, log_score, length): - return 10.0 ** (-log_score / length) - - def get_perplexity(self, doc: str, normalize_cc_net: bool = True): - if normalize_cc_net: - doc = self.normalize( - doc, - accent=self.accent, - case=self.case, - numbers=self.numbers, - punct=self.punct, - ) - # Tokenize (after normalizing): See https://github.com/facebookresearch/cc_net/blob/bda555bd1cf1ee2e0b925363e62a61cd46c8b60d/cc_net/mine.py#L352 for full pipeline - doc = self.tokenizer.do(doc) - doc_log_score, doc_length = 0, 0 - for line in doc.split("\n"): - log_score = self.model.score(line) - length = len(line.split()) + 1 - doc_log_score += log_score - doc_length += length - return round(self.pp(doc_log_score, doc_length), 1) - - def normalize( - self, - line: str, - accent: bool = True, - case: bool = True, - numbers: bool = True, - punct: int = 1, - ) -> str: - line = line.strip() - if not line: - return line - if case: - line = line.lower() - if accent: - line = self.strip_accents(line) - if numbers: - line = self.digit_re.sub("0", line) - if punct == 1: - line = self.replace_unicode_punct(line) - elif punct == 2: - line = self.remove_unicode_punct(line) - line = self.remove_non_printing_char(line) - return line - - def strip_accents(self, line: str) -> str: - """Strips accents from a piece of text.""" - nfd = unicodedata.normalize("NFD", line) - output = [c for c in nfd if unicodedata.category(c) != "Mn"] - if len(output) == line: - return line - return "".join(output) - - def replace_unicode_punct(self, text: str) -> str: - return "".join(self.unicode_punct.get(c, c) for c in text) - - def remove_unicode_punct(self, text: str) -> str: - """More aggressive version of replace_unicode_punct but also faster.""" - return self.unicode_punct_re.sub("", text) - - def remove_non_printing_char(self, text: str) -> str: - return self.non_printing_chars_re.sub("", text) - - def download_kenlm_model(self, model_dataset: str, language: str): - try: - kenlm_model_url = hf_hub_url( - KENLM_MODEL_REPO, filename=f"{model_dataset}/{language}.arpa.trie.bin" - ) - self.kenlm_model_dir = cached_download(kenlm_model_url) - except HTTPError: - kenlm_model_url = hf_hub_url( - KENLM_MODEL_REPO, filename=f"{model_dataset}/{language}.arpa.bin" - ) - self.kenlm_model_dir = cached_download(kenlm_model_url) - sentence_piece_model_url = hf_hub_url( - KENLM_MODEL_REPO, filename=f"{model_dataset}/{language}.sp.model" - ) - self.sentence_piece_model_dir = cached_download(sentence_piece_model_url) diff --git a/spaces/emc348/faces-through-time/criteria/backbones/__init__.py b/spaces/emc348/faces-through-time/criteria/backbones/__init__.py deleted file mode 100644 index 55bd4c5d1889a1a998b52eb56793bbc1eef1b691..0000000000000000000000000000000000000000 --- a/spaces/emc348/faces-through-time/criteria/backbones/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .iresnet import iresnet18, iresnet34, iresnet50, iresnet100, iresnet200 -from .mobilefacenet import get_mbf - - -def get_model(name, **kwargs): - # resnet - if name == "r18": - return iresnet18(False, **kwargs) - elif name == "r34": - return iresnet34(False, **kwargs) 
- elif name == "r50": - return iresnet50(False, **kwargs) - elif name == "r100": - return iresnet100(False, **kwargs) - elif name == "r200": - return iresnet200(False, **kwargs) - elif name == "r2060": - from .iresnet2060 import iresnet2060 - return iresnet2060(False, **kwargs) - elif name == "mbf": - fp16 = kwargs.get("fp16", False) - num_features = kwargs.get("num_features", 512) - return get_mbf(fp16=fp16, num_features=num_features) - else: - raise ValueError() \ No newline at end of file diff --git a/spaces/enzostvs/hub-api-playground/components/editor/main/response.tsx b/spaces/enzostvs/hub-api-playground/components/editor/main/response.tsx deleted file mode 100644 index 3468195752e79d1619170decaa798bf25b8d9e2e..0000000000000000000000000000000000000000 --- a/spaces/enzostvs/hub-api-playground/components/editor/main/response.tsx +++ /dev/null @@ -1,17 +0,0 @@ -import Highlight from "react-highlight"; -import "node_modules/highlight.js/styles/night-owl.css"; -import { Loading } from "@/components/loading"; -export const Response = ({ res, loading }: { res: any; loading: boolean }) => { - return ( -
      - - {JSON.stringify(res ?? {}, null, 2)} - - {loading && ( - -

      Processing...

      -
      - )} -
      - ); -}; diff --git a/spaces/eson/tokenizer-arena/vocab/bloomz_6b4_zh/README.md b/spaces/eson/tokenizer-arena/vocab/bloomz_6b4_zh/README.md deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/eson/tokenizer-arena/vocab/kplug/jd_vocab.py b/spaces/eson/tokenizer-arena/vocab/kplug/jd_vocab.py deleted file mode 100644 index 6efe849aa638a644e1997a0f49b7bbe8f5293789..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/vocab/kplug/jd_vocab.py +++ /dev/null @@ -1,240 +0,0 @@ -""" -## Dependency -pip install emoji --upgrade - -## - -https://github.com/CLUEbenchmark/CLUEPretrainedModels/blob/master/bert_dict.py -1. 更新langconv,新增: 余 吒 著 覆 -2. -3. 删除30个阿拉伯字母 (阿拉伯语从右向左书写) -4. ok等字母 - - -## TODO: -1. ##~ 这样的词典可以删除,对应要修改tokenizer。 -2. 是否要加入空格 [SPACE] 这样的特殊符号。 - a) 还原问题: 比如 new balance这样的词汇,会被合并。 会吗? 分词后是 new bal ##ance --> new balance 也能完全还原啊。 - b) 语义问题: 同时,在一定意义上也能起到语义隔离的作用,比如 "剑南春 水晶剑 52度 单瓶装高度白酒 750ml 口感浓香型" https://item.jd.com/100006659994.html - [SEP] 也能work -""" - -import codecs -import sys -import re -from langconv import * -import emoji - -# 1. ℃ 这些符号在clue词典,但是"25℃" 不可分。策略一,加入词典 ##℃,策略二,更换分词器 -# oov_clue = ['##°', '##~', '##℃', '##㎡', '##²', '##₂', '##×', '##x', '##+', '余', '覆', '著'] - -emoji_regex = emoji.get_emoji_regexp() - -human_list = ['▲top', '▲topoct', '▲topmay', '▲topapr', '▲topmar', '▲topjun', '▲topdec', '▲topnov', '▲topaug', '▲topjul', - '▲topjan', '▲topsep', '▲topfeb', '¥799', '¥2899', '~~', '~~~', '##~6', '##~10', '~10', '##~5', '~5', - '##~20', '##~8', '##~17', '##~1', '~4', '##~3', '##~7', '~1', 'wedding', '×email', 'cp', '××', 'ok', 'a', - 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', - 'w', 'x', 'y', 'z', '##★', '##☆', '↓↓↓', '##●', '##♪', '▌♥', '##|', - '##d', '##▲', '##o', '★★', '##→', '#a', '⋯⋯', '##▼', '##○', '★★★★★', '##∥', '##◆', '##ω', '★★★', '##c', - '##s', '##e', '##p', '##■', '##↑', '##k', '##и', '◆◆', '##g', '##а', '±0', '##◎', '##─', '##r', - '##>', '##t', '★★★★', '##│', '##n', '##l', '##=', '##y', '☆☆☆', '##i', '##↓', 'ˋ▽ˊ', '##v', '↓↓', - '##f2016', '##q', '∟∣', '##я', '##←', '##◆◆', '##cm~', '##f', '##h', '##j', '##u', '##w', - '##z'] - -zhuyin_char = ['ㄅ', 'ㄆ', 'ㆠ', 'ㄇ', 'ㄈ', 'ㄪ', 'ㄉ', 'ㄊ', 'ㄋ', 'ㆹ', 'ㄌ', 'ㄍ', 'ㄎ', 'ㆣ', 'ㄫ', 'ㄏ', 'ㆸ', 'ㄐ', 'ㄑ', 'ㆢ', 'ㄬ', - 'ㄒ', 'ㆺ', 'ㄓ', 'ㄔ', 'ㄕ', 'ㄖ', 'ㄗ', 'ㄘ', 'ㆡ', 'ㄙ', 'ㆡ', 'ㆪ', 'ㄨ', 'ㆫ', 'ㆨ', 'ㄩ', 'ㄚ', 'ㆩ', 'ㆦ', 'ㆧ', 'ㄛ', - 'ㄜ', 'ㄝ', 'ㆤ', 'ㆥ', 'ㄞ', 'ㆮ', 'ㄟ', 'ㄠ', 'ㆯ', 'ㄡ', 'ㆰ', 'ㆱ', 'ㆬ', 'ㄢ', 'ㄣ', 'ㄯ', 'ㄤ', 'ㆲ', 'ㄥ', 'ㆭ', 'ㄦ', - 'ㄭ'] - -special_token = ['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]', '', ''] - -japan_chars = ['イ', 'ク', 'シ', 'ス', 'ト', 'ノ', 'フ', 'ラ', 'ル', 'ン'] - -korean_chars = ['ᄀ', 'ᄁ', 'ᄂ', 'ᄃ', 'ᄅ', 'ᄆ', 'ᄇ', 'ᄈ', 'ᄉ', 'ᄋ', 'ᄌ', 'ᄎ', 'ᄏ', 'ᄐ', 'ᄑ', 'ᄒ', 'ᅡ', 'ᅢ', 'ᅣ', 'ᅥ', 'ᅦ', - 'ᅧ', 'ᅨ', 'ᅩ', 'ᅪ', 'ᅬ', 'ᅭ', 'ᅮ', 'ᅯ', 'ᅲ', 'ᅳ', 'ᅴ', 'ᅵ', 'ᆨ', 'ᆫ', 'ᆯ', 'ᆷ', 'ᆸ', 'ᆺ', 'ᆻ', 'ᆼ', 'ᗜ'] - -add_puns = ['”', '“', '—', '–', '…', '’', '‘'] - -# 单个“乾”被转化成了 “干” -add_cn_chars = [char for char in '呡乾绗楦硌袢钕蕞癀皲貉唛笕椴―胗旯鳙鲇鳐鳜鲅鳊鲳鲽鲣枞炝醅馊捯抻绉馐饧莜嘬腘肫鳟镊犽洌蝰铱' \ - '髌锃镲锗甑戗裥弎粝霂猄轱苎偲兿铷栢帏黢洇沄誊忸怩蚬籺氚犇锒鸩噘偾髫'] -#阿拉伯文 بسن - -add_nums = ['10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', - '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', - '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', - '64', '65', '66', 
'67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', - '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', - '100', '120', '128', '180', '200', '256', '304', '360', '500', '512', '1000', '1080', '2000', '2014', - '2015', '2016', '2017', '2018', '2019', '2020', '2021', '2022'] - - -cn_punc = ',。;:?!()~|' # -def q2b(uchar, skip_cn_punc=False): - # 有时,希望保留全角中文标点,例如cn_punc。 - if skip_cn_punc and uchar in cn_punc: - return uchar - inside_code = ord(uchar) - if inside_code == 12288: # 全角空格直接转换 - inside_code = 32 - elif 65281 <= inside_code <= 65374: # 全角字符(除空格)根据关系转化 - inside_code -= 65248 - return chr(inside_code) - -def str_q2b(ustring, skip_cn_punc=False): - """ 全角转半角 """ - return ''.join([q2b(uchar, skip_cn_punc) for uchar in ustring]) - - -with open('vocab.google.txt', 'r', encoding='utf-8') as fin, \ - open('vocab.jd.txt.v2', 'w', encoding='utf-8') as fout: - cout_zh = 0 - cout_en = 0 - cout_jp = 0 - cout_em = 0 - cout_zh_res = 0 - cout_zh_tra = 0 - cout_zh_wp = 0 - cout_en_del = 0 - cout_en_res = 0 - cout_num = 0 - cout_num_del = 0 - cout_num_res = 0 - cout_hand_del = 0 - cout_total = 0 - cout_zhuyin = 0 - cout_unused = 0 - cout_special = 0 - cout_jp = 0 - cout_ko = 0 - cout_ar = 0 - - for line in fin: - cout_total += 1 - token = line.strip() - - if not token: - continue - - if token in ['|']: - print(token) - - if token in human_list: - cout_hand_del += 1 # 13 - continue - - # chinese character - elif re.match(u'[\u4e00-\u9fa5]+', token.replace('##', '')): - cout_zh += 1 # 14642 - - token_simp = Converter('zh-hans').convert(token) - if token_simp != token: - cout_zh_tra += 1 - continue - else: - if re.match(u'##', token): - # print(token) - cout_zh_wp += 1 - continue - else: - cout_zh_res += 1 - print(token, file=fout) - - # korean character - elif re.match(u'[\uac00-\ud7ff]+', token.replace('##', '')): - # print(token) - cout_ko += 1 - continue - - # japanese character - elif re.match(u'[\u30a0-\u30ff\u3040-\u309f]+', token.replace('##', '')): - # print(token) - cout_jp += 1 - continue - - # arabic character - elif re.match(u'[\u0600-\u06ff\u0750-\u077f\ufb50-\ufbc1\ufbd3-\ufd3f' - u'\ufd50-\ufd8f\ufd50-\ufd8f\ufe70-\ufefc\uFDF0-\uFDFD]+', token.replace('##', '')): - cout_ar += 1 - continue - - # english character - elif re.match(u'[a-z]+', token.replace('##', '')): - # print(token) - cout_en += 1 - print(token, file=fout) - continue - - elif str_q2b(token, skip_cn_punc=True) != token: - print(token, '--', str_q2b(token, skip_cn_punc=True)) - continue - - # emoji character - elif re.match(emoji_regex, token.replace('##', '')): - # print(token) - cout_em += 1 - continue - - # multi-number characters - elif re.match(u'(##)?\d', token): - cout_num += 1 - if len(token.replace('##', '')) == 1: - # print(token) - cout_num_res += 1 - print(token, file=fout) - else: - cout_num_del += 1 # 这个操作应该还好 - # print(token) - continue - elif token.replace('##', '') in zhuyin_char: - # print(token, file=fout) - cout_zhuyin += 1 - continue - elif token.startswith('[unused'): - print(token, file=fout) - cout_unused += 1 - elif token in special_token: - print(token, file=fout) - cout_special += 1 - - elif token.replace('##', '') in japan_chars: - cout_jp += 1 - continue - - elif token.replace('##', '') in korean_chars: - cout_ko += 1 - continue - else: - # print(token) - print(token, file=fout) - - # add tokens - if token == '"': - for token in add_puns: - print(token, file=fout) - if token == '9': - for token in 
add_nums: - cout_num_res += 1 - print(token, file=fout) - if token == '龟': - for token in add_cn_chars: - print(token, file=fout) - - print("cout_zh:{}".format(cout_zh)) # 14642 - print("cout_zh_tra:{}".format(cout_zh_tra)) # 3264 - print("cout_zh_wp:{}".format(cout_zh_wp)) # 5689 - print("cout_zh_res:{}".format(cout_zh_res)) # 5689 - print("cout_en:{}".format(cout_en)) # 3555 - print("cout_en_del:{}".format(cout_en_del)) # 2235 - print("cout_en_res:{}".format(cout_en_res)) # 1320 - print("cout_num:{}".format(cout_num)) # 1179 - print("cout_num_del:{}".format(cout_num_del)) # 1137 - print("cout_num_res:{}".format(cout_num_res)) # 140 - print("cout_hand_del:{}".format(cout_hand_del)) # 132 - print("cout_zhuyin:{}".format(cout_zhuyin)) # 36 - print("cout_unused:{}".format(cout_unused)) # 99 - print("cout_special:{}".format(cout_special)) # 7 - print("cout_jp:{}".format(cout_jp)) # 573 - print("cout_ko:{}".format(cout_ko)) # 84 - print("cout_ar:{}".format(cout_ar)) # - print("cout_em:{}".format(cout_em)) # 56 diff --git a/spaces/evaluate-metric/poseval/app.py b/spaces/evaluate-metric/poseval/app.py deleted file mode 100644 index bf11e456a33c0cf6fcacbe5ff0ff20aed4514e32..0000000000000000000000000000000000000000 --- a/spaces/evaluate-metric/poseval/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import evaluate -from evaluate.utils import launch_gradio_widget - - -module = evaluate.load("poseval") - -launch_gradio_widget(module) diff --git a/spaces/f2api/gpt-academic/request_llm/bridge_jittorllms_pangualpha.py b/spaces/f2api/gpt-academic/request_llm/bridge_jittorllms_pangualpha.py deleted file mode 100644 index ad02565aef75ac056e0daa7396fb1c6ad7aae072..0000000000000000000000000000000000000000 --- a/spaces/f2api/gpt-academic/request_llm/bridge_jittorllms_pangualpha.py +++ /dev/null @@ -1,178 +0,0 @@ - -from transformers import AutoModel, AutoTokenizer -import time -import threading -import importlib -from toolbox import update_ui, get_conf -from multiprocessing import Process, Pipe - -load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" - -################################################################################# -class GetGLMHandle(Process): - def __init__(self): - super().__init__(daemon=True) - self.parent, self.child = Pipe() - self.jittorllms_model = None - self.info = "" - self.local_history = [] - self.success = True - self.check_dependency() - self.start() - self.threadLock = threading.Lock() - - def check_dependency(self): - try: - import pandas - self.info = "依赖检测通过" - self.success = True - except: - from toolbox import trimmed_format_exc - self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ - r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ - r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" 
+ trimmed_format_exc() - self.success = False - - def ready(self): - return self.jittorllms_model is not None - - def run(self): - # 子进程执行 - # 第一次运行,加载参数 - def validate_path(): - import os, sys - dir_name = os.path.dirname(__file__) - env = os.environ.get("PATH", "") - os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin') - root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/jittorllms') - sys.path.append(root_dir_assume + '/request_llm/jittorllms') - validate_path() # validate path so you can run from base directory - - def load_model(): - import types - try: - if self.jittorllms_model is None: - device, = get_conf('LOCAL_MODEL_DEVICE') - from .jittorllms.models import get_model - # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"] - args_dict = {'model': 'pangualpha'} - print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))') - self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict)) - print('done get model') - except: - self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。') - raise RuntimeError("不能正常加载jittorllms的参数!") - print('load_model') - load_model() - - # 进入任务等待状态 - print('进入任务等待状态') - while True: - # 进入任务等待状态 - kwargs = self.child.recv() - query = kwargs['query'] - history = kwargs['history'] - # 是否重置 - if len(self.local_history) > 0 and len(history)==0: - print('触发重置') - self.jittorllms_model.reset() - self.local_history.append(query) - - print('收到消息,开始请求') - try: - for response in self.jittorllms_model.stream_chat(query, history): - print(response) - self.child.send(response) - except: - from toolbox import trimmed_format_exc - print(trimmed_format_exc()) - self.child.send('[Local Message] Call jittorllms fail.') - # 请求处理结束,开始下一个循环 - self.child.send('[Finish]') - - def stream_chat(self, **kwargs): - # 主进程执行 - self.threadLock.acquire() - self.parent.send(kwargs) - while True: - res = self.parent.recv() - if res != '[Finish]': - yield res - else: - break - self.threadLock.release() - -global pangu_glm_handle -pangu_glm_handle = None -################################################################################# -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): - """ - 多线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - global pangu_glm_handle - if pangu_glm_handle is None: - pangu_glm_handle = GetGLMHandle() - if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + pangu_glm_handle.info - if not pangu_glm_handle.success: - error = pangu_glm_handle.info - pangu_glm_handle = None - raise RuntimeError(error) - - # jittorllms 没有 sys_prompt 接口,因此把prompt加入 history - history_feedin = [] - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 - response = "" - for response in pangu_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - print(response) - if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: - raise RuntimeError("程序终止。") - return response - - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 单线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - 
chatbot.append((inputs, "")) - - global pangu_glm_handle - if pangu_glm_handle is None: - pangu_glm_handle = GetGLMHandle() - chatbot[-1] = (inputs, load_message + "\n\n" + pangu_glm_handle.info) - yield from update_ui(chatbot=chatbot, history=[]) - if not pangu_glm_handle.success: - pangu_glm_handle = None - return - - if additional_fn is not None: - import core_functional - importlib.reload(core_functional) # 热更新prompt - core_functional = core_functional.get_core_functions() - if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话) - inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"] - - # 处理历史信息 - history_feedin = [] - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - # 开始接收jittorllms的回复 - response = "[Local Message]: 等待jittorllms响应中 ..." - for response in pangu_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - chatbot[-1] = (inputs, response) - yield from update_ui(chatbot=chatbot, history=history) - - # 总结输出 - if response == "[Local Message]: 等待jittorllms响应中 ...": - response = "[Local Message]: jittorllms响应异常 ..." - history.extend([inputs, response]) - yield from update_ui(chatbot=chatbot, history=history) diff --git a/spaces/falterWliame/Face_Mask_Detection/Ghostbusters (1984) 720p BRRip Dual Audio [Hindi-English] By - RANS CLUBZ -.md b/spaces/falterWliame/Face_Mask_Detection/Ghostbusters (1984) 720p BRRip Dual Audio [Hindi-English] By - RANS CLUBZ -.md deleted file mode 100644 index 5ccf41a61077524dfa0f90a6c73394866bf4bd79..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Ghostbusters (1984) 720p BRRip Dual Audio [Hindi-English] By - RANS CLUBZ -.md +++ /dev/null @@ -1,7 +0,0 @@ -
      -

      lepaw 5621c8d4a multi-version application for windows 7, vista, 8, xp, 2000, 2003, 2008 r2, 2012 and server
      devolution 9.14.0 crackbypasses
      play the best full-featured mobile browser www.acronis.com download
      sniper ghost warrior 2 dvd infinite weapon guide
      t-shirts by printrbot 3-d modeling software download
      pve 2.0 + crack patch lite full game online play free
      latest products software easytousepro 2.0.5.2 crack 1.1
      sims online multiplayer play free full
      holland village patch 2015.1.3 (us postal service | originally u.s.postal service)


      steamed steamed steamed

      iphone 6 plus sim бета download iphone 6 plus any version

      -

      Ghostbusters (1984) 720p BRRip Dual Audio [Hindi-English] By - RANS CLUBZ -


      Download ★★★ https://urlca.com/2uDbSI



      -

      hastankiller 3 dotar 9137446e3 1 download link diy battery charger stainless steel outdoor kitchen
      bar wahl sibelius 6.1.0.4 serial key with crack + keygen full version
      9 2019. the big bang theory season 10 episode 8 hd 1080p : release.com.. big bang theory season 10 episode 8 hd 1080p : release.com
      death machine 40 greatest films ever made in hindi hd download under 10






      -

      3d max 8 keygen free and safe download
      download cheshire cat free 2020 inglish subtitles
      russian arabic free download download films hd 1080p
      apartment home beta key (windows
      lvgames pascal 3d download (windows 7
      live ps2 game download (windows 7
      9 2020.
      top free music sites: download flv to webm,mp4,mp3, m4a hd 1080p and much more videos.. theaters selling vhs and dvd movies that have no ability to convert vhs to dvd,. which is the best video converter for converting vhs to dvd so that i could. ghostbusters 1984 720p brrip dual audio hindienglish by rans clubz

      gobbledegook 8.2.0.40 crack serial key [mac + win] hijab theme for ios and android









      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Honestech Vhs To Dvd 7.0 Deluxe Keygen Music.md b/spaces/falterWliame/Face_Mask_Detection/Honestech Vhs To Dvd 7.0 Deluxe Keygen Music.md deleted file mode 100644 index 8558f79e549e1abf09964e0b8e3f5696667d98dd..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Honestech Vhs To Dvd 7.0 Deluxe Keygen Music.md +++ /dev/null @@ -1,8 +0,0 @@ -

      honestech vhs to dvd 7.0 deluxe keygen music


      Download Zip ->->->-> https://urlca.com/2uDcQW



      - -AGGRESSiON - Easy DVD Creator 1.1 .0 / Easy Video to iPod MP4 PSP 3GP .2.x / DFX Audio 7 / DVD Mate Deluxe 2.6.8 / ExpertPDF Pro 5.0 / FinePrint 5.43 / PC. exe / DVD-Cluster / Windows Media Player -Agression Easy DVD Creator - DVD to MPEG-4 converting software for iPod, Apple TV and other portable video devices. -Easy DVD Creator contains video and audio codecs 8a78ff9644
      -
      -
      -

      diff --git a/spaces/falterWliame/Face_Mask_Detection/Modern Control Engineering Ogata 5th Edition Solution Manual Zip 1.md b/spaces/falterWliame/Face_Mask_Detection/Modern Control Engineering Ogata 5th Edition Solution Manual Zip 1.md deleted file mode 100644 index f9f01b5cf9e129cf7122ce607b160d0be535bf2e..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Modern Control Engineering Ogata 5th Edition Solution Manual Zip 1.md +++ /dev/null @@ -1,9 +0,0 @@ -
      -

      The students can transfer their fundamental knowledge on civil engineering to the process of solving practical problems. Students are able to identify and overcome typical problems during the realization of projects in the context of civil engineering. Students are able to develop, compare, and choose conceptual solutions for non-standardized problems.

      -

      Students are able to cooperate in small, mixed-subject groups in order to independently derive solutions to given problems in the context of civil engineering. They are able to effectively present and explain their results alone or in groups in front of a qualified audience. Students have the ability to develop alternative approaches to an civil engineering problem independently or in groups and discuss advantages as well as drawbacks.

      -

      modern control engineering ogata 5th edition solution manual zip 1


      Download Zip ✯✯✯ https://urlca.com/2uDdBo



      -

      pressure control, and variable temperature substrate holder has been designed for. K.Ogata, Modern Control Engineering, 4th Ed., Prentice Hall, Inc,. Department of Electrical Engineering, University of Saskatchewan. stabilizer (PSS) using the indirect adaptive control principle (ADALINE-.

      -

      The students can transfer their fundamental knowledge on civil engineering to the process of solving practical problems. They are able to identify and overcome typical problems during the realization of projects in the context of civil engineering. Students are able to develop, compare, and choose conceptual solutions for non-standardized problems.

      -

      1.pdf. All these solutions on this site are published without charge. 2. Add modern control engineering ogata 5th edition solution manual zip 1 to your favorites. 9th Edition.Solution Manual. Modern Control. 1. Modern Control Engineering by K. Ogata (with solutions). 4.Diagnostics of Control Systems K.Ogata, K.Katsuhiko, Modern. .

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Passages 2 Students Book Download _BEST_ Pdf.md b/spaces/falterWliame/Face_Mask_Detection/Passages 2 Students Book Download _BEST_ Pdf.md deleted file mode 100644 index 619ac2808d55ea9a94dc805c4e430ec47256b27a..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Passages 2 Students Book Download _BEST_ Pdf.md +++ /dev/null @@ -1,13 +0,0 @@ -
      -

      Passages 2 Student's Book: A Comprehensive Guide for Intermediate Learners of English

      -

      Passages 2 Student's Book is a textbook designed for intermediate students who want to improve their English skills in listening, speaking, reading, writing, grammar, and vocabulary. The book is part of a two-level course that takes students from a high-intermediate to an advanced level of proficiency. The book consists of 12 units, each with a theme and a topic that are relevant to the interests and needs of learners in the 21st century. Each unit contains four lessons that focus on different skills and language areas, as well as a review section that consolidates the learning outcomes. The book also includes a self-study section with additional practice activities and tests.

      -

      passages 2 student's book download pdf


      Download File ··· https://urlca.com/2uDc6F



      -

      The book is based on the latest research and methodology in language teaching and learning. It uses authentic texts and audio materials from a variety of sources, such as newspapers, magazines, podcasts, websites, and videos. It also incorporates communicative tasks and projects that encourage students to use the language in meaningful and realistic situations. The book is supported by a range of online resources, such as interactive exercises, audio scripts, answer keys, and teacher's notes.

      -

      Passages 2 Student's Book is an ideal choice for students who want to develop their English skills and prepare for academic or professional purposes. It is also suitable for teachers who are looking for a comprehensive and engaging course that covers all the essential aspects of the English language.

      -

      The book is available in PDF format for free download from various websites[^1^] [^2^] [^3^]. Alternatively, you can purchase a printed copy or an e-book version from the publisher's website[^4^].

      Here are some more paragraphs about the book:

      -

      One of the features of Passages 2 Student's Book is the use of thematic vocabulary and collocations that help students expand their lexical range and express themselves more accurately and fluently. Each unit introduces a set of words and phrases related to the theme and topic of the unit, and provides opportunities for students to practice them in various contexts. For example, in Unit 1, which is about social media and popular culture, students learn vocabulary such as post, share, like, follow, trend, go viral, and influencer. The book also includes a glossary with definitions and examples of all the vocabulary items presented in the units.

      -

      -

      Another feature of Passages 2 Student's Book is the focus on grammar and accuracy. Each unit contains a grammar presentation and practice section that covers a specific grammar point or structure that is relevant to the level and the theme of the unit. The grammar points are explained in clear and simple language, with examples and charts that illustrate their form and use. The practice activities are designed to help students apply the grammar rules in meaningful and communicative ways. For example, in Unit 2, which is about travel and tourism, students learn how to use modal verbs to express possibility, ability, permission, advice, and obligation. The book also includes a grammar reference section with more detailed explanations and examples of all the grammar points covered in the units.

      -

      A third feature of Passages 2 Student's Book is the emphasis on skills development and integration. Each unit contains four skills lessons that focus on listening, speaking, reading, or writing. The skills lessons are based on authentic texts and audio materials that expose students to a variety of genres, styles, registers, and accents. The skills lessons also include tasks and projects that require students to use different skills and language areas in an integrated way. For example, in Unit 3, which is about health and wellness, students listen to a podcast about meditation, read an article about happiness, write an email to a friend with advice on how to cope with stress, and give a presentation about their own wellness plan. The book also includes a self-study section with additional practice activities for each skill.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Block Puzzle Classic Game A Fun and Addictive Way to Pass Time.md b/spaces/fatiXbelha/sd/Block Puzzle Classic Game A Fun and Addictive Way to Pass Time.md deleted file mode 100644 index bcb4f9c2e4d4ad38a45da829c5a4c7ccfce0ae28..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Block Puzzle Classic Game A Fun and Addictive Way to Pass Time.md +++ /dev/null @@ -1,135 +0,0 @@ - -

      Block Puzzle Classic Download: How to Play and Enjoy this Fun Game

      -

      If you are looking for a simple yet addictive game that can keep you entertained for hours, you might want to try Block Puzzle Classic. This is a classic brick game that has been loved by millions of players around the world for decades. In this article, we will tell you what Block Puzzle Classic is, how to download and install it on your device, and how to play and master it. We will also answer some frequently asked questions about the game. Let's get started!

      -

      What is Block Puzzle Classic?

      -

      Block Puzzle Classic is a game that belongs to the genre of block puzzle games, also known as Tetris-like games. These are games that involve moving and rotating different shapes of blocks, called tetrominoes, and fitting them into a grid. The goal is to form complete horizontal lines without gaps, which will then disappear and make room for more blocks. The game ends when the blocks reach the top of the grid and there is no space left for new blocks.
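To make that line-clearing rule concrete, here is a minimal Python sketch of the idea (an illustration only, not the game's actual code; the boolean grid representation and the name clear_full_rows are assumptions): any row with no gaps is removed, an empty row is added at the top, and the number of cleared rows is what a game would turn into points.

```python
# Minimal sketch of the "clear full rows" rule described above.
# True marks a cell occupied by a block; a row with no gaps disappears.
from typing import List

def clear_full_rows(grid: List[List[bool]]) -> int:
    """Remove every completely filled row, pad with empty rows on top,
    and return how many rows were cleared."""
    if not grid:
        return 0
    width = len(grid[0])
    kept = [row for row in grid if not all(row)]      # rows that survive
    cleared = len(grid) - len(kept)                   # full rows vanish
    empty = [[False] * width for _ in range(cleared)] # room for new blocks
    grid[:] = empty + kept
    return cleared

# Example: the bottom row is full, so one line is cleared.
demo = [
    [False, True,  False],
    [True,  True,  True],
]
print(clear_full_rows(demo), demo)
# -> 1 [[False, False, False], [False, True, False]]
```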

      -

      block puzzle classic download


      Download File ››››› https://urllie.com/2uNzoT



      -

      The origin and history of block puzzle games

      -

      The first block puzzle game was created by a Soviet engineer named Alexey Pajitnov in 1984. He named it Tetris, which is a combination of the Greek word "tetra" (meaning four) and his favorite sport, tennis. Tetris became a huge hit in the Soviet Union and soon spread to other countries, especially after it was ported to various platforms such as arcade machines, computers, consoles, handheld devices, and mobile phones. Tetris is considered one of the most popular and influential video games of all time, and has inspired many variations and clones over the years.

      -

      The features and benefits of Block Puzzle Classic

      -

      Block Puzzle Classic is one of the best block puzzle games that you can find online. It has many features and benefits that make it stand out from other similar games. Here are some of them:

      -
        -
      • It has a simple and intuitive interface that is easy to use and navigate.
      • -
      • It has a classic design that resembles the original Tetris game, with colorful blocks and retro sound effects.
      • -
      • It has multiple game modes that cater to different preferences and skill levels. You can choose from classic mode, time mode, bomb mode, hexa mode, jewel mode, and more.
      • -
      • It has a leaderboard that shows your rank and score compared to other players around the world.
      • -
      • It has a statistics page that tracks your progress and achievements in the game.
      • -
      • It has a pause button that allows you to pause the game anytime you want.
      • -
      • It has a reset button that allows you to restart the game anytime you want.
      • -
• It has an offline mode, so you can play the game without a wifi or internet connection.
      • -
      • It has a low battery consumption feature that saves your battery life while playing the game.
      • -
      • It is free to download and play, with no in-app purchases or ads.
      • -
      -

      How to download and install Block Puzzle Classic on your device

      -

      Block Puzzle Classic is available for both Windows and Android devices. Here are the steps to download and install it on your device:

      -

      For Windows users

      -
        -
      1. Go to [this link](^1^) on your browser.
      2. -
      3. Click on the "Get" button on the top right corner of the page.
      4. -
      5. If you have not signed in to your Microsoft account yet, you will be prompted to do so.
      6. -
7. After signing in, you will see a confirmation page that shows the details of the app and your device compatibility.
8. -
9. Click on the "Install" button to start the installation process.
10. -
11. Wait for the app to be downloaded and installed on your device.
12. -
13. Once the installation is complete, you can launch the app from your start menu or desktop.
14. -
      -

      For Android users

      -
        -
1. Go to [this link] in your browser or open the Google Play Store app on your device.
      2. -
      3. Search for "Block Puzzle Classic" in the search bar or scroll down to find it in the list of apps.
      4. -
      5. Tap on the app icon to open its page.
      6. -
      7. Tap on the "Install" button to start the installation process.
      8. -
      9. Wait for the app to be downloaded and installed on your device.
      10. -
      11. Once the installation is complete, you can launch the app from your home screen or app drawer.
      12. -
      -
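If you already have a legitimately obtained APK of the game (for example, for testing inside an emulator), sideloading it from the command line is an optional alternative to the Play Store steps above. The sketch below simply wraps the standard adb install command from Python; the file name block_puzzle_classic.apk is a placeholder, and adb from the Android platform-tools is assumed to be on your PATH.

```python
import shutil
import subprocess
import sys

APK_PATH = "block_puzzle_classic.apk"  # placeholder file name; use your own APK

def sideload(apk_path: str) -> None:
    # Fail early if the Android debug bridge is not installed.
    if shutil.which("adb") is None:
        sys.exit("adb not found on PATH; install the Android platform-tools first")
    # List connected devices/emulators so you can confirm a target exists.
    subprocess.run(["adb", "devices"], check=True)
    # -r reinstalls while keeping existing app data if the game is already present.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    sideload(APK_PATH)
```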

      How to play and master Block Puzzle Classic

      -

      Now that you have downloaded and installed Block Puzzle Classic on your device, you are ready to play and enjoy this fun game. Here are some instructions and tips on how to play and master it:

      -

      The basic rules and controls of the game

      -

      The basic rules and controls of Block Puzzle Classic are very simple and easy to learn. Here are the steps to follow:

      -
        -
      • Select a game mode from the main menu. You can choose from classic mode, time mode, bomb mode, hexa mode, jewel mode, and more. Each mode has different rules and objectives that you can read before starting the game.
      • -
      • In each game mode, you will see a grid of 10x10 cells and a set of blocks of different shapes and colors at the bottom of the screen. You can drag and drop the blocks onto the grid using your mouse or finger. You can also rotate the blocks by clicking or tapping on them.
      • -
      • Your goal is to fill up horizontal or vertical lines with blocks without leaving any gaps. When you do so, the line will disappear and you will earn points. You can also create combos by clearing multiple lines at once, which will give you bonus points and coins.
      • -
      • You have to be careful not to let the blocks pile up too high, as this will end the game. You can see how much space you have left at the top of the screen. You can also use bombs or power-ups to clear some blocks if you get stuck.
      • -
      • You can pause or resume the game anytime by clicking or tapping on the pause button at the top right corner of the screen. You can also reset or quit the game by clicking or tapping on the menu button at the top left corner of the screen.
      • -
      -
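To make the clearing rule concrete, here is a minimal sketch of that step on a 10x10 grid. It is only an illustration of the mechanic described above, not the game's actual code, and the point value per cleared line is an assumption.

```python
# Minimal illustration of the line-clearing rule on a 10x10 grid.
# True cells are occupied; full rows and full columns disappear and award points.
GRID_SIZE = 10
POINTS_PER_LINE = 10  # assumed value, for illustration only

def clear_full_lines(grid):
    full_rows = [r for r in range(GRID_SIZE) if all(grid[r])]
    full_cols = [c for c in range(GRID_SIZE) if all(grid[r][c] for r in range(GRID_SIZE))]
    for r in full_rows:
        grid[r] = [False] * GRID_SIZE
    for c in full_cols:
        for r in range(GRID_SIZE):
            grid[r][c] = False
    cleared = len(full_rows) + len(full_cols)
    # Clearing several lines with one placement is the "combo" the article mentions.
    return grid, cleared * POINTS_PER_LINE

if __name__ == "__main__":
    board = [[False] * GRID_SIZE for _ in range(GRID_SIZE)]
    board[3] = [True] * GRID_SIZE          # one completed row
    board, points = clear_full_lines(board)
    print(points)  # 10
```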

      The tips and tricks to score high and clear levels

      -

      If you want to score high and clear levels in Block Puzzle Classic, you need to apply some strategies and skills. Here are some tips and tricks that can help you:

      -
        -
      • Plan ahead and think carefully before placing a block. Try to visualize how it will fit into the grid and how it will affect future moves. Avoid placing blocks randomly or hastily, as this will create gaps and waste space.
      • -
      • Aim for creating combos by clearing multiple lines at once. This will give you more points and coins, as well as clear more space on the grid. You can also use bombs or power-ups to create combos if you have them.
      • -
      • Use different shapes and colors of blocks wisely. Some blocks are easier to fit than others, depending on their shape and color. For example, long blocks are good for filling up vertical lines, while square blocks are good for filling up horizontal lines. Also, try to match blocks of the same color together, as this will give you extra points.
      • -
• Keep an eye on the next set of blocks that will appear at the bottom of the screen. This will help you plan your moves and avoid getting stuck. You can also swap the current block with the next one by clicking or tapping on the swap button, but this will cost you some coins.
      • -
      • Don't be afraid to experiment and try different game modes. Each game mode has its own rules and objectives, which can challenge your skills and creativity. You can also unlock new game modes by earning enough coins or completing certain levels.
      • -
      -

      The challenges and rewards of the game

      -

      Block Puzzle Classic is not only a fun and relaxing game, but also a challenging and rewarding one. Here are some of the challenges and rewards that you can expect from the game:

      -
        -
      • The game gets harder and faster as you progress. The blocks will fall faster, the grid will get smaller, and the bombs will explode sooner. You will need to be quick and smart to keep up with the pace and avoid losing.
      • -
      • The game has many levels and achievements that you can complete. Each level has a different goal and difficulty, which will test your abilities and patience. You can also earn stars and coins by completing levels, which you can use to unlock new game modes or power-ups.
      • -
      • The game has a global leaderboard that shows your rank and score compared to other players around the world. You can compete with your friends or strangers, and see who is the best block puzzle player. You can also share your results on social media or via email.
      • -
      -

      Conclusion

      -

      Summary of the main points

      -

      In conclusion, Block Puzzle Classic is a classic brick game that you can download and play on your Windows or Android device. It is a simple yet addictive game that involves moving and rotating different shapes of blocks, and fitting them into a grid. It has many features and benefits, such as multiple game modes, leaderboard, statistics, pause button, reset button, no-wifi feature, low battery consumption feature, and more. It also has many challenges and rewards, such as increasing difficulty, levels and achievements, stars and coins, power-ups, and global ranking.

      -

      block puzzle classic 2021 download
      -block puzzle classic game free download
      -block puzzle classic plus download
      -block puzzle classic legend download
      -block puzzle classic tetris download
      -block puzzle classic wood download
      -block puzzle classic apk download
      -block puzzle classic app download
      -block puzzle classic online download
      -block puzzle classic for pc download
      -block puzzle classic for windows 10 download
      -block puzzle classic for android download
      -block puzzle classic for ios download
      -block puzzle classic for mac download
      -block puzzle classic for laptop download
      -block puzzle classic for windows phone download
      -block puzzle classic for surface hub download
      -block puzzle classic for hololens download
      -block puzzle classic mod apk download
      -block puzzle classic hack apk download
      -block puzzle classic unlimited coins download
      -block puzzle classic no ads download
      -block puzzle classic offline download
      -block puzzle classic without wifi download
      -block puzzle classic with hints download
      -block puzzle classic with levels download
      -block puzzle classic with timer download
      -block puzzle classic with leaderboard download
      -block puzzle classic with sound effects download
      -block puzzle classic with music download
      -block puzzle classic with themes download
      -block puzzle classic with achievements download
      -block puzzle classic with challenges download
      -block puzzle classic with rewards download
      -block puzzle classic with friends download
      -how to play block puzzle classic download
      -how to win block puzzle classic download
      -how to score high in block puzzle classic download
      -how to remove ads in block puzzle classic download
      -how to get coins in block puzzle classic download
      -how to get hints in block puzzle classic download
      -how to unlock levels in block puzzle classic download
      -how to change theme in block puzzle classic download
      -how to turn off sound in block puzzle classic download
      -how to turn on music in block puzzle classic download
      -best tips and tricks for block puzzle classic download
      -best strategy for block puzzle classic download
      -best review of block puzzle classic download
      -best alternative to block puzzle classic download

      -

      Call to action and invitation to comment

      -

      If you are interested in playing Block Puzzle Classic, you can download it for free from [this link] for Windows users or [this link] for Android users. You can also visit [this website] for more information about the game. We hope you enjoy playing Block Puzzle Classic as much as we do. If you have any questions or feedback about the game, please feel free to leave a comment below. We would love to hear from you!

      -

      FAQs

      -

      Here are some frequently asked questions about Block Puzzle Classic:

      -
        -
      1. Q: How do I change the language of the game?
        A: You can change the language of the game by clicking or tapping on the menu button at the top left corner of the screen, then selecting "Language". You can choose from English, Spanish, French, German, Italian, Portuguese, Russian, Turkish, Arabic, Japanese, Korean, Chinese (Simplified), Chinese (Traditional), Hindi, Indonesian, Thai, Vietnamese, Malay, Filipino, Polish, Dutch, Swedish, Norwegian, Danish, Finnish, Czech, Slovakian, Hungarian, Romanian, Bulgarian, Greek, and Hebrew. You can also suggest a new language by contacting the developer.
      2. -
      3. Q: How do I turn on or off the sound and music of the game?
        A: You can turn on or off the sound and music of the game by clicking or tapping on the menu button at the top left corner of the screen, then selecting "Sound". You can toggle the sound and music buttons to your preference.
      4. -
      5. Q: How do I use bombs or power-ups in the game?
        A: You can use bombs or power-ups in the game by clicking or tapping on them at the bottom of the screen, then dragging and dropping them onto the grid. You can only use one bomb or power-up per game, and they will cost you some coins. Bombs will clear a 3x3 area of blocks, while power-ups will have different effects depending on the game mode.
      6. -
      7. Q: How do I earn more coins in the game?
        A: You can earn more coins in the game by clearing levels, creating combos, matching blocks of the same color, completing achievements, watching ads, or buying them with real money.
      8. -
      9. Q: How do I unlock new game modes in the game?
        A: You can unlock new game modes in the game by earning enough coins or completing certain levels. Each game mode has a different price and requirement that you can see before unlocking it.
      10. -

      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Cara Download Google Play Store di Laptop Melalui Microsoft Store Praktis dan Aman.md b/spaces/fatiXbelha/sd/Cara Download Google Play Store di Laptop Melalui Microsoft Store Praktis dan Aman.md deleted file mode 100644 index f7d49776de8f97dc95b91b2155df1771d287fceb..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Cara Download Google Play Store di Laptop Melalui Microsoft Store Praktis dan Aman.md +++ /dev/null @@ -1,144 +0,0 @@ -
      -

How to Download the Google Play Store on a Laptop

      -

The Google Play Store is the largest online app store for Android devices. There you can find all kinds of apps, games, films, books, and other digital content to download and enjoy on your Android smartphone or tablet. But did you know that you can also use the Google Play Store on a laptop?

      -

how to download google play store on a laptop


      Download Zip ———>>> https://urllie.com/2uNy3K



      -

Yes, with the help of an Android emulator you can run Android apps and games on a Windows or Mac laptop. An Android emulator is a program that lets you imitate the Android operating system on your computer. With an Android emulator you can access the Google Play Store and install whatever apps or games you want.

      -

So how do you download the Google Play Store on a laptop with an Android emulator? Which Android emulators can you use? And what are the benefits of using the Google Play Store on a laptop? Read the full rundown in this article!

      -

What is the Google Play Store?

      -

The Google Play Store is a digital distribution service developed by Google. It offers a wide range of digital content that you can download and use on your Android device, such as apps, games, films, books, music, podcasts, magazines, and more. It also provides features such as automatic updates, permission management, online payments, user reviews, personalised recommendations, and so on.

      -

Features of the Google Play Store

      -

Here are some of the main features the Google Play Store offers:

      -
        -
• Google Play Protect: protects your device from malicious apps that could steal personal data or damage the system. Google Play Protect periodically scans every app on your device and warns you if any app looks suspicious or potentially harmful.
• -
• Google Play Pass: gives you unlimited access to more than 800 premium apps and games with no ads or in-app purchases. You only pay a monthly or yearly subscription fee to enjoy everything included in Google Play Pass.
• -
• Google Play Points: awards you points every time you download or buy content on the Google Play Store. You can exchange those points for discount coupons, Google Play credit, charity donations, or items in certain games.
• -
• Google Play Family Library: lets you share content you have already bought on the Google Play Store with other family members. You can add up to 5 family members to your Google Family Library account and share apps, games, films, books, and other content for free.
• -
• Google Play Instant: lets you try an app or game without downloading it first. Just tap the "Try Now" button on the app or game's detail page and you can run it right away on your device. This feature helps save storage space and download time.
      • -
      -

Benefits of using the Google Play Store on a laptop

      -

Using the Google Play Store on a laptop has several benefits, including:

      -
        -
• More comfortable and flexible: you can run Android apps or games on a bigger screen and use a keyboard and mouse as controls. You can also switch easily between Android apps or games and Windows or Mac ones.
• -
• Faster and more stable: you can use the laptop's higher specifications to run Android apps or games more smoothly and without lag. You also don't have to worry about the battery, signal, or storage running out while using a laptop.
• -
• Safer and more reliable: you download Android apps or games from a trusted source, namely the Google Play Store. You can also use Google Play Protect to shield your laptop from malicious apps.
      • -
      -

What is an Android emulator?

      -

An Android emulator is a program that lets you imitate the Android operating system on your computer, so you can run Android apps or games on a Windows or Mac laptop. Emulators are usually used by app or game developers to test how their products perform and whether they are compatible across different devices and Android versions, but ordinary users can also use them to enjoy Android content on a laptop.

      -

how to download play store on a laptop windows 10
-how to download play store on a laptop without an emulator
-how to download play store on a laptop with apk pure
-how to download play store on a laptop via the microsoft store
-how to download play store on a laptop with apk downloader
-how to download play store on a laptop windows 7
-how to download play store on a laptop with jalan tikus
-how to download play store on a laptop via the google play store web
-how to download play store on a laptop windows 8
-how to download play store on a laptop easily and quickly
-how to download play store on a laptop without extra software
-how to download play store on a laptop via a browser
-how to download play store on a laptop with bluestacks
-how to download play store on a laptop via viva.co.id
-how to download play store on a laptop with nox player
-how to download play store on a laptop without logging in to a google account
-how to download play store on a laptop with pinhome.id
-how to download play store on a laptop via detik.com
-how to download play store on a laptop by moving the apk file from a phone
-how to download play store on a laptop without an internet connection
-how to download play store on a laptop with other android emulators
-how to download play store on a laptop via youtube
-how to download play store on a laptop with a chrome extension
-how to download play store on a laptop via pinhome.id/blog
-how to download play store on a laptop using a qr code

      -

Functions of an Android emulator

      -

Here are the main uses of an Android emulator:

      -
        -
• Running Android apps or games on a laptop: this is the most common use. You can access the Google Play Store and download the Android apps or games you want, and you can also import APK files from other sources and run them in the emulator.
• -
• Testing Android apps or games on a laptop: this is what app or game developers usually use an emulator for. You can test the performance, features, appearance, and compatibility of the Android apps or games you build across different devices and Android versions without owning the physical hardware.
• -
• Trying the latest version of Android on a laptop: handy for technology enthusiasts. You can try the newest version of the Android operating system before it is officially released to the public, and explore new Android features without replacing your device.
      • -
      -

Types of Android emulators

      -

Here are some popular and widely used Android emulators:

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Emulator | Pros | Cons
BlueStacks | Easy to use with a user-friendly interface; supports many popular apps and games; extra features such as multiplayer mode, screen recorder, keyboard mapping, etc. | Needs fairly high laptop specifications; contains ads and bloatware; prone to bugs and crashes
NoxPlayer | Light and fast; supports many popular apps and games; extra features such as root mode, macro recorder, keyboard mapping, etc. | Needs fairly high laptop specifications; contains ads and bloatware; prone to bugs and crashes
MEmu | Light and fast; supports many popular apps and games; extra features such as root mode, keyboard mapping, etc. | Needs fairly high laptop specifications; contains ads and bloatware; less stable and compatible with some apps or games
LDPlayer | Light and fast; supports many popular apps and games; extra features such as root mode, keyboard mapping, etc. | Needs fairly high laptop specifications; contains ads and bloatware; less stable and compatible with some apps or games
Genymotion | Light and fast; supports many Android versions; extra features such as root mode, virtual camera, sensors, etc. | Needs fairly high laptop specifications; no built-in Google Play Store; aimed at app or game developers
      -

How to download the Google Play Store on a laptop with an Android emulator

      -

Now that you know what an Android emulator is and which kinds exist, let's go through how to download the Google Play Store on a laptop with one. Here we will use three popular Android emulators: BlueStacks, NoxPlayer, and MEmu. The steps are as follows:

      -

How to download BlueStacks and install the Google Play Store

      -
        -
1. Visit the official BlueStacks site at https://www.bluestacks.com/ and click the "Download BlueStacks" button.
2. -
3. Wait for the download to finish and open the BlueStacks installer.
4. -
5. Follow the on-screen instructions to complete the BlueStacks installation.
6. -
7. Open BlueStacks and sign in with your Google account.
8. -
9. Click the Google Play Store icon on the BlueStacks home screen.
10. -
11. Once in the Google Play Store, you can search for and install the Android apps or games you want.
      12. -
      -

How to download NoxPlayer and install the Google Play Store

      -
        -
1. Visit the official NoxPlayer site at https://www.bignox.com/ and click the "Download" button.
2. -
3. Wait for the download to finish and open the NoxPlayer installer.
4. -
5. Follow the on-screen instructions to complete the NoxPlayer installation.
6. -
7. Open NoxPlayer and sign in with your Google account.
8. -
9. Click the Google Play Store icon on the NoxPlayer home screen.
10. -
11. Once in the Google Play Store, you can search for and install the Android apps or games you want.
      12. -
      -

How to download MEmu and install the Google Play Store

      -
        -
1. Visit the official MEmu site at https://www.memuplay.com/ and click the "Download" button.
2. -
3. Wait for the download to finish and open the MEmu installer.
4. -
5. Follow the on-screen instructions to complete the MEmu installation.
6. -
7. Open MEmu and sign in with your Google account.
8. -
9. Click the Google Play Store icon on the MEmu home screen.
10. -
11. Once in the Google Play Store, you can search for and install the Android apps or games you want.
      12. -
      -

Conclusion

      -

That is how to download the Google Play Store on a laptop with an Android emulator. With an emulator you can enjoy all the digital content in the Google Play Store on your laptop screen, and run Android apps or games more comfortably, quickly, stably, and safely. Pick the Android emulator that best fits your needs and preferences. Good luck!

      -

      FAQ

      -

Here are some frequently asked questions about downloading the Google Play Store on a laptop:

      -

Are Android emulators safe to use?

      -

Android emulators are generally safe to use, as long as you download them from a trusted, official source. You should also keep your laptop's specifications in mind so the emulator does not cause overheating or lag. In addition, avoid downloading Android apps or games from unknown or illegal sources, because they may contain viruses or malware.

      -

Are Android emulators legal to use?

      -

Android emulators are generally legal to use, as long as you do not violate the copyright or terms of use of the Android apps or games you run. You should also respect the privacy and security policies of the Google Play Store and of the emulator you use. Be careful with extra features such as root mode, macro recorders, or keyboard mapping, because they can break the rules of certain apps or games.

      -

Do Android emulators need high laptop specifications?

      -

Android emulators need reasonably high laptop specifications to run smoothly and stably. The exact requirements differ depending on the emulator and on the apps or games you run, but in general the recommended specifications for running an Android emulator are as follows (a quick self-check script is sketched after this list):

      -
        -
• Processor: Intel or AMD at a minimum of 2 GHz
• -
• RAM: at least 4 GB
• -
• Storage: at least 5 GB of free space
• -
• Graphics: a graphics card that supports OpenGL 2.0 or higher
• -
• Operating system: Windows 7 or later, Mac OS X 10.9 or later
      • -
      -
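As a rough illustration, the short script below checks two of these requirements (RAM and free disk space) from Python. It assumes the third-party psutil package is installed (pip install psutil); the thresholds simply mirror the checklist above.

```python
import platform
import shutil

import psutil  # third-party dependency: pip install psutil

MIN_RAM_GB = 4   # mirrors the checklist above
MIN_FREE_GB = 5

def check_specs() -> None:
    ram_gb = psutil.virtual_memory().total / 1024**3
    free_gb = shutil.disk_usage("/").free / 1024**3
    print(f"OS        : {platform.system()} {platform.release()}")
    print(f"RAM       : {ram_gb:.1f} GB ({'ok' if ram_gb >= MIN_RAM_GB else 'below minimum'})")
    print(f"Free disk : {free_gb:.1f} GB ({'ok' if free_gb >= MIN_FREE_GB else 'below minimum'})")
    # CPU clock speed and OpenGL support are not reliably visible from here;
    # check them in the emulator's own diagnostics instead.

if __name__ == "__main__":
    check_specs()
```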

Can an Android emulator be connected to other Android devices?

      -

An Android emulator can be connected to other Android devices through certain features such as Bluetooth, Wi-Fi, USB, and so on. However, these features may not be available, or may not work well, in every emulator. Check first whether the emulator you use supports them and how to enable them.

      -

Can an Android emulator run every Android app or game?

      -

In principle an Android emulator can run any app or game found in the Google Play Store. However, several factors affect performance and compatibility, such as the Android version, the laptop's specifications, and the emulator's settings. You may therefore run into issues such as lag, crashes, or errors when running certain apps or games in an emulator.

      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download and Play Onmyoji Arena Simplified Chinese The Best MOBA Game from NetEase.md b/spaces/fatiXbelha/sd/Download and Play Onmyoji Arena Simplified Chinese The Best MOBA Game from NetEase.md deleted file mode 100644 index 0d9cd9f38f1cefaa7bc6e7be2ba289a7c7dc3f96..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download and Play Onmyoji Arena Simplified Chinese The Best MOBA Game from NetEase.md +++ /dev/null @@ -1,153 +0,0 @@ - -

      Onmyoji Arena China Server Download: How to Play the Chinese Version of the Game

      -

      If you are a fan of Onmyoji Arena, the popular MOBA game based on the Japanese Shikigami characters and art style, you might be wondering how to play the Chinese version of the game. The Chinese version, also known as OA CN, is a different app with different content from the global version. It has more events, collab shiki, a huge player base, a pro scene, and many more unique features that are exclusive to China. In this article, we will show you how to download and install the Chinese version of Onmyoji Arena on your Android or iOS device, how to bind your account and make in-app purchases, and why you should give it a try.

      -

      Introduction

      -

      What is Onmyoji Arena?

      -

      Onmyoji Arena is a 5v5 MOBA game developed by NetEase Games. It is based on the Onmyoji RPG series, which features mysterious Japanese Shikigami characters and a rich story set in the Heian Period. The game offers English/Chinese/Japanese UI and voice acting, and has innovative breakthroughs in map design and fog system. The game also has a variety of modes, such as classic mode, ranked mode, battle royale mode, and custom mode.

      -

      onmyoji arena china server download


      Download File ✦✦✦ https://urllie.com/2uNy9T



      -

      Why play the Chinese version of Onmyoji Arena?

      -

      The Chinese version of Onmyoji Arena, or OA CN, is the main server of the game. It has many advantages over the global version, such as:

      -
        -
      • It gets new shiki and updates faster than the global version.
      • -
      • It has more events and rewards for players to enjoy.
      • -
      • It has exclusive collab shiki with other franchises, such as Naruto, Bleach, Inuyasha, etc.
      • -
      • It has a larger and more active player base, which means shorter queue times and more competitive matches.
      • -
      • It has a professional league called OPL, where top players compete for glory and prizes.
      • -
      • It has more in-game features and options, such as voice chat, spectator mode, replays, etc.
      • -
      • It has an additional section in the shop for purchasing OA merchandise, such as figurines, keychains, shirts, caps, etc.
      • -
      -

      If you want to experience the full potential of Onmyoji Arena, you should definitely try playing the Chinese version.

      -

      How to download Onmyoji Arena Chinese version
      -Onmyoji Arena simplified Chinese APK
      -Onmyoji Arena China server VPN
      -Onmyoji Arena Chinese voice pack
      -Onmyoji Arena China server Reddit
      -Onmyoji Arena China server QooApp
      -Onmyoji Arena China server guide
      -Onmyoji Arena China server account
      -Onmyoji Arena China server ping
      -Onmyoji Arena China server ranking
      -Onmyoji Arena China server events
      -Onmyoji Arena China server shikigami
      -Onmyoji Arena China server skins
      -Onmyoji Arena China server update
      -Onmyoji Arena China server patch notes
      -Onmyoji Arena China server tier list
      -Onmyoji Arena China server gameplay
      -Onmyoji Arena China server streamers
      -Onmyoji Arena China server discord
      -Onmyoji Arena China server forum
      -Onmyoji Arena China server tips and tricks
      -Onmyoji Arena China server best shikigami
      -Onmyoji Arena China server meta
      -Onmyoji Arena China server lag fix
      -Onmyoji Arena China server support
      -Onmyoji Arena Chinese version differences
      -Onmyoji Arena Chinese version features
      -Onmyoji Arena Chinese version review
      -Onmyoji Arena Chinese version download link
      -Onmyoji Arena Chinese version installation guide
      -Onmyoji Arena Chinese version compatibility
      -Onmyoji Arena Chinese version language settings
      -Onmyoji Arena Chinese version payment methods
      -Onmyoji Arena Chinese version merchandise
      -Onmyoji Arena Chinese version collabs
      -Onmyoji Arena Chinese version codes and coupons
      -Onmyoji Arena Chinese version news and announcements
      -Onmyoji Arena Chinese version community and fanbase
      -Onmyoji Arena Chinese version feedback and suggestions
      -Onmyoji Arena Chinese version bugs and issues
      -How to play on CN servers in onmyoji arena

      -

      How to download and install the Chinese version of Onmyoji Arena

      -

      For Android users

      -

      If you are using an Android device, you have two options to download and install the Chinese version of Onmyoji Arena:

      -

      Option 1: Use QooApp or Taptap

      -

      QooApp and Taptap are third-party app stores that allow you to download games from different regions. You can use them to download the simplified Chinese version of Onmyoji Arena easily. Here are the steps:

      -
        -
      1. Download QooApp or Taptap from their official websites or Google Play Store.
      2. -
      3. Open QooApp or Taptap and search for "Onmyoji Arena" or "阴阳师平安物语".
      4. -
      5. Select the simplified Chinese version of the game and tap the install button.
      6. -
      7. Wait for the download and installation to finish.
      8. -
      9. Launch the game and enjoy.
      10. -
      -

      Note: You may need to update the game from QooApp or Taptap whenever there is a new version available.

      -

      Option 2: Use the official website

      -

      You can also download the Chinese version of Onmyoji Arena directly from the official website. Here are the steps:

      -
        -
      1. Go to the official website of Onmyoji Arena: https://hyrz.qq.com/
      2. -
      3. Tap on the Android icon on the top right corner of the page.
      4. -
      5. Scan the QR code with your phone or copy the download link to your browser.
      6. -
      7. Download and install the APK file.
      8. -
      9. Launch the game and enjoy.
      10. -
      -

      Note: You may need to enable installation from unknown sources in your device settings before installing the APK file. You may also need to update the game manually by downloading the latest APK file from the website whenever there is a new version available.
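Since the note above says the game may have to be updated manually by fetching the latest APK from the website, a small download helper can save a step. The sketch below uses the third-party requests package; the URL shown is a placeholder that you would replace with the actual APK link taken from the official site, and installation still has to happen on the device or emulator.

```python
import requests  # third-party dependency: pip install requests

# Placeholder URL: substitute the real APK link taken from the official site.
APK_URL = "https://example.com/onmyoji-arena-cn.apk"
OUT_FILE = "onmyoji-arena-cn.apk"

def download_apk(url: str, out_file: str) -> None:
    # Stream the response so a large APK is not held in memory all at once.
    with requests.get(url, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        with open(out_file, "wb") as fh:
            for chunk in resp.iter_content(chunk_size=1 << 20):
                fh.write(chunk)
    print(f"Saved {out_file}")

if __name__ == "__main__":
    download_apk(APK_URL, OUT_FILE)
```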

      -

      For iOS users

      -

      If you are using an iOS device, you have two options to download and install the Chinese version of Onmyoji Arena:

      -

      Option 1: Change your App Store region to China

      -

      You can change your App Store region to China temporarily to download the Chinese version of Onmyoji Arena. Here are the steps:

      -
        -
      1. Go to your device settings and tap on your Apple ID.
      2. -
      3. Tap on Media and Purchases and then View Account.
      4. -
      5. Tap on Country/Region and then Change Country or Region.
      6. -
      7. Select China from the list and agree to the terms and conditions.
      8. -
      9. Enter a valid Chinese address and phone number. You can use a fake address generator such as https://www.fakeaddressgenerator.com/World/china_address_generator
      10. -
      11. Tap on Done and wait for the App Store to reload.
      12. -
      13. Search for "Onmyoji Arena" or "阴阳师平安物语" in the App Store and download the game.
      14. -
      15. Launch the game and enjoy.
      16. -
      -

      Note: You may need to change your App Store region back to your original one after downloading the game. You may also need to change your App Store region to China again whenever there is a new update available for the game.

      -

      Option 2: Use a Chinese Apple ID

      -

      You can also use a Chinese Apple ID to download the Chinese version of Onmyoji Arena. Here are the steps:

      -
        -
      1. Create a new Apple ID with a Chinese email address. You can use a free email service such as https://mail.qq.com/
      2. -
      3. Go to your device settings and tap on your Apple ID.
      4. -
      5. Tap on Sign Out and then Sign In with another Apple ID.
      6. -
      7. Enter your Chinese Apple ID and password and sign in.
      8. -
      9. Search for "Onmyoji Arena" or "阴阳师平安物语" in the App Store and download the game.
      10. -
      11. Launch the game and enjoy.
      12. -
      -

      Note: You may need to switch between your original Apple ID and your Chinese Apple ID whenever you want to play or update the game. You may also need to verify your Chinese Apple ID with a phone number or a payment method occasionally.

      -

      How to bind your account and make in-app purchases

      -

      How to bind your account using your phone number or email

      -

      If you want to save your progress and data, you should bind your account using your phone number or email. Here are the steps:

      -
        -
      1. Launch the game and tap on the gear icon on the top right corner of the main screen.
      2. -
      3. Tap on the account icon on the bottom left corner of the settings screen.
      4. -
      5. Select either phone number or email as your binding method.
      6. -
      7. Enter your phone number or email address and tap on send verification code.
      8. -
      9. Enter the verification code you received and tap on confirm binding.
      10. -
      11. Your account is now bound successfully. You can use it to log in on different devices or recover your data if you lose it.
      12. -
      -

      How to use NetEase Pay-Le Life payment system

      -

      If you want to make in-app purchases, such as buying skins, shikigami, or items, you need to use NetEase Pay-Le Life payment system, which is a digital wallet service provided by NetEase. Here are the steps:

      -
        -
      1. Launch the game and tap on the plus icon on the top right corner of the main screen.
      2. -
      3. Select the item you want to buy and tap on the buy button.
      4. -
      5. You will be redirected to the NetEase Pay-Le Life app or website. If you don't have the app, you can download it from https://pay.163.com/
      6. -
      7. Create an account using your phone number or email address and set a password.
      8. -
      9. Add a payment method, such as a credit card, debit card, or Alipay.
      10. -
      11. Confirm your purchase and enter your payment details.
      12. -
      13. Your purchase is now complete. You can check your order history and balance in the NetEase Pay-Le Life app or website.
      14. -
      -

      Note: You may need to verify your identity and phone number before making a purchase. You may also need to use a VPN or proxy service if you are outside of China.

      -

      Conclusion

      -

      Summary of the main points

      -

      In this article, we have shown you how to play the Chinese version of Onmyoji Arena, the popular MOBA game based on the Japanese Shikigami characters and art style. We have explained what is Onmyoji Arena, why play the Chinese version, how to download and install it on your Android or iOS device, how to bind your account and make in-app purchases. We hope you have found this article helpful and informative.

      -

      Call to action

      -

      If you are ready to join the millions of players who are enjoying the Chinese version of Onmyoji Arena, don't hesitate to download it now and start your adventure. You will be amazed by the rich content, stunning graphics, smooth gameplay, and diverse modes that this game has to offer. You can also follow the official social media accounts of Onmyoji Arena to get the latest news, updates, tips, and tricks. Thank you for reading this article and have fun playing Onmyoji Arena!

      -

      FAQs

      -

      Here are some frequently asked questions about Onmyoji Arena China server download:

      -
        -
      1. Is Onmyoji Arena China server free to play?
      2. -

        Yes, Onmyoji Arena China server is free to play. You can download and play it without spending any money. However, you can also make in-app purchases to buy skins, shikigami, items, or other benefits if you want.

        -
      3. Can I play Onmyoji Arena China server with my friends from other regions?
      4. -

        Yes, you can play Onmyoji Arena China server with your friends from other regions. However, you need to add them as friends in the game first. You can do this by tapping on the friend icon on the bottom right corner of the main screen and entering their UID or scanning their QR code. You can also join or create a team with them by tapping on the team icon on the top left corner of the main screen and inviting them or entering their team code.

        -
      5. Can I transfer my progress and data from the global version to the Chinese version of Onmyoji Arena?
      6. -

        No, you cannot transfer your progress and data from the global version to the Chinese version of Onmyoji Arena. They are separate apps with separate servers and databases. You need to start from scratch if you want to play the Chinese version.

        -
      7. Can I play Onmyoji Arena China server in English?
      8. -

        No, you cannot play Onmyoji Arena China server in English. The game only supports simplified Chinese as its language option. However, you can use online translation tools or guides to help you understand the game better.

        -
      9. Is Onmyoji Arena China server safe and legal to play?
      10. -

        Yes, Onmyoji Arena China server is safe and legal to play. The game is developed by NetEase Games, a reputable company that has been making games for over 20 years. The game is also approved by the Chinese government and follows its regulations and policies. However, you should be careful when downloading and installing the game from third-party sources or using VPN or proxy services. You should also respect the game's terms of service and rules of conduct.

        -

      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Exploration Lite World Craft Mod APK The Best 3D Sandbox Game Ever.md b/spaces/fatiXbelha/sd/Exploration Lite World Craft Mod APK The Best 3D Sandbox Game Ever.md deleted file mode 100644 index a1322fda214facb81a2662d9084b34e45c0e39cc..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Exploration Lite World Craft Mod APK The Best 3D Sandbox Game Ever.md +++ /dev/null @@ -1,105 +0,0 @@ - -

      Exploration Lite Craft Mod Apk: A Guide for Beginners

      -

      Do you love playing games that let you explore, build, and survive in an open world? If yes, then you might have heard of Exploration Lite Craft, a game of action and adventure in an infinite blocky open world. But did you know that there is a modified version of this game that gives you more features, options, and coins for free? Yes, you read that right. In this article, we will tell you everything you need to know about Exploration Lite Craft Mod Apk, a free download that will take your gaming experience to the next level. Read on to find out more.

      -

      What is Exploration Lite Craft?

      -

      Exploration Lite Craft is a game that lets you create your own world and explore it as you wish. You can build anything you can imagine, from houses and castles to farms and cities. You can also collect resources, craft tools and weapons, and fight against enemies and monsters. You can play in two modes: survival and creative. In survival mode, you have to gather food, water, and materials to stay alive and protect yourself from dangers. In creative mode, you have unlimited resources and no threats, so you can focus on building and designing your world.

      -

      exploration lite world craft mod apk


      Download File - https://urllie.com/2uNBqt



      -

      The game is inspired by the popular Minecraft game, but it has its own features and style. It has a pixelated and colorful graphics that give it a retro and nostalgic feel. It also has a simple and intuitive interface that makes it easy to play and control. The game is suitable for all ages and preferences, as you can customize your character and environment according to your taste. You can also play with your friends online or offline, and share your creations with other players.

      -

      What is Exploration Lite Craft Mod Apk?

      -

      Exploration Lite Craft Mod Apk is a modified version of the original game that gives you more benefits and advantages. It is a free download that you can install on your Android device without any hassle. It has the following features:

      -
        -
      • A free download with unlimited coins: You don't have to pay anything to download and play the mod apk. You also get unlimited coins that you can use to buy items and resources in the game. You don't have to worry about running out of money or spending real cash.
      • -
      • A compatible app with all devices: The mod apk works on any Android device, regardless of the model or version. You don't need to root your device or have a high-end device to play the mod apk. It runs smoothly and without any lag or crash.
      • -
      • A modified version of the original game: The mod apk has all the features and options of the original game, but with some improvements and additions. You can enjoy playing the same game, but with more fun and excitement.
      • -
      -

      Why should you play Exploration Lite Craft Mod Apk?

      -

      There are many reasons why you should play Exploration Lite Craft Mod Apk instead of the original game. Here are some of them:

      -

      It has more features and options than the original game

      -

      The mod apk gives you more possibilities and choices than the original game. For example:

      -
        -
      • You can customize your character and environment: You can change the appearance of your character, such as the skin color, hair style, clothes, accessories, etc. You can also change the settings of your world, such as the time, weather, biome, difficulty, etc.
      • -
      • You can access more items and resources: You can use more items and resources in the game, such as blocks, tools, weapons, armor, food, animals, plants, etc. You can also craft more items and recipes with the materials you have.
      • -
      • You can explore more biomes and structures: You can discover more biomes and structures in the game, such as forests, deserts, mountains, oceans, caves, villages, temples, dungeons, etc. You can also find more secrets and treasures hidden in these places.
      • -
      -

      It has positive ratings and reviews from users

      -

      The mod apk has received a lot of praise and feedback from users who have tried it. For example:

      -
        -
      • It has a 4.5-star rating on Google Play Store: The mod apk has a high rating on Google Play Store, which shows that users are satisfied with its performance and quality. It has over 100 thousand ratings from users who have rated it with 5 stars.
      • -
      • It has over 10 million downloads worldwide: The mod apk has a large number of downloads from users all over the world. It shows that users are interested in playing it and enjoying its features.
      • -
      • It has thousands of positive comments from players: The mod apk has a lot of positive comments from players who have shared their opinions and experiences with it. They have praised its graphics, gameplay, controls, features, options, etc.
      • -
      -

      It has stunning graphics and sound effects

      -

      The mod apk has amazing graphics and sound effects that make it more realistic and immersive. For example:

      -
        -
      • It has realistic 3D graphics and animations: The mod apk has realistic 3D graphics and animations that make the game look more lifelike and appealing. You can see the details and textures of the blocks, items, and characters. You can also see the shadows, reflections, and lighting effects that create a dynamic and vibrant atmosphere.
      • -
      • It has immersive sound effects and music: The mod apk has immersive sound effects and music that make the game sound more authentic and engaging. You can hear the sounds of the environment, such as the wind, water, fire, animals, etc. You can also hear the sounds of your actions, such as breaking, placing, crafting, fighting, etc. You can also listen to the music that matches the mood and theme of the game.
      • -
      • It has smooth gameplay and controls: The mod apk has smooth gameplay and controls that make the game easy and fun to play. You can move, jump, fly, swim, and interact with the world using simple gestures and buttons. You can also adjust the sensitivity and orientation of the camera according to your preference. You can also switch between different modes and views with a single tap.
      • -
      -

      How to download and install Exploration Lite Craft Mod Apk?

      -

      If you are interested in playing Exploration Lite Craft Mod Apk, you can download and install it on your device in a few minutes. All you need to do is follow these simple steps:

      -
        -
1. Step 1: Click on this link to download the mod apk file: the link takes you to a secure and reliable website where you can get the file for free. You don't need to sign up or register to access it.
      2. -
      3. Step 2: Allow unknown sources on your device settings: Before you can install the mod apk file on your device, you need to enable the option of unknown sources on your device settings. This will allow you to install apps from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
      4. -
      5. Step 3: Install the mod apk file on your device: After you have downloaded the mod apk file and enabled unknown sources, you can install the mod apk file on your device. To do this, locate the file in your downloads folder and tap on it. Then, follow the instructions on the screen to complete the installation process.
      6. -
      7. Step 4: Launch the game and enjoy playing: Once you have installed the mod apk file on your device, you can launch the game and start playing. You will see that you have unlimited coins and access to all features and options of the game. You can also customize your character and world as you like.
      8. -
      -
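Because a mod APK comes from outside the Play Store, it is worth verifying the file you downloaded before installing it. The snippet below is a generic SHA-256 check using only the Python standard library; the expected hash is a made-up placeholder that you would replace with a checksum published by the download site, if one is provided.

```python
import hashlib
from pathlib import Path

APK_FILE = Path("exploration-lite-craft-mod.apk")  # placeholder file name
EXPECTED_SHA256 = "0000000000000000000000000000000000000000000000000000000000000000"  # placeholder

def sha256_of(path: Path) -> str:
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        # Read in 1 MiB chunks so large files do not need to fit in memory.
        for block in iter(lambda: fh.read(1 << 20), b""):
            digest.update(block)
    return digest.hexdigest()

if __name__ == "__main__":
    actual = sha256_of(APK_FILE)
    print("sha256:", actual)
    print("match" if actual == EXPECTED_SHA256 else "MISMATCH - do not install")
```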

      Conclusion

      -

      Exploration Lite Craft Mod Apk is a fun and exciting game that you should try today. It is a modified version of Exploration Lite Craft that gives you more features, options, and coins for free. It is a game of action and adventure in an infinite blocky open world where you can explore, build, and survive as you wish. It has stunning graphics and sound effects that make it more realistic and immersive. It has positive ratings and reviews from users who have enjoyed playing it. It is a free download that you can install on your Android device without any hassle. It is a compatible app with all devices that runs smoothly and without any lag or crash.

      -

      exploration lite craft apk mod unlimited resources
      -exploration lite world craft mod apk download free
      -exploration lite craft mod apk latest version
      -exploration lite world craft mod apk for android
      -exploration lite craft mod apk offline
      -exploration lite world craft mod apk no ads
      -exploration lite craft mod apk unlimited money
      -exploration lite world craft mod apk 2023
      -exploration lite craft mod apk hack
      -exploration lite world craft mod apk pro
      -exploration lite craft mod apk full version
      -exploration lite world craft mod apk with cheats
      -exploration lite craft mod apk sandbox mode
      -exploration lite world craft mod apk 3d
      -exploration lite craft mod apk online multiplayer
      -exploration lite world craft mod apk survival mode
      -exploration lite craft mod apk creative mode
      -exploration lite world craft mod apk adventure mode
      -exploration lite craft mod apk premium
      -exploration lite world craft mod apk unlocked
      -exploration lite craft mod apk free shopping
      -exploration lite world craft mod apk mega mod
      -exploration lite craft mod apk all features
      -exploration lite world craft mod apk best graphics
      -exploration lite craft mod apk realistic physics
      -exploration lite craft mod apk custom skins
      -exploration lite world craft mod apk new maps
      -exploration lite craft mod apk magic spells
      -exploration lite world craft mod apk castles
      -exploration lite craft mod apk plants and animals
      -exploration lite world craft mod apk mining and crafting
      -exploration lite craft mod apk tools and weapons
      -exploration lite world craft mod apk mounts and vehicles
      -exploration lite craft mod apk quests and achievements
      -exploration lite craft mod apk fun and addictive
      -exploration lite world craft mod apk reviews and ratings
      -exploration lite craft mod apk tips and tricks
      -exploration lite world craft mod apk guide and tutorial
      -exploration lite craft mod apk gameplay and features
      -exploration lite world craft mod apk screenshots and videos

      -

      If you are looking for a game that will keep you entertained for hours, then Exploration Lite Craft Mod Apk is the perfect choice for you. Download it now and have fun!

      -

      Frequently Asked Questions

      -

      Here are some of the common questions that users have about Exploration Lite Craft Mod Apk:

      -
        -
      • Q: Is Exploration Lite Craft Mod Apk safe to download and play?
      • -
      • A: Yes, Exploration Lite Craft Mod Apk is safe to download and play. It does not contain any viruses or malware that could harm your device or data. It also does not require any permissions or access to your personal information or files.
      • -
      • Q: Is Exploration Lite Craft Mod Apk legal to use?
      • -
      • A: Yes, Exploration Lite Craft Mod Apk is legal to use. It does not violate any laws or regulations that govern the use of apps or games. It also does not infringe any copyrights or trademarks of the original game or its developers. It is a fan-made mod that is created for entertainment purposes only.
      • -
      • Q: How can I update Exploration Lite Craft Mod Apk?
      • -
      • A: You can update Exploration Lite Craft Mod Apk by following the same steps that you used to download and install it. You just need to check if there is a new version of the mod apk file available on the website and download it. Then, you can install it over the existing one and enjoy the new features and improvements.
      • -
      • Q: Can I play Exploration Lite Craft Mod Apk offline?
      • -
      • A: Yes, you can play Exploration Lite Craft Mod Apk offline. You don't need an internet connection to play the game, unless you want to play with your friends online or share your creations with other players. You can also save your progress and load it anytime you want.
      • -
      • Q: Can I play Exploration Lite Craft Mod Apk on PC?
      • -
      • A: Yes, you can play Exploration Lite Craft Mod Apk on PC. You just need to use an Android emulator that will allow you to run Android apps and games on your PC. Some of the popular Android emulators are Bluestacks, Nox Player, and LDPlayer. You can download and install any of them on your PC and then follow the same steps that you used to download and install the mod apk on your device.
      • -
      -

      I hope this article has helped you learn more about Exploration Lite Craft Mod Apk and how to download and play it. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and happy gaming!

      -
      -
      \ No newline at end of file diff --git a/spaces/fatmacankara/ASCARIS/code/add_interface_pos.py b/spaces/fatmacankara/ASCARIS/code/add_interface_pos.py deleted file mode 100644 index 2d86fa2ce1cacd718d0066ae92475b806ae06338..0000000000000000000000000000000000000000 --- a/spaces/fatmacankara/ASCARIS/code/add_interface_pos.py +++ /dev/null @@ -1,35 +0,0 @@ -def get_interface_positions(dataframe, column1, column2): - interface_positions = {} - for i in dataframe.index: - if dataframe.at[i, column1] not in interface_positions and dataframe.at[i, column1 + '_IRES'] != '[]': - interface_positions[dataframe.at[i, column1]] = dataframe.at[i, str(column1 + '_IRES')] - elif dataframe.at[i, column1] in interface_positions and dataframe.at[i, column1 + '_IRES'] != '[]': - interface_positions[dataframe.at[i, column1]] = interface_positions[dataframe.at[i, column1]].strip( - ']') + ',' + (dataframe.at[i, str(column1 + '_IRES')]).strip('[') - if dataframe.at[i, column2] not in interface_positions and dataframe.at[i, column2 + '_IRES'] != '[]': - interface_positions[dataframe.at[i, column2]] = dataframe.at[i, str(column2 + '_IRES')] - elif dataframe.at[i, column2] in interface_positions and dataframe.at[i, column2 + '_IRES'] != '[]': - interface_positions[dataframe.at[i, column2]] = interface_positions[dataframe.at[i, column2]].strip( - ']') + ',' + (dataframe.at[i, str(column2 + '_IRES')]).strip('[') - - try: - for key, value in interface_positions.items(): - n = [] - m = [] - if value != '[]': - valueList = value.split(',') - valueList[0] = str(valueList[0]).strip('[') - valueList[-1] = str(valueList[-1]).strip(']') - for val in valueList: - if '-' in val: - for r in range(int(val.split('-')[0]), int(val.split('-')[1]) + 1): - n.append(r) - else: - m.append(int(val)) - fin = m + n - - interface_positions[key] = fin - except: - ValueError - - return interface_positions diff --git a/spaces/fb700/chatglm-fitness-RLHF/docs/waifu_plugin/waifu-tips.js b/spaces/fb700/chatglm-fitness-RLHF/docs/waifu_plugin/waifu-tips.js deleted file mode 100644 index 8f9533a19e7d4914bde888ee2a107e4430242968..0000000000000000000000000000000000000000 --- a/spaces/fb700/chatglm-fitness-RLHF/docs/waifu_plugin/waifu-tips.js +++ /dev/null @@ -1,405 +0,0 @@ -window.live2d_settings = Array(); /* - - く__,.ヘヽ.    / ,ー、 〉 -      \ ', !-─‐-i / /´ -       /`ー'    L//`ヽ、 Live2D 看板娘 参数设置 -      /  /,  /|  ,  ,    ', Version 1.4.2 -    イ  / /-‐/ i L_ ハ ヽ!  i Update 2018.11.12 -     レ ヘ 7イ`ト  レ'ァ-ト、!ハ|  | -      !,/7 '0'   ´0iソ|   |    -      |.从"  _   ,,,, / |./   | 网页添加 Live2D 看板娘 -      レ'| i>.、,,__ _,.イ /  .i  | https://www.fghrsh.net/post/123.html -       レ'| | / k_7_/レ'ヽ, ハ. | -        | |/i 〈|/  i ,.ヘ | i | Thanks -       .|/ / i:   ヘ!  \ | journey-ad / https://github.com/journey-ad/live2d_src -         kヽ>、ハ   _,.ヘ、   /、! xiazeyu / https://github.com/xiazeyu/live2d-widget.js -        !'〈//`T´', \ `'7'ーr' Live2d Cubism SDK WebGL 2.1 Projrct & All model authors. 
-        レ'ヽL__|___i,___,ンレ|ノ -          ト-,/ |___./ -          'ー'  !_,.:*********************************************************************************/ - - -// 后端接口 -live2d_settings['modelAPI'] = '//live2d.fghrsh.net/api/'; // 自建 API 修改这里 -live2d_settings['tipsMessage'] = 'waifu-tips.json'; // 同目录下可省略路径 -live2d_settings['hitokotoAPI'] = 'lwl12.com'; // 一言 API,可选 'lwl12.com', 'hitokoto.cn', 'jinrishici.com'(古诗词) - -// 默认模型 -live2d_settings['modelId'] = 1; // 默认模型 ID,可在 F12 控制台找到 -live2d_settings['modelTexturesId'] = 53; // 默认材质 ID,可在 F12 控制台找到 - -// 工具栏设置 -live2d_settings['showToolMenu'] = true; // 显示 工具栏 ,可选 true(真), false(假) -live2d_settings['canCloseLive2d'] = true; // 显示 关闭看板娘 按钮,可选 true(真), false(假) -live2d_settings['canSwitchModel'] = true; // 显示 模型切换 按钮,可选 true(真), false(假) -live2d_settings['canSwitchTextures'] = true; // 显示 材质切换 按钮,可选 true(真), false(假) -live2d_settings['canSwitchHitokoto'] = true; // 显示 一言切换 按钮,可选 true(真), false(假) -live2d_settings['canTakeScreenshot'] = true; // 显示 看板娘截图 按钮,可选 true(真), false(假) -live2d_settings['canTurnToHomePage'] = true; // 显示 返回首页 按钮,可选 true(真), false(假) -live2d_settings['canTurnToAboutPage'] = true; // 显示 跳转关于页 按钮,可选 true(真), false(假) - -// 模型切换模式 -live2d_settings['modelStorage'] = true; // 记录 ID (刷新后恢复),可选 true(真), false(假) -live2d_settings['modelRandMode'] = 'switch'; // 模型切换,可选 'rand'(随机), 'switch'(顺序) -live2d_settings['modelTexturesRandMode']= 'rand'; // 材质切换,可选 'rand'(随机), 'switch'(顺序) - -// 提示消息选项 -live2d_settings['showHitokoto'] = true; // 显示一言 -live2d_settings['showF12Status'] = true; // 显示加载状态 -live2d_settings['showF12Message'] = false; // 显示看板娘消息 -live2d_settings['showF12OpenMsg'] = true; // 显示控制台打开提示 -live2d_settings['showCopyMessage'] = true; // 显示 复制内容 提示 -live2d_settings['showWelcomeMessage'] = true; // 显示进入面页欢迎词 - -//看板娘样式设置 -live2d_settings['waifuSize'] = '280x250'; // 看板娘大小,例如 '280x250', '600x535' -live2d_settings['waifuTipsSize'] = '250x70'; // 提示框大小,例如 '250x70', '570x150' -live2d_settings['waifuFontSize'] = '12px'; // 提示框字体,例如 '12px', '30px' -live2d_settings['waifuToolFont'] = '14px'; // 工具栏字体,例如 '14px', '36px' -live2d_settings['waifuToolLine'] = '20px'; // 工具栏行高,例如 '20px', '36px' -live2d_settings['waifuToolTop'] = '0px' // 工具栏顶部边距,例如 '0px', '-60px' -live2d_settings['waifuMinWidth'] = '768px'; // 面页小于 指定宽度 隐藏看板娘,例如 'disable'(禁用), '768px' -live2d_settings['waifuEdgeSide'] = 'left:0'; // 看板娘贴边方向,例如 'left:0'(靠左 0px), 'right:30'(靠右 30px) -live2d_settings['waifuDraggable'] = 'disable'; // 拖拽样式,例如 'disable'(禁用), 'axis-x'(只能水平拖拽), 'unlimited'(自由拖拽) -live2d_settings['waifuDraggableRevert'] = true; // 松开鼠标还原拖拽位置,可选 true(真), false(假) - -// 其他杂项设置 -live2d_settings['l2dVersion'] = '1.4.2'; // 当前版本 -live2d_settings['l2dVerDate'] = '2018.11.12'; // 版本更新日期 -live2d_settings['homePageUrl'] = 'auto'; // 主页地址,可选 'auto'(自动), '{URL 网址}' -live2d_settings['aboutPageUrl'] = 'https://www.fghrsh.net/post/123.html'; // 关于页地址, '{URL 网址}' -live2d_settings['screenshotCaptureName']= 'live2d.png'; // 看板娘截图文件名,例如 'live2d.png' - -/****************************************************************************************************/ - -String.prototype.render = function(context) { - var tokenReg = /(\\)?\{([^\{\}\\]+)(\\)?\}/g; - - return this.replace(tokenReg, function (word, slash1, token, slash2) { - if (slash1 || slash2) { return word.replace('\\', ''); } - - var variables = token.replace(/\s/g, '').split('.'); - var currentObject = context; - var i, length, variable; - - for (i = 0, length = variables.length; i < length; ++i) { - variable = 
variables[i]; - currentObject = currentObject[variable]; - if (currentObject === undefined || currentObject === null) return ''; - } - return currentObject; - }); -}; - -var re = /x/; -console.log(re); - -function empty(obj) {return typeof obj=="undefined"||obj==null||obj==""?true:false} -function getRandText(text) {return Array.isArray(text) ? text[Math.floor(Math.random() * text.length + 1)-1] : text} - -function showMessage(text, timeout, flag) { - if(flag || sessionStorage.getItem('waifu-text') === '' || sessionStorage.getItem('waifu-text') === null){ - if(Array.isArray(text)) text = text[Math.floor(Math.random() * text.length + 1)-1]; - if (live2d_settings.showF12Message) console.log('[Message]', text.replace(/<[^<>]+>/g,'')); - - if(flag) sessionStorage.setItem('waifu-text', text); - - $('.waifu-tips').stop(); - $('.waifu-tips').html(text).fadeTo(200, 1); - if (timeout === undefined) timeout = 5000; - hideMessage(timeout); - } -} - -function hideMessage(timeout) { - $('.waifu-tips').stop().css('opacity',1); - if (timeout === undefined) timeout = 5000; - window.setTimeout(function() {sessionStorage.removeItem('waifu-text')}, timeout); - $('.waifu-tips').delay(timeout).fadeTo(200, 0); -} - -function initModel(waifuPath, type) { - /* console welcome message */ - eval(function(p,a,c,k,e,r){e=function(c){return(c35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('8.d(" ");8.d("\\U,.\\y\\5.\\1\\1\\1\\1/\\1,\\u\\2 \\H\\n\\1\\1\\1\\1\\1\\b \', !-\\r\\j-i\\1/\\1/\\g\\n\\1\\1\\1 \\1 \\a\\4\\f\'\\1\\1\\1 L/\\a\\4\\5\\2\\n\\1\\1 \\1 /\\1 \\a,\\1 /|\\1 ,\\1 ,\\1\\1\\1 \',\\n\\1\\1\\1\\q \\1/ /-\\j/\\1\\h\\E \\9 \\5!\\1 i\\n\\1\\1\\1 \\3 \\6 7\\q\\4\\c\\1 \\3\'\\s-\\c\\2!\\t|\\1 |\\n\\1\\1\\1\\1 !,/7 \'0\'\\1\\1 \\X\\w| \\1 |\\1\\1\\1\\n\\1\\1\\1\\1 |.\\x\\"\\1\\l\\1\\1 ,,,, / |./ \\1 |\\n\\1\\1\\1\\1 \\3\'| i\\z.\\2,,A\\l,.\\B / \\1.i \\1|\\n\\1\\1\\1\\1\\1 \\3\'| | / C\\D/\\3\'\\5,\\1\\9.\\1|\\n\\1\\1\\1\\1\\1\\1 | |/i \\m|/\\1 i\\1,.\\6 |\\F\\1|\\n\\1\\1\\1\\1\\1\\1.|/ /\\1\\h\\G \\1 \\6!\\1\\1\\b\\1|\\n\\1\\1\\1 \\1 \\1 k\\5>\\2\\9 \\1 o,.\\6\\2 \\1 /\\2!\\n\\1\\1\\1\\1\\1\\1 !\'\\m//\\4\\I\\g\', \\b \\4\'7\'\\J\'\\n\\1\\1\\1\\1\\1\\1 \\3\'\\K|M,p,\\O\\3|\\P\\n\\1\\1\\1\\1\\1 \\1\\1\\1\\c-,/\\1|p./\\n\\1\\1\\1\\1\\1 \\1\\1\\1\'\\f\'\\1\\1!o,.:\\Q \\R\\S\\T v"+e.V+" / W "+e.N);8.d(" ");',60,60,'|u3000|uff64|uff9a|uff40|u30fd|uff8d||console|uff8a|uff0f|uff3c|uff84|log|live2d_settings|uff70|u00b4|uff49||u2010||u3000_|u3008||_|___|uff72|u2500|uff67|u30cf|u30fc||u30bd|u4ece|u30d8|uff1e|__|u30a4|k_|uff17_|u3000L_|u3000i|uff1a|u3009|uff34|uff70r|u30fdL__||___i|l2dVerDate|u30f3|u30ce|nLive2D|u770b|u677f|u5a18|u304f__|l2dVersion|FGHRSH|u00b40i'.split('|'),0,{})); - - /* 判断 JQuery */ - if (typeof($.ajax) != 'function') typeof(jQuery.ajax) == 'function' ? 
window.$ = jQuery : console.log('[Error] JQuery is not defined.'); - - /* 加载看板娘样式 */ - live2d_settings.waifuSize = live2d_settings.waifuSize.split('x'); - live2d_settings.waifuTipsSize = live2d_settings.waifuTipsSize.split('x'); - live2d_settings.waifuEdgeSide = live2d_settings.waifuEdgeSide.split(':'); - - $("#live2d").attr("width",live2d_settings.waifuSize[0]); - $("#live2d").attr("height",live2d_settings.waifuSize[1]); - $(".waifu-tips").width(live2d_settings.waifuTipsSize[0]); - $(".waifu-tips").height(live2d_settings.waifuTipsSize[1]); - $(".waifu-tips").css("top",live2d_settings.waifuToolTop); - $(".waifu-tips").css("font-size",live2d_settings.waifuFontSize); - $(".waifu-tool").css("font-size",live2d_settings.waifuToolFont); - $(".waifu-tool span").css("line-height",live2d_settings.waifuToolLine); - - if (live2d_settings.waifuEdgeSide[0] == 'left') $(".waifu").css("left",live2d_settings.waifuEdgeSide[1]+'px'); - else if (live2d_settings.waifuEdgeSide[0] == 'right') $(".waifu").css("right",live2d_settings.waifuEdgeSide[1]+'px'); - - window.waifuResize = function() { $(window).width() <= Number(live2d_settings.waifuMinWidth.replace('px','')) ? $(".waifu").hide() : $(".waifu").show(); }; - if (live2d_settings.waifuMinWidth != 'disable') { waifuResize(); $(window).resize(function() {waifuResize()}); } - - try { - if (live2d_settings.waifuDraggable == 'axis-x') $(".waifu").draggable({ axis: "x", revert: live2d_settings.waifuDraggableRevert }); - else if (live2d_settings.waifuDraggable == 'unlimited') $(".waifu").draggable({ revert: live2d_settings.waifuDraggableRevert }); - else $(".waifu").css("transition", 'all .3s ease-in-out'); - } catch(err) { console.log('[Error] JQuery UI is not defined.') } - - live2d_settings.homePageUrl = live2d_settings.homePageUrl == 'auto' ? window.location.protocol+'//'+window.location.hostname+'/' : live2d_settings.homePageUrl; - if (window.location.protocol == 'file:' && live2d_settings.modelAPI.substr(0,2) == '//') live2d_settings.modelAPI = 'http:'+live2d_settings.modelAPI; - - $('.waifu-tool .fui-home').click(function (){ - //window.location = 'https://www.fghrsh.net/'; - window.location = live2d_settings.homePageUrl; - }); - - $('.waifu-tool .fui-info-circle').click(function (){ - //window.open('https://imjad.cn/archives/lab/add-dynamic-poster-girl-with-live2d-to-your-blog-02'); - window.open(live2d_settings.aboutPageUrl); - }); - - if (typeof(waifuPath) == "object") loadTipsMessage(waifuPath); else { - $.ajax({ - cache: true, - url: waifuPath == '' ? 
live2d_settings.tipsMessage : (waifuPath.substr(waifuPath.length-15)=='waifu-tips.json'?waifuPath:waifuPath+'waifu-tips.json'), - dataType: "json", - success: function (result){ loadTipsMessage(result); } - }); - } - - if (!live2d_settings.showToolMenu) $('.waifu-tool').hide(); - if (!live2d_settings.canCloseLive2d) $('.waifu-tool .fui-cross').hide(); - if (!live2d_settings.canSwitchModel) $('.waifu-tool .fui-eye').hide(); - if (!live2d_settings.canSwitchTextures) $('.waifu-tool .fui-user').hide(); - if (!live2d_settings.canSwitchHitokoto) $('.waifu-tool .fui-chat').hide(); - if (!live2d_settings.canTakeScreenshot) $('.waifu-tool .fui-photo').hide(); - if (!live2d_settings.canTurnToHomePage) $('.waifu-tool .fui-home').hide(); - if (!live2d_settings.canTurnToAboutPage) $('.waifu-tool .fui-info-circle').hide(); - - if (waifuPath === undefined) waifuPath = ''; - var modelId = localStorage.getItem('modelId'); - var modelTexturesId = localStorage.getItem('modelTexturesId'); - - if (!live2d_settings.modelStorage || modelId == null) { - var modelId = live2d_settings.modelId; - var modelTexturesId = live2d_settings.modelTexturesId; - } loadModel(modelId, modelTexturesId); -} - -function loadModel(modelId, modelTexturesId=0) { - if (live2d_settings.modelStorage) { - localStorage.setItem('modelId', modelId); - localStorage.setItem('modelTexturesId', modelTexturesId); - } else { - sessionStorage.setItem('modelId', modelId); - sessionStorage.setItem('modelTexturesId', modelTexturesId); - } loadlive2d('live2d', live2d_settings.modelAPI+'get/?id='+modelId+'-'+modelTexturesId, (live2d_settings.showF12Status ? console.log('[Status]','live2d','模型',modelId+'-'+modelTexturesId,'加载完成'):null)); -} - -function loadTipsMessage(result) { - window.waifu_tips = result; - - $.each(result.mouseover, function (index, tips){ - $(document).on("mouseover", tips.selector, function (){ - var text = getRandText(tips.text); - text = text.render({text: $(this).text()}); - showMessage(text, 3000); - }); - }); - $.each(result.click, function (index, tips){ - $(document).on("click", tips.selector, function (){ - var text = getRandText(tips.text); - text = text.render({text: $(this).text()}); - showMessage(text, 3000, true); - }); - }); - $.each(result.seasons, function (index, tips){ - var now = new Date(); - var after = tips.date.split('-')[0]; - var before = tips.date.split('-')[1] || after; - - if((after.split('/')[0] <= now.getMonth()+1 && now.getMonth()+1 <= before.split('/')[0]) && - (after.split('/')[1] <= now.getDate() && now.getDate() <= before.split('/')[1])){ - var text = getRandText(tips.text); - text = text.render({year: now.getFullYear()}); - showMessage(text, 6000, true); - } - }); - - if (live2d_settings.showF12OpenMsg) { - re.toString = function() { - showMessage(getRandText(result.waifu.console_open_msg), 5000, true); - return ''; - }; - } - - if (live2d_settings.showCopyMessage) { - $(document).on('copy', function() { - showMessage(getRandText(result.waifu.copy_message), 5000, true); - }); - } - - $('.waifu-tool .fui-photo').click(function(){ - showMessage(getRandText(result.waifu.screenshot_message), 5000, true); - window.Live2D.captureName = live2d_settings.screenshotCaptureName; - window.Live2D.captureFrame = true; - }); - - $('.waifu-tool .fui-cross').click(function(){ - sessionStorage.setItem('waifu-dsiplay', 'none'); - showMessage(getRandText(result.waifu.hidden_message), 1300, true); - window.setTimeout(function() {$('.waifu').hide();}, 1300); - }); - - window.showWelcomeMessage = function(result) { - 
var text; - if (window.location.href == live2d_settings.homePageUrl) { - var now = (new Date()).getHours(); - if (now > 23 || now <= 5) text = getRandText(result.waifu.hour_tips['t23-5']); - else if (now > 5 && now <= 7) text = getRandText(result.waifu.hour_tips['t5-7']); - else if (now > 7 && now <= 11) text = getRandText(result.waifu.hour_tips['t7-11']); - else if (now > 11 && now <= 14) text = getRandText(result.waifu.hour_tips['t11-14']); - else if (now > 14 && now <= 17) text = getRandText(result.waifu.hour_tips['t14-17']); - else if (now > 17 && now <= 19) text = getRandText(result.waifu.hour_tips['t17-19']); - else if (now > 19 && now <= 21) text = getRandText(result.waifu.hour_tips['t19-21']); - else if (now > 21 && now <= 23) text = getRandText(result.waifu.hour_tips['t21-23']); - else text = getRandText(result.waifu.hour_tips.default); - } else { - var referrer_message = result.waifu.referrer_message; - if (document.referrer !== '') { - var referrer = document.createElement('a'); - referrer.href = document.referrer; - var domain = referrer.hostname.split('.')[1]; - if (window.location.hostname == referrer.hostname) - text = referrer_message.localhost[0] + document.title.split(referrer_message.localhost[2])[0] + referrer_message.localhost[1]; - else if (domain == 'baidu') - text = referrer_message.baidu[0] + referrer.search.split('&wd=')[1].split('&')[0] + referrer_message.baidu[1]; - else if (domain == 'so') - text = referrer_message.so[0] + referrer.search.split('&q=')[1].split('&')[0] + referrer_message.so[1]; - else if (domain == 'google') - text = referrer_message.google[0] + document.title.split(referrer_message.google[2])[0] + referrer_message.google[1]; - else { - $.each(result.waifu.referrer_hostname, function(i,val) {if (i==referrer.hostname) referrer.hostname = getRandText(val)}); - text = referrer_message.default[0] + referrer.hostname + referrer_message.default[1]; - } - } else text = referrer_message.none[0] + document.title.split(referrer_message.none[2])[0] + referrer_message.none[1]; - } - showMessage(text, 6000); - }; if (live2d_settings.showWelcomeMessage) showWelcomeMessage(result); - - var waifu_tips = result.waifu; - - function loadOtherModel() { - var modelId = modelStorageGetItem('modelId'); - var modelRandMode = live2d_settings.modelRandMode; - - $.ajax({ - cache: modelRandMode == 'switch' ? true : false, - url: live2d_settings.modelAPI+modelRandMode+'/?id='+modelId, - dataType: "json", - success: function(result) { - loadModel(result.model['id']); - var message = result.model['message']; - $.each(waifu_tips.model_message, function(i,val) {if (i==result.model['id']) message = getRandText(val)}); - showMessage(message, 3000, true); - } - }); - } - - function loadRandTextures() { - var modelId = modelStorageGetItem('modelId'); - var modelTexturesId = modelStorageGetItem('modelTexturesId'); - var modelTexturesRandMode = live2d_settings.modelTexturesRandMode; - - $.ajax({ - cache: modelTexturesRandMode == 'switch' ? true : false, - url: live2d_settings.modelAPI+modelTexturesRandMode+'_textures/?id='+modelId+'-'+modelTexturesId, - dataType: "json", - success: function(result) { - if (result.textures['id'] == 1 && (modelTexturesId == 1 || modelTexturesId == 0)) - showMessage(waifu_tips.load_rand_textures[0], 3000, true); - else showMessage(waifu_tips.load_rand_textures[1], 3000, true); - loadModel(modelId, result.textures['id']); - } - }); - } - - function modelStorageGetItem(key) { return live2d_settings.modelStorage ? 
localStorage.getItem(key) : sessionStorage.getItem(key); } - - /* 检测用户活动状态,并在空闲时显示一言 */ - if (live2d_settings.showHitokoto) { - window.getActed = false; window.hitokotoTimer = 0; window.hitokotoInterval = false; - $(document).mousemove(function(e){getActed = true;}).keydown(function(){getActed = true;}); - setInterval(function(){ if (!getActed) ifActed(); else elseActed(); }, 1000); - } - - function ifActed() { - if (!hitokotoInterval) { - hitokotoInterval = true; - hitokotoTimer = window.setInterval(showHitokotoActed, 30000); - } - } - - function elseActed() { - getActed = hitokotoInterval = false; - window.clearInterval(hitokotoTimer); - } - - function showHitokotoActed() { - if ($(document)[0].visibilityState == 'visible') showHitokoto(); - } - - function showHitokoto() { - switch(live2d_settings.hitokotoAPI) { - case 'lwl12.com': - $.getJSON('https://api.lwl12.com/hitokoto/v1?encode=realjson',function(result){ - if (!empty(result.source)) { - var text = waifu_tips.hitokoto_api_message['lwl12.com'][0]; - if (!empty(result.author)) text += waifu_tips.hitokoto_api_message['lwl12.com'][1]; - text = text.render({source: result.source, creator: result.author}); - window.setTimeout(function() {showMessage(text+waifu_tips.hitokoto_api_message['lwl12.com'][2], 3000, true);}, 5000); - } showMessage(result.text, 5000, true); - });break; - case 'fghrsh.net': - $.getJSON('https://api.fghrsh.net/hitokoto/rand/?encode=jsc&uid=3335',function(result){ - if (!empty(result.source)) { - var text = waifu_tips.hitokoto_api_message['fghrsh.net'][0]; - text = text.render({source: result.source, date: result.date}); - window.setTimeout(function() {showMessage(text, 3000, true);}, 5000); - showMessage(result.hitokoto, 5000, true); - } - });break; - case 'jinrishici.com': - $.ajax({ - url: 'https://v2.jinrishici.com/one.json', - xhrFields: {withCredentials: true}, - success: function (result, status) { - if (!empty(result.data.origin.title)) { - var text = waifu_tips.hitokoto_api_message['jinrishici.com'][0]; - text = text.render({title: result.data.origin.title, dynasty: result.data.origin.dynasty, author:result.data.origin.author}); - window.setTimeout(function() {showMessage(text, 3000, true);}, 5000); - } showMessage(result.data.content, 5000, true); - } - });break; - default: - $.getJSON('https://v1.hitokoto.cn',function(result){ - if (!empty(result.from)) { - var text = waifu_tips.hitokoto_api_message['hitokoto.cn'][0]; - text = text.render({source: result.from, creator: result.creator}); - window.setTimeout(function() {showMessage(text, 3000, true);}, 5000); - } - showMessage(result.hitokoto, 5000, true); - }); - } - } - - $('.waifu-tool .fui-eye').click(function (){loadOtherModel()}); - $('.waifu-tool .fui-user').click(function (){loadRandTextures()}); - $('.waifu-tool .fui-chat').click(function (){showHitokoto()}); -} diff --git a/spaces/fclong/summary/fengshen/examples/pretrain_t5/pretrain_randeng_t5_char_57M.sh b/spaces/fclong/summary/fengshen/examples/pretrain_t5/pretrain_randeng_t5_char_57M.sh deleted file mode 100644 index 8e86e8b077019a57c5a6ac28ab29749f1a2787aa..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/pretrain_t5/pretrain_randeng_t5_char_57M.sh +++ /dev/null @@ -1,128 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=pretrain_randeng_t5_char_57M -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=8 -#SBATCH --gres=gpu:8 # number of gpus -#SBATCH --cpus-per-task=32 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH -o 
/cognitive_comp/ganruyi/experiments/randeng_t5_char_57M/%x-%j.log -#SBATCH -e /cognitive_comp/ganruyi/experiments/randeng_t5_char_57M/%x-%j.err - -set -x -e - -echo "START TIME: $(date)" -MICRO_BATCH_SIZE=64 -ROOT_DIR=/cognitive_comp/ganruyi/experiments/randeng_t5_char_57M/ -if [ ! -d ${ROOT_DIR} ];then - mkdir ${ROOT_DIR} - echo ${ROOT_DIR} created!!!!!!!!!!!!!! -else - echo ${ROOT_DIR} exist!!!!!!!!!!!!!!! -fi - -ZERO_STAGE=1 - -config_json="$ROOT_DIR/ds_config.randeng_t5_char_57M.$SLURM_JOBID.json" -export MASTER_PORT=$[RANDOM%10000+30000] -# export CUDA_VISIBLE_DEVICES='4,5' - -cat < $config_json -{ - "train_micro_batch_size_per_gpu": ${MICRO_BATCH_SIZE}, - "steps_per_print": 100, - "gradient_clipping": 1.0, - "zero_optimization": { - "stage": $ZERO_STAGE, - "contiguous_gradients": false, - "overlap_comm": true, - "reduce_scatter": true, - "reduce_bucket_size": 50000000, - "allgather_bucket_size": 500000000 - }, - "optimizer": { - "type": "Adam", - "params": { - "lr": 1e-4, - "weight_decay": 1e-2 - } - }, - "scheduler": { - "params": { - "warmup_max_lr": 1e-04, - "warmup_min_lr": 1e-05, - "total_num_steps": 240000, - "warmup_num_steps" : 10000 - }, - "type": "WarmupDecayLR" - }, - "zero_allow_untested_optimizer": false, - "fp16": { - "enabled": true, - "loss_scale": 0, - "loss_scale_window": 1000, - "hysteresis": 2, - "min_loss_scale": 1 - }, - "activation_checkpointing": { - "partition_activations": false, - "contiguous_memory_optimization": false - }, - "wall_clock_breakdown": false -} -EOT - -export PL_DEEPSPEED_CONFIG_PATH=$config_json -export TORCH_EXTENSIONS_DIR=/cognitive_comp/ganruyi/tmp/torch_extendsions -# strategy=ddp -strategy=deepspeed_stage_1 - -TRAINER_ARGS=" - --max_epochs 1 \ - --gpus 8 \ - --num_nodes 1 \ - --strategy ${strategy} \ - --default_root_dir $ROOT_DIR \ - --dirpath $ROOT_DIR/ckpt \ - --save_top_k 3 \ - --every_n_train_steps 100000 \ - --monitor train_loss \ - --mode min \ - --save_last \ - --val_check_interval 0.1 \ - --dataset_num_workers 4 \ - --dataloader_num_workers 4 \ - --replace_sampler_ddp False \ -" -# --accumulate_grad_batches 8 \ -DATA_DIR=wudao_180g_bert_tokenized_512 - -DATA_ARGS=" - --train_batchsize $MICRO_BATCH_SIZE \ - --valid_batchsize $MICRO_BATCH_SIZE \ - --train_data_path ${DATA_DIR} \ - --train_split_size 0.999 \ - --max_seq_length 512 \ -" - -MODEL_ARGS=" - --pretrained_model_path /cognitive_comp/ganruyi/experiments/randeng_t5_char_57M/randeng_t5_char_57M \ - --tokenizer_type bert_tokenizer \ -" - -SCRIPTS_PATH=/cognitive_comp/ganruyi/Fengshenbang-LM/fengshen/examples/pretrain_t5/pretrain_t5.py - -export CMD=" \ - $SCRIPTS_PATH \ - $TRAINER_ARGS \ - $MODEL_ARGS \ - $DATA_ARGS \ - " - -echo $CMD -/home/ganruyi/anaconda3/bin/python $CMD -# SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif -# srun singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH bash -c '/home/ganruyi/anaconda3/bin/python $CMD' - -# source activate base -# python $CMD -# srun --nodes=1 --gres=gpu:8 --ntasks-per-node=8 --cpus-per-task=30 --jobid=171866 -e %x-%j.err -o %x-%j.log python $CMD - diff --git a/spaces/felix-weiland/llama_index_demo/functions.py b/spaces/felix-weiland/llama_index_demo/functions.py deleted file mode 100644 index 8153674c91eae6f1fee2c79302e74cb5d3ed56f4..0000000000000000000000000000000000000000 --- a/spaces/felix-weiland/llama_index_demo/functions.py +++ /dev/null @@ -1,66 +0,0 @@ -import streamlit as st -import os, time -from llama_index.readers.schema.base import Document -from llama_index 
import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, GPTTreeIndex -from langchain import OpenAI -import pandas as pd - - -def generate_response(prompt, index, llm_predictor, test=False): - if test: - return prompt - else: - response = index.query(prompt, llm_predictor=llm_predictor) - return response - - -def update_chat_state(): - st.session_state.chat_sent = st.session_state.chat_input - st.session_state.chat_input = '' - - -def get_chat_input(): - st.text_input(label="Write your query here", key="chat_input", on_change=update_chat_state) - return st.session_state.chat_sent - -def config_llm_predictor(): - llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.3, model_name="gpt-3.5-turbo")) - - max_input_size = 8192 - num_output = None - max_chunk_overlap = 20 - - prompt_helper = PromptHelper(max_input_size = max_input_size, num_output = num_output, max_chunk_overlap = max_chunk_overlap) - - return llm_predictor - - -@st.cache(allow_output_mutation=True) -def load_index(json_index): - #index = GPTTreeIndex.load_from_disk(json_index) - index = GPTSimpleVectorIndex.load_from_disk(json_index) - - return index - - -@st.cache(allow_output_mutation=True) -def get_data(): - data = pd.read_csv("data/appstore_reviews.csv") - data = data[["application", "review", "rating", "date"]] - data["application"] = data["application"].str.lower() - data = data[~data["review"].isna()] - return data - - -def set_api_key(api_key_file): - with open(api_key_file, "r") as file: - openai_key = file.read().replace("\n", "") - # Set environment - os.environ["OPENAI_API_KEY"] = openai_key - - -def get_search(data): - input_text = st.text_input("Search in reviews:", key="search") - output = data[data["review"].apply(lambda x: x.lower()).str.contains(input_text.lower())] - - return output \ No newline at end of file diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/utils/model_utils.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/utils/model_utils.py deleted file mode 100644 index 5156bf2274a5972319bbe1c96852e14fb66d7f74..0000000000000000000000000000000000000000 --- a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/utils/model_utils.py +++ /dev/null @@ -1,35 +0,0 @@ -import torch -import argparse -from ..models.psp import pSp -from ..models.encoders.psp_encoders import Encoder4Editing - - -def setup_model(checkpoint_path, device='cuda'): - ckpt = torch.load(checkpoint_path, map_location='cpu') - opts = ckpt['opts'] - - opts['checkpoint_path'] = checkpoint_path - opts['device'] = device - opts = argparse.Namespace(**opts) - - net = pSp(opts) - net.eval() - net = net.to(device) - return net, opts - - -def load_e4e_standalone(checkpoint_path, device='cuda'): - ckpt = torch.load(checkpoint_path, map_location='cpu') - opts = argparse.Namespace(**ckpt['opts']) - e4e = Encoder4Editing(50, 'ir_se', opts) - e4e_dict = {k.replace('encoder.', ''): v for k, v in ckpt['state_dict'].items() if k.startswith('encoder.')} - e4e.load_state_dict(e4e_dict) - e4e.eval() - e4e = e4e.to(device) - latent_avg = ckpt['latent_avg'].to(device) - - def add_latent_avg(model, inputs, outputs): - return outputs + latent_avg.repeat(outputs.shape[0], 1, 1) - - e4e.register_forward_hook(add_latent_avg) - return e4e diff --git a/spaces/feregVcuzo/sanity-test-midi/Audioease Altiverb 6 Ir Impulse Responses Complete Library.md b/spaces/feregVcuzo/sanity-test-midi/Audioease Altiverb 6 Ir Impulse Responses Complete 
Library.md deleted file mode 100644 index 7489fddf2f041cd712e92684681cea0c4fbbcd90..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/Audioease Altiverb 6 Ir Impulse Responses Complete Library.md +++ /dev/null @@ -1,92 +0,0 @@ -## audioease altiverb 6 ir impulse responses complete library - - - - - - ![Audioease Altiverb 6 Ir Impulse Responses Complete Library](https://phaedrus-audio.com/impulse%20response%20invert%20LOGO_small.png) - - - - - -**Download File ---> [https://www.google.com/url?q=https%3A%2F%2Fshurll.com%2F2txvOp&sa=D&sntz=1&usg=AOvVaw1NIrm7yI48PIs7OUynbZkS](https://www.google.com/url?q=https%3A%2F%2Fshurll.com%2F2txvOp&sa=D&sntz=1&usg=AOvVaw1NIrm7yI48PIs7OUynbZkS)** - - - - - - - - - - - - - -# How to Use AudioEase Altiverb 6 IR Impulse Responses Complete Library to Enhance Your Sound - - - -If you are looking for a way to add realistic and immersive reverberation to your audio projects, you might want to check out AudioEase Altiverb 6 IR Impulse Responses Complete Library. This is a collection of high-quality impulse responses (IRs) that can be applied to any audio source using the Altiverb 6 plugin. IRs are recordings of the sound reflections in real spaces, such as concert halls, churches, studios, outdoor locations, and more. By using IRs, you can simulate the acoustic characteristics of these spaces and make your sound more natural and expressive. - - - -In this article, we will show you how to use AudioEase Altiverb 6 IR Impulse Responses Complete Library to enhance your sound. We will cover the following topics: - - - -- What is Altiverb 6 and how does it work? - -- How to install and activate AudioEase Altiverb 6 IR Impulse Responses Complete Library? - -- How to browse and select IRs from the library? - -- How to adjust the parameters and settings of Altiverb 6? - -- How to apply Altiverb 6 to your audio tracks? - - - -## What is Altiverb 6 and how does it work? - - - -Altiverb 6 is a convolution reverb plugin that can be used with any digital audio workstation (DAW) that supports VST, AU, RTAS, or AAX formats. Convolution reverb is a process that uses IRs to recreate the sound of real spaces. Unlike algorithmic reverb, which uses mathematical formulas to generate reverberation, convolution reverb uses actual recordings of sound reflections in different environments. This makes convolution reverb more realistic and accurate, as it captures the nuances and details of each space. - - - -Altiverb 6 can apply any IR from its library to any audio source, such as vocals, instruments, sound effects, or even entire mixes. You can also import your own IRs or create them using the built-in IR recording tool. Altiverb 6 offers a variety of parameters and settings that allow you to customize the reverb effect according to your preferences and needs. You can control the level, decay time, pre-delay, early reflections, late reflections, EQ, modulation, automation, and more. - - - -## How to install and activate AudioEase Altiverb 6 IR Impulse Responses Complete Library? - - - -To use AudioEase Altiverb 6 IR Impulse Responses Complete Library, you need to have Altiverb 6 installed and activated on your computer. You can download Altiverb 6 from the official website of AudioEase[^1^]. You will need a license key to activate the plugin. You can purchase a license key from AudioEase or from authorized dealers. 
- - - -Once you have installed and activated Altiverb 6, you can download AudioEase Altiverb 6 IR Impulse Responses Complete Library from the same website[^1^]. This is a hybrid library that works on both Mac and PC platforms. The library contains over 3 GB of content in various categories, such as cathedrals, churches, clubs, concert halls, gear, metallic resonant spaces, operas & theaters, outdoor, post production ambiances, recording studios & echo chambers, scoring stages (orchestral studios), small rooms for music, sound design, stadiums, tombs & underground[^2^]. - - - -To install AudioEase Altiverb 6 IR Impulse Responses Complete Library, you need to copy the library folder into the IR folder of Altiverb 6. The default location of the IR folder is: - - - -- Mac: /Library/Application Support/Audio Ease/Altiverb/Impulse Responses - -- PC: C:\Program Files\Audio Ease\Altiverb\Impulse Responses - - - -After copying the library folder into the IR folder, you need to rescan the IR folder in Altiverb 6. To do this, open Altiverb 6 in your DAW and click on the "Rescan" button at the bottom left corner of the plugin window. This will - - dfd1c89656 - - - - - diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Carrom Pool Disc Game Hack Download Mod Apk and Play with Unlimited Resources.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Carrom Pool Disc Game Hack Download Mod Apk and Play with Unlimited Resources.md deleted file mode 100644 index 255816ab923ec025e307eea3836eb1fea5861c9e..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Carrom Pool Disc Game Hack Download Mod Apk and Play with Unlimited Resources.md +++ /dev/null @@ -1,156 +0,0 @@ -
      -

      Carrom Pool Disc Game Mod Apk Hack: Everything You Need to Know

      -

      Carrom pool disc game is one of the most popular and addictive online games that you can play with your friends or strangers. It is a multiplayer game that offers three modes: carrom, free style, and disc pool. You can also customize your pieces, boards, and strikers with various skins and stickers. But what if you want to get unlimited coins, gems, and other resources in the game? What if you want to have an edge over your opponents with aim hack and other cheats? This is where carrom pool mod apk hack comes in handy. In this article, we will tell you everything you need to know about carrom pool mod apk hack, including its features, benefits, risks, download and installation process, and usage tips.

      -

      What is Carrom Pool Disc Game?

      -

      Carrom pool disc game is a digital version of the classic board game carrom. It is developed by Miniclip, a leading online gaming company. Carrom pool disc game has millions of downloads and positive reviews on Google Play Store and App Store. It is also available on Facebook and Miniclip's website.

      -

      carrom pool disc game mod apk hack


      DOWNLOAD === https://gohhs.com/2uPrif



      -

      Features of Carrom Pool Disc Game

      -

      Some of the features of carrom pool disc game are:

      -
        -
      • Play multiplayer matches in 3 game modes: carrom, free style, and disc pool.
      • -
      • Play with your friends or compete with top players from around the world.
      • -
      • Try your luck at free daily golden shot and win big prizes.
      • -
      • Play in different arenas with stunning graphics and realistic physics.
      • -
      • Smooth controls and easy gameplay.
      • -
      • Customize your pieces, boards, and strikers with various skins and stickers.
      • -
      • Earn coins and gems by winning matches and completing missions.
      • -
      • Unlock new items and upgrade your skills.
      • -
      • Chat with your opponents and send them emojis.
      • -
      • Join clubs and participate in tournaments.
      • -
      -

      How to Play Carrom Pool Disc Game

      -

      The rules of carrom pool disc game are similar to the rules of carrom board game. The objective is to pocket all your pieces before your opponent does. You can choose between black or white pieces in carrom mode, or red or green pieces in disc pool mode. You can also play free style mode where there are no rules or restrictions.

      -

      To play carrom pool disc game, you need to follow these steps:

      -
        -
      1. Select a game mode and an arena.
      2. -
      3. Select an opponent or invite a friend to play with you.
      4. -
      5. Select a striker and drag it on the board to adjust its position and angle.
      6. -
      7. Swipe on the striker to set its power and direction.
      8. -
      9. Release the striker to hit the pieces on the board.
      10. -
      11. Pocket your pieces before your opponent does.
      12. -
      13. Earn coins and gems by winning matches.
      14. -
      -

      What is Carrom Pool Mod Apk Hack?

      -

      Carrom pool mod apk hack is a modified version of the original carrom pool disc game app. It is created by third-party developers who modify the app's code and add some features that are not available in the official app. These features include unlimited coins, gems, aim hack, auto win, unlock all items, etc. Carrom pool mod apk hack is not an official app and it is not available on Google Play Store or App Store. You have to download it from other sources, such as websites or blogs that provide the download link.

      -

      Benefits of Carrom Pool Mod Apk Hack

      -

      Some of the benefits of carrom pool mod apk hack are:

      -
        -
      • You can get unlimited coins and gems in the game without spending any money.
      • -
      • You can use aim hack and other cheats to win every match easily.
      • -
      • You can unlock all the items and skins in the game without completing any missions or levels.
      • -
      • You can play in any arena and with any opponent without any restrictions.
      • -
      • You can enjoy the game with more fun and excitement.
      • -
      -

      Risks of Carrom Pool Mod Apk Hack

      -

      Some of the risks of carrom pool mod apk hack are:

      -
        -
      • You may face legal issues for violating the terms and conditions of the original app.
      • -
      • You may get banned from the game server for using unfair means.
      • -
      • You may lose your progress and data if the mod apk hack is not compatible with the latest version of the game.
      • -
      • You may expose your device to malware and viruses that may harm your device or steal your personal information.
      • -
      • You may ruin the fun and challenge of the game by using cheats and hacks.
      • -
      -

      How to Download and Install Carrom Pool Mod Apk Hack

      -

      If you want to download and install carrom pool mod apk hack on your device, you need to follow these steps:

      -

      Steps to Download and Install Carrom Pool Mod Apk Hack

      -
        -
      1. Go to a website or a blog that provides the download link for carrom pool mod apk hack. For example, you can visit [this website] to get the latest version of carrom pool mod apk hack.
      2. -
      3. Click on the download button and wait for the file to be downloaded on your device.
      4. -
      5. Go to your device settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from Google Play Store or App Store.
      6. -
7. Locate the downloaded file on your device and tap on it to start the installation process (an adb-based alternative is sketched just after this list).
      8. -
      9. Follow the instructions on the screen and wait for the installation to be completed.
      10. -
      11. Launch the carrom pool mod apk hack app and enjoy the game with unlimited resources and cheats.
      12. -
      -
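If you prefer to sideload the file from a computer instead of tapping it in a file manager, Android's adb tool can install the APK over USB. This is only a minimal sketch: it assumes adb is installed on your computer, USB debugging is enabled on the phone, and that the downloaded file is named carrom-pool-mod.apk — that name is a placeholder, so use whatever your browser actually saved.

```bash
# Confirm adb can see the connected phone
adb devices

# Sideload the APK; -r replaces any previously installed version
adb install -r carrom-pool-mod.apk
```

Either way, the remaining steps are the same: the app still has to be launched on the phone itself.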

      Tips to Avoid Malware and Viruses

      -

      To avoid malware and viruses that may come with carrom pool mod apk hack, you need to follow these tips:

      -

      carrom pool disc game unlimited coins and gems mod apk
      -carrom pool disc game hack version download apk
      -carrom pool disc game mod apk latest version 2023
      -carrom pool disc game cheat codes for android
      -carrom pool disc game mod apk free download for pc
      -carrom pool disc game hack online generator
      -carrom pool disc game mod apk unlocked all features
      -carrom pool disc game hack tool no survey no password
      -carrom pool disc game mod apk offline mode
      -carrom pool disc game hack apk ios
      -carrom pool disc game mod apk premium membership
      -carrom pool disc game hack without human verification
      -carrom pool disc game mod apk unlimited money and diamonds
      -carrom pool disc game hack no root required
      -carrom pool disc game mod apk anti ban protection
      -carrom pool disc game hack with lucky patcher
      -carrom pool disc game mod apk unlimited strikers and pucks
      -carrom pool disc game hack app download
      -carrom pool disc game mod apk new update 2023
      -carrom pool disc game hack for windows 10
      -carrom pool disc game mod apk unlimited everything
      -carrom pool disc game hack reddit
      -carrom pool disc game mod apk vip access
      -carrom pool disc game hack youtube video
      -carrom pool disc game mod apk pro version
      -carrom pool disc game hack by revdl.com[^1^]
      -carrom pool disc game mod apk with obb file
      -carrom pool disc game hack using game guardian
      -carrom pool disc game mod apk all unlocked
      -carrom pool disc game hack telegram channel

      -
        -
      • Download carrom pool mod apk hack only from trusted and reliable sources. Do not download it from random or suspicious websites or links.
      • -
• Scan the downloaded file with good antivirus software before installing it on your device (see the checksum sketch after this list for an additional integrity check). This will help you detect and remove any malicious code or program that may harm your device or data.
      • -
      • Do not grant any unnecessary permissions or access to carrom pool mod apk hack app. Only allow the permissions that are required for the game to function properly.
      • -
      • Do not update carrom pool mod apk hack app from within the app. Always check for updates from the original source or website where you downloaded it from.
      • -
      -
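Antivirus scanning aside, a quick checksum comparison is a simple way to confirm that the file you received was not corrupted or swapped in transit. The sketch below assumes the download page publishes a SHA-256 checksum and that the file is named carrom-pool-mod.apk — both are placeholder assumptions, so substitute the real values; on macOS, shasum -a 256 is the equivalent command.

```bash
# Print the SHA-256 hash of the downloaded file (Linux coreutils)
sha256sum carrom-pool-mod.apk

# Compare the printed hash with the checksum shown on the download page;
# if they do not match, delete the file instead of installing it.
```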

      How to Use Carrom Pool Mod Apk Hack

      -

      Once you have downloaded and installed carrom pool mod apk hack on your device, you can use it to play the game with unlimited resources and cheats. Here are some tips on how to use carrom pool mod apk hack:

      -

      How to Get Unlimited Coins and Gems

      -

      To get unlimited coins and gems in carrom pool mod apk hack, you need to follow these steps:

      -
        -
      1. Launch the carrom pool mod apk hack app on your device.
      2. -
      3. Select a game mode and an arena that you want to play in.
      4. -
      5. Before starting a match, tap on the plus sign (+) next to your coins and gems balance at the top of the screen.
      6. -
      7. A pop-up window will appear where you can enter any amount of coins and gems that you want to add to your account.
      8. -
      9. Tap on the confirm button and wait for a few seconds. You will see that your coins and gems balance has increased by the amount that you entered.
      10. -
      11. Enjoy playing the game with unlimited coins and gems. You can repeat this process as many times as you want.
      12. -
      -

      How to Use Aim Hack and Other Cheats

      -

      To use aim hack and other cheats in carrom pool mod apk hack, you need to follow these steps:

      -
        -
      1. Launch the carrom pool mod apk hack app on your device.
      2. -
      3. Select a game mode and an arena that you want to play in.
      4. -
      5. Before starting a match, tap on the menu icon (three horizontal lines) at the top left corner of the screen.
      6. -
      7. A list of options will appear where you can select the cheats that you want to use. Some of the cheats are:
      8. -
          -
        • Aim hack: This cheat will help you to aim at any piece on the board with perfect accuracy. You can also adjust the angle and power of your striker with ease.
        • -
        • Auto win: This cheat will make you win the match automatically, no matter what your opponent does. You can also choose the score that you want to win by.
        • -
        • Unlock all: This cheat will unlock all the items and skins in the game without any cost or effort. You can also change your items and skins anytime during the match.
        • -
        • No ads: This cheat will remove all the ads from the game, making it more smooth and enjoyable.
        • -
        -
      9. Tap on the cheats that you want to use and then tap on the back button to return to the game screen.
      10. -
      11. Enjoy playing the game with aim hack and other cheats. You can turn on or off the cheats anytime during the match by tapping on the menu icon again.
      12. -
      -

      Conclusion

      -

      Carrom pool disc game is a fun and addictive online game that you can play with your friends or strangers. It offers three game modes, stunning graphics, realistic physics, smooth controls, and various customization options. However, if you want to get unlimited resources and cheats in the game, you can try carrom pool mod apk hack. Carrom pool mod apk hack is a modified version of the original app that gives you unlimited coins, gems, aim hack, auto win, unlock all, and more. However, carrom pool mod apk hack also comes with some risks, such as legal issues, bans, data loss, malware, and viruses. Therefore, you should be careful when downloading and installing carrom pool mod apk hack on your device. You should also use it responsibly and not ruin the fun and challenge of the game for yourself and others.

      -

      Summary of the Article

      -

      In this article, we have covered the following topics:

      -
        -
      • What is carrom pool disc game and its features?
      • -
      • What is carrom pool mod apk hack and its benefits?
      • -
      • How to download and install carrom pool mod apk hack?
      • -
      • How to use carrom pool mod apk hack?
      • -
      -

      FAQs

      -

      Here are some frequently asked questions about carrom pool mod apk hack:

      -
        -
      1. Is carrom pool mod apk hack safe to use?
      2. -

        Carrom pool mod apk hack is not an official app and it is not endorsed by Miniclip or any other authority. It is created by third-party developers who may have malicious intentions. Therefore, carrom pool mod apk hack is not safe to use and it may harm your device or data. You should always download and install carrom pool mod apk hack from trusted and reliable sources and scan it with a good antivirus software before using it.

        -
      3. Is carrom pool mod apk hack legal to use?
      4. -

        Carrom pool mod apk hack is not legal to use as it violates the terms and conditions of the original app. It also gives you an unfair advantage over other players who play the game legitimately. Therefore, carrom pool mod apk hack is illegal to use and you may face legal consequences for using it. You should always respect the rules and regulations of the original app and play the game fairly and honestly.

        -
      5. Will I get banned for using carrom pool mod apk hack?
      6. -

        Yes, you may get banned for using carrom pool mod apk hack as it is detected by the game server as a cheating tool. The game server may monitor your activities and behavior in the game and ban you if they find any evidence of cheating or hacking. Therefore, you should avoid using carrom pool mod apk hack or use it at your own risk.

        -
      7. How can I update carrom pool mod apk hack?
      8. -

        You cannot update carrom pool mod apk hack from within the app as it is not connected to the official app store or server. You have to check for updates from the original source or website where you downloaded it from. You should also make sure that the updated version of carrom pool mod apk hack is compatible with the latest version of the original app. You should also backup your data and progress before updating carrom pool mod apk hack.

        -
      9. Can I play carrom pool mod apk hack offline?
      10. -

        No, you cannot play carrom pool mod apk hack offline as it requires an internet connection to function properly. Carrom pool mod apk hack connects to the game server and modifies the game data and resources. Therefore, you need to have a stable and secure internet connection to play carrom pool mod apk hack.

        -
      -

      I hope this article has helped you to understand carrom pool mod apk hack better. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/DLS 21 8.30 APK Download and Play the Latest Version of Dream League Soccer 2023.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/DLS 21 8.30 APK Download and Play the Latest Version of Dream League Soccer 2023.md deleted file mode 100644 index 117f3e9774dbf99eb54b8e5d77a1e98c8fe48b3d..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/DLS 21 8.30 APK Download and Play the Latest Version of Dream League Soccer 2023.md +++ /dev/null @@ -1,106 +0,0 @@ - -

      DLS 21 8.30 APK: How to Download and Install the Latest Version of Dream League Soccer

      -

      If you are a fan of soccer games, you might have heard of Dream League Soccer or DLS for short. It is one of the most popular and realistic soccer games for mobile devices, with millions of downloads and positive reviews. In this article, we will tell you everything you need to know about DLS 21, the latest version of the game, and how to download and install DLS 21 8.30 APK, a modified version of the game that offers some extra features and benefits.

      -

      dls 21 8.30 apk


      Download File » https://gohhs.com/2uPulH



      -

      What is DLS 21?

      -

      DLS 21 is the latest installment of the Dream League Soccer series, developed by First Touch Games. It is a soccer simulation game that lets you create your own team, customize your players, stadium, kits, and logos, and compete in various leagues and tournaments. You can also play online with other players from around the world, or offline with friends using local multiplayer mode.

      -

      Features of DLS 21

      -

      Some of the features that make DLS 21 stand out from other soccer games are:

      -
        -
      • Realistic gameplay: The game uses advanced AI and motion capture technology to create realistic animations, physics, and tactics for the players and teams.
      • -
      • Stunning graphics: The game has high-quality graphics and sound effects that enhance the immersion and atmosphere of the game.
      • -
      • Huge content: The game has over 4,000 licensed players from more than 100 countries, as well as hundreds of clubs and stadiums to choose from.
      • -
      • Customization: The game allows you to customize every aspect of your team, from the name, logo, kit, stadium, manager, formation, tactics, and more.
      • -
      • Progression: The game has a career mode that lets you start from the bottom and work your way up to the top of the soccer world. You can also earn coins and gems by playing matches and completing objectives, which you can use to upgrade your team and unlock new items.
      • -
      -

      Requirements for DLS 21

      -

To play DLS 21 on your mobile device, you need to have the following (a quick way to check these from a computer is sketched after this list):

      -

      dls 21 8.30 apk download free
      -dls 21 8.30 apk mod unlimited money
      -dls 21 8.30 apk obb data file
      -dls 21 8.30 apk latest version 2023
      -dls 21 8.30 apk offline mode
      -dls 21 8.30 apk android game
      -dls 21 8.30 apk by ristechy.com
      -dls 21 8.30 apk with fifpro licensed players
      -dls 21 8.30 apk dream league soccer 2023
      -dls 21 8.30 apk update new features
      -dls 21 8.30 apk hack unlimited coins
      -dls 21 8.30 apk no root required
      -dls 21 8.30 apk best soccer game
      -dls 21 8.30 apk how to install
      -dls 21 8.30 apk gameplay and review
      -dls 21 8.30 apk compatible devices
      -dls 21 8.30 apk original file from play store
      -dls 21 8.30 apk direct download link
      -dls 21 8.30 apk mirror link
      -dls 21 8.30 apk mediafire link
      -dls 21 8.30 apk mega link
      -dls 21 8.30 apk zippyshare link
      -dls 21 8.30 apk google drive link
      -dls 21 8.30 apk dropbox link
      -dls 21 8.30 apk high speed download
      -dls 21 8.30 apk safe and secure download
      -dls 21 8.30 apk virus free download
      -dls 21 8.30 apk no ads download
      -dls 21 8.30 apk full unlocked download
      -dls 21 8.30 apk premium features download
      -dls 21 8.30 apk build your dream team
      -dls 21 8.30 apk customize your team logo and kits
      -dls 21 8.30 apk play online and offline matches
      -dls 21 8.30 apk compete in different leagues and tournaments
      -dls 21 8.30 apk challenge the best soccer clubs in the world
      -dls 21 8.30 apk realistic graphics and physics
      -dls 21 8.30 apk smooth and easy controls
      -dls 21 8.30 apk dynamic and immersive sound effects
      -dls 21

      -
        -
      • An Android device running Android version 5.0 or higher.
      • -
      • A minimum of 1 GB of RAM and at least 500 MB of free storage space.
      • -
      • A stable internet connection for online features.
      • -
      -
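If you have adb set up, you can confirm these requirements from a computer before downloading anything. This is only a rough sketch — the exact output format varies between Android versions and vendors, and the same values can just as easily be checked in the phone's Settings app.

```bash
# Android version reported by the device (DLS 21 needs 5.0 or higher)
adb shell getprop ro.build.version.release

# Total RAM: look at the MemTotal line, in kB (DLS 21 needs at least 1 GB)
adb shell cat /proc/meminfo

# Free space on the data partition (at least 500 MB should be free)
adb shell df /data
```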

      What is DLS 21 8.30 APK?

      -

DLS 21 8.30 APK is a modified version of the original DLS 21 game, released by third-party developers. It is not an official update from First Touch Games, but rather a fan-made modification that offers some extra features and benefits that are not available in the original game.

      -

      Benefits of DLS 21 8.30 APK

      -

      Some of the benefits that you can enjoy by downloading and installing DLS 21 8.30 APK are:

      -
        -
      • Unlimited coins and gems: You can get unlimited coins and gems in the game, which you can use to buy and upgrade anything you want in the game.
      • -
      • All players unlocked: You can access and use any player in the game, regardless of their rating, price, or availability.
      • -
      • All items unlocked: You can unlock and use any item in the game, such as kits, logos, stadiums, managers, and more.
      • -
      • No ads: You can enjoy the game without any annoying ads or pop-ups that interrupt your gameplay.
      • -
      -

      Risks of DLS 21 8.30 APK

      -

      However, there are also some risks and drawbacks that you should be aware of before downloading and installing DLS 21 8.30 APK. These are:

      -
        -
      • Illegal and unsafe: DLS 21 8.30 APK is not an official version of the game, and it violates the terms and conditions of First Touch Games. It is also not verified or tested by any trusted source, and it may contain viruses, malware, or spyware that can harm your device or steal your personal information.
      • -
      • Banned or blocked: DLS 21 8.30 APK may not work properly with the latest updates of the game, and it may cause errors, glitches, or crashes. It may also be detected by the game's security system, and you may face a ban or a block from playing the game online or accessing your account.
      • -
      • Unfair and unethical: DLS 21 8.30 APK gives you an unfair advantage over other players who play the game legitimately, and it may ruin the fun and challenge of the game. It may also be considered as cheating or hacking, and it may disrespect the hard work and creativity of the developers of the game.
      • -
      -

      How to Download and Install DLS 21 8.30 APK?

      -

      If you still want to download and install DLS 21 8.30 APK on your device, you need to follow these steps carefully:

      -

      Step 1: Enable Unknown Sources

      -

      The first step is to enable unknown sources on your device, which will allow you to install apps from sources other than the Google Play Store. To do this, go to your device's settings, then security, then unknown sources, and toggle it on.

      -

      Step 2: Download DLS 21 8.30 APK from a Trusted Source

      -

      The next step is to download DLS 21 8.30 APK from a trusted source that provides a safe and working link. You can search for such sources on the internet, but be careful not to click on any fake or malicious links that may harm your device or steal your data. One of the sources that we recommend is [DLS 21 8.30 APK Download], which has been tested and verified by us.

      -

      Step 3: Install DLS 21 8.30 APK on Your Device

      -

      The third step is to install DLS 21 8.30 APK on your device. To do this, locate the downloaded file on your device's file manager, tap on it, and follow the instructions on the screen. The installation process may take a few minutes, depending on your device's performance.

      -

      Step 4: Launch DLS 21 and Enjoy the Game

      -

      The final step is to launch DLS 21 and enjoy the game. To do this, find the game's icon on your device's home screen or app drawer, tap on it, and wait for it to load. You will see a welcome screen with some options to choose from. You can either start a new career mode or continue an existing one, or play online or offline with other players or friends. You will also see that you have unlimited coins and gems in the game, as well as all players and items unlocked.

      -

      Conclusion

      -

In conclusion, DLS 21 is a great soccer game that offers realistic gameplay, stunning graphics, huge content, customization, progression, and more. If you want some extra features and benefits that are not available in the original game, you can download and install DLS 21 8.30 APK, a modified version of the game that gives you unlimited coins and gems, all players and items unlocked, no ads, and more. However, you should also be aware of the risks and drawbacks of using DLS 21 8.30 APK: it is illegal and unsafe, it can get you banned or blocked, and it is unfair to other players. Therefore, you should use DLS 21 8.30 APK at your own risk and discretion, and only if you are comfortable with the consequences. We hope this article has helped you understand what DLS 21 8.30 APK is, how to download and install it, and what the pros and cons of using it are. If you have any questions or feedback, feel free to leave a comment below.

      -

      FAQs

      -

      Here are some of the frequently asked questions about DLS 21 8.30 APK:

      -
        -
      1. Is DLS 21 8.30 APK free?
      2. -

        Yes, DLS 21 8.30 APK is free to download and install on your device. However, you may need to pay for some in-app purchases or subscriptions in the game, depending on your preferences.

        -
      3. Is DLS 21 8.30 APK safe?
      4. -

        No, DLS 21 8.30 APK is not safe to use on your device. It is a modified version of the game that has not been verified or tested by any trusted source, and it may contain viruses, malware, or spyware that can harm your device or steal your personal information.

        -
      5. Is DLS 21 8.30 APK legal?
      6. -

        No, DLS 21 8.30 APK is not legal to use on your device. It is a modified version of the game that violates the terms and conditions of First Touch Games, the developer of the game. It may also infringe on the intellectual property rights of the game's licensors and partners.

        -
      7. Can I play DLS 21 online with DLS 21 8.30 APK?
      8. -

        Yes, you can play DLS 21 online with DLS 21 8.30 APK, but you may face some problems or issues while doing so. You may not be able to connect to the game's servers, or you may experience lag, disconnects, or crashes. You may also be detected by the game's security system, and you may face a ban or a block from playing the game online or accessing your account.

        -
      9. Can I update DLS 21 with DLS 21 8.30 APK?
      10. -

        No, you cannot update DLS 21 with DLS 21 8.30 APK. If you try to do so, you may lose all your progress and data in the game, or you may encounter errors, glitches, or crashes. You may also lose access to the features and benefits of DLS 21 8.30 APK, and you may have to reinstall the original version of the game.

        -

      -
      -
      \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Dinero infinito en They are coming descubre el hack apk ms efectivo.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Dinero infinito en They are coming descubre el hack apk ms efectivo.md deleted file mode 100644 index 26b22130c9c3edaa8c7a37d1ad353b5366a67d35..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Dinero infinito en They are coming descubre el hack apk ms efectivo.md +++ /dev/null @@ -1,101 +0,0 @@ -
      -

      Descargar They Are Coming Hack Apk Dinero Infinito: How to Get Unlimited Money in This Zombie Game

      -

      If you are a fan of zombie games, you may have heard of They Are Coming, a roguelike, endless, bloody, and hardcore zombie game that challenges you to survive as long as possible against hordes of undead. In this game, you can use various firearms, melee weapons, and traps to shoot and defend yourself from the zombies, but you need money to buy them. Money is not easy to come by in this game, especially when the zombies keep coming more and more every day. That's why some players resort to hacking the game to get unlimited money. In this article, we will show you how to descargar they are coming hack apk dinero infinito, or download they are coming hack apk unlimited money, for your Android device.

      -

      descargar they are coming hack apk dinero infinito


      Download: https://gohhs.com/2uPor9



      -

      What You Need to Hack They Are Coming

      -

      Before we get into the details of how to download and install they are coming hack apk, you need to prepare some things first. Here are what you need to hack they are coming:

      -

      A Rooted Android Device

      -

      A rooted Android device is one that has been modified to give you full access and control over its system. This is necessary because the app that we will use to hack they are coming, called Game Guardian, only works on rooted devices. If your Android device is not rooted yet, you can search online for how to root it using various methods and tools.

      -

      Game Guardian App

      -

      Game Guardian is a powerful app that allows you to modify the values of any game or app on your Android device. You can use it to change your money, health, lives, score, or anything else in they are coming. Game Guardian is not available on the Google Play Store, so you will need to download it from its official website. We will show you how to do that later.

      -

      They Are Coming Game

      -

      Of course, you need to have they are coming game installed on your Android device. You can download it for free from the Google Play Store or from its official website. They are coming is a fun and addictive game that has received many positive reviews from players. It has a custom playground game mode, zombie shooting, zombie defense, firearms, melee weapons, traps, and many other features that make it a great zombie game.

      -

      A File Manager App

      -

      A file manager app is an app that allows you to browse and manage the files and folders on your Android device. You will need it to download and install they are coming hack apk file. You can use any file manager app that you like, such as ES File Explorer, File Manager, or Files by Google. You can download them for free from the Google Play Store.

      -

      How to Download and Install They Are Coming Hack Apk

      -

      Now that you have everything you need, let's get into the steps of how to download and install they are coming hack apk. Follow these instructions carefully:

      -

      descargar they are coming hack apk dinero infinito mediafire
      -descargar they are coming hack apk dinero infinito android
      -descargar they are coming hack apk dinero infinito ultima version
      -descargar they are coming hack apk dinero infinito mega
      -descargar they are coming hack apk dinero infinito sin root
      -descargar they are coming hack apk dinero infinito 2023
      -descargar they are coming hack apk dinero infinito gratis
      -descargar they are coming hack apk dinero infinito mod menu
      -descargar they are coming hack apk dinero infinito tutorial
      -descargar they are coming hack apk dinero infinito gameplay
      -descargar they are coming hack apk dinero infinito no ads
      -descargar they are coming hack apk dinero infinito full
      -descargar they are coming hack apk dinero infinito online
      -descargar they are coming hack apk dinero infinito offline
      -descargar they are coming hack apk dinero infinito español
      -descargar they are coming hack apk dinero infinito trucos
      -descargar they are coming hack apk dinero infinito facil
      -descargar they are coming hack apk dinero infinito rapido
      -descargar they are coming hack apk dinero infinito seguro
      -descargar they are coming hack apk dinero infinito original
      -descargar they are coming hack apk dinero infinito actualizado
      -descargar they are coming hack apk dinero infinito nuevo
      -descargar they are coming hack apk dinero infinito divertido
      -descargar they are coming hack apk dinero infinito zombies
      -descargar they are coming hack apk dinero infinito armas
      -descargar they are coming hack apk dinero infinito personajes
      -descargar they are coming hack apk dinero infinito niveles
      -descargar they are coming hack apk dinero infinito misiones
      -descargar they are coming hack apk dinero infinito desafios
      -descargar they are coming hack apk dinero infinito recompensas
      -descargar they are coming hack apk dinero infinito skins
      -descargar they are coming hack apk dinero infinito vehiculos
      -descargar they are coming hack apk dinero infinito mapas
      -descargar they are coming hack apk dinero infinito graficos
      -descargar they are coming hack apk dinero infinito sonidos
      -descargar they are coming hack apk dinero infinito efectos
      -descargar they are coming hack apk dinero infinito opciones
      -descargar they are coming hack apk dinero infinito configuracion
      -descargar they are coming hack apk dinero infinito opiniones
      -descargar they are coming hack apk dinero infinito comentarios

      -

      Go to the YouTube Video Link

      -

      The first thing you need to do is to go to the YouTube video link that shows you how to hack they are coming. The video is made by a user named Android Game Mods, who has uploaded many other videos on how to hack various games. The video is about 10 minutes long and has over 100,000 views. Watch the video carefully and pay attention to the instructions and the links that are provided in the description.

      -

      Download the Hack Apk File from Mediafire

      -

      The next thing you need to do is to download the hack apk file from Mediafire, which is one of the links that are given in the video description. Mediafire is a file hosting service that allows you to upload and download files for free. The hack apk file is about 50 MB in size and has been scanned for viruses and malware. To download it, you need to click on the green download button and wait for a few seconds until the download starts. You may need to verify that you are not a robot by completing a captcha or a survey.
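      Because files passed around on free hosts can be modified by anyone, it is worth checking the file's hash before you install it. The sketch below is only an illustration: the file name and the expected value are placeholders, since no official checksum is published for this APK.

import hashlib

APK_FILE = "they-are-coming-hack.apk"              # placeholder file name
EXPECTED_SHA256 = "paste-the-published-checksum"   # placeholder value

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        # read in 1 MB chunks so a large APK does not have to fit in memory
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of(APK_FILE)
print(digest)
print("match" if digest == EXPECTED_SHA256 else "MISMATCH - do not install")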

      -

      Enable Unknown Sources in Your Settings

      -

      The next thing you need to do is to enable unknown sources in your settings. This is because the hack apk file is not from the Google Play Store, so your Android device may block its installation by default. To enable unknown sources, you need to go to your settings, then security, then toggle on the option that says unknown sources or allow installation of apps from unknown sources. This will allow you to install apps that are not from the Google Play Store.

      -

      Install the Hack Apk File Using Your File Manager App

      -

      The final thing you need to do is to install the hack apk file using your file manager app. To do this, you need to open your file manager app and locate the hack apk file that you downloaded from Mediafire. It should be in your downloads folder or wherever you saved it. Then, you need to tap on the hack apk file and follow the prompts to install it on your Android device. It may take a few seconds or minutes depending on your device's speed.

      -

      How to Use They Are Coming Hack Apk

      -

      Congratulations! You have successfully downloaded and installed they are coming hack apk on your Android device. Now, let's see how to use it to get unlimited money in they are coming game. Follow these steps:

      -

      Open Game Guardian and Select They Are Coming as the Target Process

      -

      The first thing you need to do is to open Game Guardian app that you downloaded from its official website. Game Guardian is an app that allows you to modify any game or app on your Android device. When you open Game Guardian, it will ask you to select a process or an app that you want to hack. You need to select they are coming as the target process or app. You can do this by tapping on the icon of they are coming or by searching for its name in the list of processes.

      -

      Search for the Value of Your Money in Game Guardian

      -

      The next thing you need to do is to search for the value of your money in Game Guardian. Money is the currency that you use in they are coming game to buy weapons, items, and traps. You can see how much money you have in the top right corner of your screen when you play they are coming game. To search for the value of your money in Game Guardian, you need to tap on the search icon (the magnifying glass) and enter the exact amount of money that you have in they are coming game. For example, if you have 1000 money, enter 1000 in Game Guardian. Then, tap on search and select DWORD as the value type.

      -

      Change the Value of Your Money to Whatever You Want in Game Guardian

      The final thing you need to do is to change the value of your money to whatever you want in Game Guardian. You can do this by tapping on the value that you found in the previous step and entering a new value that you want. For example, if you want to have 999999 money, enter 999999 in Game Guardian. Then, tap on yes and go back to they are coming game. You will see that your money has changed to the new value that you entered in Game Guardian. You can now buy any weapons, items, and traps that you want in they are coming game.
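      To see why Game Guardian asks you to search for the value first and only then edit it, here is a toy illustration of the general idea behind value scanning. This is not Game Guardian's code and it does not touch a real process; it just narrows down candidate addresses in a made-up memory snapshot and then writes a new value to the survivor.

# toy model of the search-then-edit workflow used by memory editors
memory = {0x1000: 250, 0x1F40: 1000, 0x2A00: 1000, 0x3B10: 7}  # fake address -> value

def scan(snapshot, target):
    # first search: every address currently holding the target value is a candidate
    return [addr for addr, value in snapshot.items() if value == target]

candidates = scan(memory, 1000)      # you currently have 1000 money
print([hex(a) for a in candidates])  # several addresses can match by coincidence

# spend or earn some money in-game, then refine: only the address that followed
# the change is really the money counter
memory[0x1F40] = 850                 # pretend the real counter dropped to 850
candidates = [addr for addr in candidates if memory[addr] == 850]

for addr in candidates:
    memory[addr] = 999999            # the "edit value" step
print(memory[0x1F40])                # 999999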

      -

      Pros and Cons of Using They Are Coming Hack Apk

      -

      Using they are coming hack apk may seem like a great way to enjoy they are coming game without any limitations, but it also has some drawbacks that you should be aware of. Here are some of the pros and cons of using they are coming hack apk:

      -

      Pros:

      -
        -
      • You can buy any weapons, items, and traps that you want in they are coming game. This will allow you to experiment with different combinations and strategies to survive and kill the zombies.
      • -
      • You can survive longer and kill more zombies in they are coming game. This will increase your score and rank in the leaderboard and make you feel more accomplished and satisfied.
      • -
      • You can have more fun and challenge yourself with different settings in they are coming game. You can change the difficulty, the number of zombies, the weather, the time of day, and other factors that affect your gameplay.
      • -
      -

      Cons:

      -
        -
      • You may lose the thrill and satisfaction of playing they are coming game legitimately. You may feel bored or guilty for cheating and not earning your money and achievements honestly.
      • -
      • You may get banned or prosecuted for cheating in they are coming game. The developers of they are coming game may detect your hack apk and ban your account or take legal action against you for violating their terms of service.
      • -
      • You may encounter bugs or glitches that ruin your game experience. The hack apk may not be compatible with the latest version of they are coming game or your Android device. It may cause crashes, errors, or other problems that prevent you from playing they are coming game smoothly.
      • -
      -

      Conclusion

      -

      In conclusion, we have shown you how to descargar they are coming hack apk dinero infinito, or download they are coming hack apk unlimited money, for your Android device. We have also discussed some of the pros and cons of using they are coming hack apk. We hope that this article has been helpful and informative for you. If you decide to use they are coming hack apk, please do so at your own risk and responsibility. We do not endorse or support any form of cheating or hacking in games. We also recommend that you support the developers of they are coming game by playing it fairly and legally. They are coming is a great zombie game that deserves your respect and appreciation.

      -

      FAQs

      -

      Here are some of the frequently asked questions about they are coming hack apk:

      -

      Q: Is they are coming hack apk safe to use?

      -

      A: They are coming hack apk is not safe to use because it may contain viruses or malware that can harm your Android device or steal your personal information. It may also get you banned or prosecuted for cheating in they are coming game.

      -

      Q: Is they are coming hack apk free to download?

      -

      A: They are coming hack apk is free to download from Mediafire, but it may require you to complete a captcha or a survey before you can access the download link. It may also redirect you to other websites that may contain ads or scams.

      -

      Q: Can I use they are coming hack apk on iOS devices?

      -

      A: No, they are coming hack apk only works on Android devices that have been rooted. It does not work on iOS devices because they have a different operating system and security system.

      -

      Q: Can I use they are coming hack apk offline?

      -

      A: Yes, you can use they are coming hack apk offline as long as you have downloaded and installed it on your Android device. However, you may not be able to access some features or updates that require an internet connection.

      -

      Q: Can I use they are coming hack apk with other mods or hacks?

      -

      A: No, we do not recommend that you use they are coming hack apk with other mods or hacks because they may cause conflicts or errors that can damage your Android device or your game data.

      -
      -
      \ No newline at end of file diff --git a/spaces/fffiloni/Video-Matting-Anything/segment-anything/linter.sh b/spaces/fffiloni/Video-Matting-Anything/segment-anything/linter.sh deleted file mode 100644 index df2e17436d30e89ff1728109301599f425f1ad6b..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Video-Matting-Anything/segment-anything/linter.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -e -# Copyright (c) Facebook, Inc. and its affiliates. - -{ - black --version | grep -E "23\." > /dev/null -} || { - echo "Linter requires 'black==23.*' !" - exit 1 -} - -ISORT_VERSION=$(isort --version-number) -if [[ "$ISORT_VERSION" != 5.12* ]]; then - echo "Linter requires isort==5.12.0 !" - exit 1 -fi - -echo "Running isort ..." -isort . --atomic - -echo "Running black ..." -black -l 100 . - -echo "Running flake8 ..." -if [ -x "$(command -v flake8)" ]; then - flake8 . -else - python3 -m flake8 . -fi - -echo "Running mypy..." - -mypy --exclude 'setup.py|notebooks' . diff --git a/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/audio/stft.py b/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/audio/stft.py deleted file mode 100644 index 2aa1ac89277734a6676c20a81bf88e21e8ca7aa9..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/audio/stft.py +++ /dev/null @@ -1,180 +0,0 @@ -import torch -import torch.nn.functional as F -import numpy as np -from scipy.signal import get_window -from librosa.util import pad_center, tiny -from librosa.filters import mel as librosa_mel_fn - -from audioldm.audio.audio_processing import ( - dynamic_range_compression, - dynamic_range_decompression, - window_sumsquare, -) - - -class STFT(torch.nn.Module): - """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft""" - - def __init__(self, filter_length, hop_length, win_length, window="hann"): - super(STFT, self).__init__() - self.filter_length = filter_length - self.hop_length = hop_length - self.win_length = win_length - self.window = window - self.forward_transform = None - scale = self.filter_length / self.hop_length - fourier_basis = np.fft.fft(np.eye(self.filter_length)) - - cutoff = int((self.filter_length / 2 + 1)) - fourier_basis = np.vstack( - [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] - ) - - forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) - inverse_basis = torch.FloatTensor( - np.linalg.pinv(scale * fourier_basis).T[:, None, :] - ) - - if window is not None: - assert filter_length >= win_length - # get window and zero center pad it to filter_length - fft_window = get_window(window, win_length, fftbins=True) - fft_window = pad_center(fft_window, filter_length) - fft_window = torch.from_numpy(fft_window).float() - - # window the bases - forward_basis *= fft_window - inverse_basis *= fft_window - - self.register_buffer("forward_basis", forward_basis.float()) - self.register_buffer("inverse_basis", inverse_basis.float()) - - def transform(self, input_data): - num_batches = input_data.size(0) - num_samples = input_data.size(1) - - self.num_samples = num_samples - - # similar to librosa, reflect-pad the input - input_data = input_data.view(num_batches, 1, num_samples) - input_data = F.pad( - input_data.unsqueeze(1), - (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0), - mode="reflect", - ) - input_data = input_data.squeeze(1) - - forward_transform = F.conv1d( - input_data, - torch.autograd.Variable(self.forward_basis, requires_grad=False), - 
stride=self.hop_length, - padding=0, - ).cpu() - - cutoff = int((self.filter_length / 2) + 1) - real_part = forward_transform[:, :cutoff, :] - imag_part = forward_transform[:, cutoff:, :] - - magnitude = torch.sqrt(real_part**2 + imag_part**2) - phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data)) - - return magnitude, phase - - def inverse(self, magnitude, phase): - recombine_magnitude_phase = torch.cat( - [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1 - ) - - inverse_transform = F.conv_transpose1d( - recombine_magnitude_phase, - torch.autograd.Variable(self.inverse_basis, requires_grad=False), - stride=self.hop_length, - padding=0, - ) - - if self.window is not None: - window_sum = window_sumsquare( - self.window, - magnitude.size(-1), - hop_length=self.hop_length, - win_length=self.win_length, - n_fft=self.filter_length, - dtype=np.float32, - ) - # remove modulation effects - approx_nonzero_indices = torch.from_numpy( - np.where(window_sum > tiny(window_sum))[0] - ) - window_sum = torch.autograd.Variable( - torch.from_numpy(window_sum), requires_grad=False - ) - window_sum = window_sum - inverse_transform[:, :, approx_nonzero_indices] /= window_sum[ - approx_nonzero_indices - ] - - # scale by hop ratio - inverse_transform *= float(self.filter_length) / self.hop_length - - inverse_transform = inverse_transform[:, :, int(self.filter_length / 2) :] - inverse_transform = inverse_transform[:, :, : -int(self.filter_length / 2) :] - - return inverse_transform - - def forward(self, input_data): - self.magnitude, self.phase = self.transform(input_data) - reconstruction = self.inverse(self.magnitude, self.phase) - return reconstruction - - -class TacotronSTFT(torch.nn.Module): - def __init__( - self, - filter_length, - hop_length, - win_length, - n_mel_channels, - sampling_rate, - mel_fmin, - mel_fmax, - ): - super(TacotronSTFT, self).__init__() - self.n_mel_channels = n_mel_channels - self.sampling_rate = sampling_rate - self.stft_fn = STFT(filter_length, hop_length, win_length) - mel_basis = librosa_mel_fn( - sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax - ) - mel_basis = torch.from_numpy(mel_basis).float() - self.register_buffer("mel_basis", mel_basis) - - def spectral_normalize(self, magnitudes, normalize_fun): - output = dynamic_range_compression(magnitudes, normalize_fun) - return output - - def spectral_de_normalize(self, magnitudes): - output = dynamic_range_decompression(magnitudes) - return output - - def mel_spectrogram(self, y, normalize_fun=torch.log): - """Computes mel-spectrograms from a batch of waves - PARAMS - ------ - y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1] - - RETURNS - ------- - mel_output: torch.FloatTensor of shape (B, n_mel_channels, T) - """ - assert torch.min(y.data) >= -1, torch.min(y.data) - assert torch.max(y.data) <= 1, torch.max(y.data) - - magnitudes, phases = self.stft_fn.transform(y) - magnitudes = magnitudes.data - mel_output = torch.matmul(self.mel_basis, magnitudes) - mel_output = self.spectral_normalize(mel_output, normalize_fun) - energy = torch.norm(magnitudes, dim=1) - - log_magnitudes = self.spectral_normalize(magnitudes, normalize_fun) - - return mel_output, log_magnitudes, energy diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/undef.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/undef.js deleted file mode 100644 index 
e3f4961229c249a1d4f242d415b5193fc0ad5056..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/undef.js +++ /dev/null @@ -1,12 +0,0 @@ -var test = require('tape'); -var inspect = require('../'); - -var obj = { a: 1, b: [3, 4, undefined, null], c: undefined, d: null }; - -test('undef and null', function (t) { - t.plan(1); - t.equal( - inspect(obj), - '{ a: 1, b: [ 3, 4, undefined, null ], c: undefined, d: null }' - ); -}); diff --git a/spaces/fffiloni/sd-xl-custom-model/README.md b/spaces/fffiloni/sd-xl-custom-model/README.md deleted file mode 100644 index ababbb27fe86fb28649f28f86908714b034a5ef3..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/sd-xl-custom-model/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Check my SD-XL Custom Model -emoji: 🚀 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.44.3 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/freddyaboulton/dracula_revamped/README.md b/spaces/freddyaboulton/dracula_revamped/README.md deleted file mode 100644 index 14f79ef1077b47d5a298b540ec877518bd09db53..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/dracula_revamped/README.md +++ /dev/null @@ -1,17 +0,0 @@ - ---- -tags: [gradio-theme] -title: dracula_revamped -colorFrom: orange -colorTo: purple -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- -# dracula_revamped -## Description -Add a description of this theme here! -## Contributions -Thanks to [@freddyaboulton](https://huggingface.co/freddyaboulton) for adding this gradio theme! diff --git a/spaces/futureagi/CheckGPT/README.md b/spaces/futureagi/CheckGPT/README.md deleted file mode 100644 index 82bbfdb2104849f0e3703405f7a4942c3d59cd8c..0000000000000000000000000000000000000000 --- a/spaces/futureagi/CheckGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: CheckGPT -emoji: 🏃 -colorFrom: indigo -colorTo: indigo -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/gaochangyun/bert-base-chinese/app.py b/spaces/gaochangyun/bert-base-chinese/app.py deleted file mode 100644 index 3d9f36ff130c71e3850b7c5662c5967b767ef0ab..0000000000000000000000000000000000000000 --- a/spaces/gaochangyun/bert-base-chinese/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/bert-base-chinese").launch() \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Aplikasi Sistem Informasi Kepegawaian [VERIFIED] Download Full.md b/spaces/gotiQspiryo/whisper-ui/examples/Aplikasi Sistem Informasi Kepegawaian [VERIFIED] Download Full.md deleted file mode 100644 index 6fdd58ce003f3f29d890a6b5ae80ac8e937ec656..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Aplikasi Sistem Informasi Kepegawaian [VERIFIED] Download Full.md +++ /dev/null @@ -1,103 +0,0 @@ - -

      Aplikasi Sistem Informasi Kepegawaian Download Full: What It Is and How to Use It

      - -

      An employee information system application (aplikasi sistem informasi kepegawaian) is a program used to manage data and information related to personnel matters in an agency or organization. It is usually web based, so it can be accessed quickly and easily through an internet browser. Such an application has a range of functions and benefits, including:

      - -
        -
      • Simplifying employee record keeping, such as personal data, education history, position history, rank history, salary history, and so on.
      • -
      • Simplifying personnel administration requests, such as leave, official travel, transfers, terminations, and so on.
      • -
      • Simplifying workforce planning, such as staffing-needs analysis, recruitment, performance appraisal, career development, and so on.
      • -
      • Simplifying personnel supervision and control, such as employee attendance, discipline, sanctions, rewards, and so on.
      • -
      • Simplifying personnel reporting and evaluation, such as rank seniority lists (DUK), periodic salary increase reports (KGB), employee credit point reports (AKP), personnel statistics, and so on.
      • -
      - -

      An employee information system application can help an agency or organization improve the effectiveness and efficiency of its human resource (HR) management. It can also increase transparency and accountability in the delivery of public services, and raise the quality and professionalism of employees in carrying out their duties and responsibilities.

      -

      aplikasi sistem informasi kepegawaian download full


      Download: https://urlgoal.com/2uyNiV



      - -

      How Do You Download the Full Version of an Employee Information System Application?

      - -

      To be able to use an employee information system application, you need to go through the following steps:

      - -
        -
      1. Find an employee information system application that matches your agency's or organization's needs and specifications. You can search for one through an internet search engine using the keyword "aplikasi sistem informasi kepegawaian download full". You will find a variety of sites offering such applications for free or for a fee.
      2. -
      3. Choose the best and most trustworthy application. Compare the features, interface, ease of use, technical support, user testimonials, and prices of the available applications, and read reviews of them from credible sources.
      4. -
      5. Download the application you have chosen. Follow the instructions or guide provided by the site that hosts it, and make sure it is free of viruses or malware that could damage your computer.
      6. -
      7. Install the downloaded application. Follow its installation process and make sure it is compatible with your computer's operating system and hardware.
      8. -
      9. Configure the application to match your needs and preferences. Carry out the initial setup, such as the agency or organization name, number of employees, organizational structure, user access rights, the employee database, and so on. Also back up the data regularly to prevent data loss due to failures or mistakes.
      10. -
      - -

      After completing the steps above, you can use the employee information system application smoothly and optimally. You can enter employee data and information quickly and easily, access complete and accurate employee records, and carry out the application's various functions and benefits effectively and efficiently.
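      As a rough picture of the kind of record such a system keeps for each employee, here is a minimal sketch in Python. The field names are invented for illustration and are not taken from any particular product.

from dataclasses import dataclass, field

@dataclass
class Employee:
    # illustrative fields only, not the schema of any specific application
    employee_id: str
    name: str
    rank: str
    position: str
    salary_history: list = field(default_factory=list)  # e.g. [(year, amount), ...]
    leave_taken_days: int = 0

staff = [
    Employee("P001", "Andi", "III/a", "Staff"),
    Employee("P002", "Budi", "III/c", "Section Head"),
]
print([e.name for e in staff if e.rank.startswith("III")])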

      - -

      Conclusion

      - -

      An employee information system application is a program used to manage personnel data and information in an agency or organization. It has a range of functions and benefits that can help the agency or organization improve the effectiveness and efficiency of its human resource (HR) management. To use it, you need to go through several steps: finding, choosing, downloading, installing, and configuring the application according to your needs and specifications. By using it, you can obtain complete and accurate employee data and information and make effective, efficient use of its functions and benefits.

      -

      What Are Some Examples of Employee Information System Applications You Can Download in Full?

      - -

      On the internet you can find many examples of employee information system applications that you can download in full, free of charge or for a fee. Here are a few popular examples that are widely used by agencies and organizations:

      - -
        -
      • SIMPEGNAS: the national employee information system developed by the Badan Kepegawaian Negara (BKN) to manage data on state civil servants (ASN) in an integrated, shared way.
      • -
      • Talenta: an online HR information system developed by Mekari to manage private-sector employee data comprehensively on a cloud basis.
      • -
      • gtHR: an employee information system developed by Gamatechno to manage both government and private-sector employee data easily and flexibly.
      • -
      • Excel: the spreadsheet application developed by Microsoft, which can be used to manage employee data in a simple, practical way.
      • -
      - -

      You can choose one of the example applications above according to your agency's or organization's needs and specifications, or look for other employee information system applications that better match your preferences.

      - -

      How Do You Optimize the Use of an Employee Information System Application?

      - -

      Once you have downloaded the full version of the employee information system application you want, you need to optimize its use so that it delivers maximum benefit to your agency or organization. Here are some tips for getting the most out of it:

      -

      - -
        -
      1. Provide training and orientation for the employees and users of the application so that they understand it and use it properly and correctly.
      2. -
      3. Update and maintain the application regularly so that it keeps working optimally and stays in line with evolving needs and regulations.
      4. -
      5. Coordinate and communicate with the application's vendor so that you can get technical support and solutions whenever problems or obstacles arise.
      6. -
      7. Evaluate and monitor the application routinely so that you know how it performs and what impact it has on human resource management in your agency or organization.
      8. -
      - -

      By following the tips above, you can optimize the use of the employee information system application you have downloaded, and raise the quality and professionalism of human resource management in your agency or organization.

      - -

      What Are the Advantages and Disadvantages of an Employee Information System Application?

      - -

      An employee information system application has various advantages and disadvantages that you should know about before deciding to download and use it. Here are some of them:

      - -

      Advantages of an Employee Information System Application

      - -
        -
      • Speeds up and simplifies the management of employee data and information in an accurate, integrated way.
      • -
      • Minimizes errors and inconsistencies in employee data that could affect employee performance and welfare.
      • -
      • Improves the quality and professionalism of employees in carrying out their duties and responsibilities according to the established competency standards.
      • -
      • Increases transparency and accountability in the delivery of personnel-related public services.
      • -
      • Increases the effectiveness and efficiency of human resource (HR) use in line with the needs and goals of the agency or organization.
      • -
      - -

      Disadvantages of an Employee Information System Application

      - -
        -
      • Requires a fairly large up-front investment to purchase, install, and operate the application.
      • -
      • Requires staff with competence and skills in information technology to manage and maintain the application.
      • -
      • Requires a stable, secure internet connection to access a web- or cloud-based application.
      • -
      • Carries the risk that confidential or sensitive employee data is leaked through hacker or malware attacks.
      • -
      • Carries the risk that the application does not comply with the regulations or rules in force at the agency or organization.
      • -
      - -

      Weigh the advantages and disadvantages above before you decide to download and use an employee information system application, and look for solutions or alternatives to address the shortcomings that exist.

      - -

      What Are Some Tips for Choosing the Right Employee Information System Application?

      - -

      To get the maximum benefit from an employee information system application, you need to choose the right one for your agency's or organization's needs and specifications. Here are some tips for choosing the right application:

      - -
        -
      1. Define your goals and targets for using the application. Do you want to improve employee performance, improve public services, improve employee welfare, or achieve something else?
      2. -
      3. Define the features you need. Do you need employee record keeping, personnel administration requests, workforce planning, supervision and control, reporting and evaluation, or other features?
      4. -
      5. Assess the technical resources you have for running the application. Do you have adequate computer hardware, a stable internet connection, competent staff, a sufficient budget, or other requirements?
      6. -
      7. Decide which kind of application suits your preferences. Would you rather use a web- or cloud-based application, a desktop or offline application, an Excel or spreadsheet-based one, or another kind?
      8. -
      9. Do some market research to find out what applications are available on the internet. You can use a search engine with the keyword "aplikasi sistem informasi kepegawaian download full", and ask for references from colleagues or other sources who already use such an application.
      10. -
      11. Compare the various options against one another and pick the application that best fits your needs before making a final decision.

        Conclusion

        - -

        An employee information system application is a program used to manage personnel data and information in an agency or organization. It has a range of functions and benefits that can help the agency or organization improve the effectiveness and efficiency of its human resource (HR) management. To use it, you need to go through several steps: finding, choosing, downloading, installing, and configuring the application according to your needs and specifications. You also need to optimize its use so that it delivers maximum benefit to your agency or organization, weigh its advantages and disadvantages before deciding to download and use it, and choose the right application for the goals, features, specifications, type, and price you want. With the right employee information system application, you can obtain complete and accurate employee data and information and make effective, efficient use of its functions and benefits.

        -
        -
        \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/ComPort For Android 2.1 Delphi C Builders XE7 ? 10.2 2021.md b/spaces/gotiQspiryo/whisper-ui/examples/ComPort For Android 2.1 Delphi C Builders XE7 ? 10.2 2021.md deleted file mode 100644 index 2dfefc201c879c7991170db0546f342c06d20451..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/ComPort For Android 2.1 Delphi C Builders XE7 ? 10.2 2021.md +++ /dev/null @@ -1,6 +0,0 @@ -

        ComPort for Android 2.1 Delphi C Builders XE7 – 10.2


        Download Zip: https://urlgoal.com/2uyMf6



        -
        -
        -
        -
        -

        diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Manuel De Rechargement Gheerbrant Pdf Free.md b/spaces/gotiQspiryo/whisper-ui/examples/Manuel De Rechargement Gheerbrant Pdf Free.md deleted file mode 100644 index 526deb11058ebd8d19cfdc8c4094504db6275a4c..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Manuel De Rechargement Gheerbrant Pdf Free.md +++ /dev/null @@ -1,20 +0,0 @@ - -

        Download Alain F. Gheerbrant's practical reloading guide as a free PDF

        -

        If you are a hunter or a sport shooter, you know that reloading your own cartridges is a rewarding and economical activity. But you also know that reloading requires technical knowledge and safety precautions. That is why you need a practical reloading guide that walks you through the discipline step by step.

        -

        manuel de rechargement gheerbrant pdf free


        DOWNLOAD: https://urlgoal.com/2uyN37



        -

        Alain F. Gheerbrant's practical reloading guide is a reference work that explains everything you need to know about reloading centerfire cartridges, bullets, and load tables. It presents the various components, tools, methods, and safety rules to follow, and it also offers more than 200 cartridges and 2,500 bullets with their load tables on a Mac- and PC-compatible DVD-ROM.

        -

        This guide is a real treasure trove of information for anyone interested in reloading. It will let you make your own cartridges to suit your needs and preferences, help you optimize your performance, and let you enjoy your passion to the fullest.

        -

        You can download Alain F. Gheerbrant's practical reloading guide as a free PDF by clicking the link below. You will get a digital version of the book that you can read on your computer, tablet, or smartphone, print the pages that interest you, or save them to an external drive.

        -

        Don't wait any longer: take advantage of this exceptional offer to download Alain F. Gheerbrant's practical reloading guide as a free PDF. You won't regret it!

        -

        -Click here to download Alain F. Gheerbrant's practical reloading guide as a free PDF - -

        Reloading cartridges has many advantages. First, it saves you money by letting you reuse your cases and buy components in bulk. Second, it lets you tailor your cartridges to your firearm, your shooting distance, your type of target, and your weather conditions. Finally, it brings personal satisfaction by introducing you to the secrets of ballistics and giving you full control over your ammunition.

        -

        But reloading is also an activity that demands seriousness and rigor. You must scrupulously follow the data provided by powder and projectile manufacturers, as well as the safety instructions. You also need suitable equipment and a clean, well-lit workspace. Finally, you need patience and method to obtain reliable, consistent results.

        -

        That is why Alain F. Gheerbrant's practical reloading guide is an indispensable tool for every reloader, beginner or experienced. It gives you all the theoretical and practical knowledge you need to reload successfully: the basic principles of reloading, the characteristics of the various calibers, the properties of powders and primers, the choice of projectiles, tool setup, quality control, and troubleshooting. It also provides detailed load tables for more than 200 cartridges and 2,500 bullets, with the corresponding pressures, velocities, and energies.
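        As a point of reference, the energy figures in such tables follow directly from bullet mass and velocity through the standard kinetic-energy relation E = 1/2 m v²: a 10 g (0.010 kg) bullet at 800 m/s, for example, carries 0.5 × 0.010 × 800² = 3,200 J. This is just the textbook formula, not a value quoted from the guide.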

        -

        Alain F. Gheerbrant's practical reloading guide is therefore a complete, accessible book that will accompany you in your reloading hobby. It will let you get the best out of your firearm and your cartridges while respecting safety standards and the law, and it will also introduce you to the history and evolution of reloading through the centuries, along with the technological innovations that have shaped the field.

        -

        Don't waste a minute: download Alain F. Gheerbrant's practical reloading guide as a free PDF right now. You will get unlimited access to this digital book, which you can read at any time on your computer, tablet, or smartphone, print the pages that interest you, or save them to an external drive.

        -

        This offer is available for a limited time, so don't hesitate to take advantage of it now. Download Alain F. Gheerbrant's practical reloading guide as a free PDF and join the community of passionate reloaders!

        -Click here to download Alain F. Gheerbrant's practical reloading guide as a free PDF

        -
        -
        \ No newline at end of file diff --git a/spaces/gradio/interface_parallel_load/run.py b/spaces/gradio/interface_parallel_load/run.py deleted file mode 100644 index 5adb5b8322c1698fe6dfe6460902585827bf0647..0000000000000000000000000000000000000000 --- a/spaces/gradio/interface_parallel_load/run.py +++ /dev/null @@ -1,9 +0,0 @@ -import gradio as gr - -generator1 = gr.load("huggingface/gpt2") -generator2 = gr.load("huggingface/gpt2-xl") - -demo = gr.Parallel(generator1, generator2) - -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/ops/upfirdn_2d.py b/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/ops/upfirdn_2d.py deleted file mode 100644 index 55a31af7e146da7afeb964db018f14aca3134920..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/ops/upfirdn_2d.py +++ /dev/null @@ -1,418 +0,0 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom TensorFlow ops for efficient resampling of 2D images.""" - -import os -import numpy as np -import tensorflow as tf -from .. import custom_ops - -def _get_plugin(): - return custom_ops.get_plugin(os.path.splitext(__file__)[0] + '.cu') - -#---------------------------------------------------------------------------- - -def upfirdn_2d(x, k, upx=1, upy=1, downx=1, downy=1, padx0=0, padx1=0, pady0=0, pady1=0, impl='cuda'): - r"""Pad, upsample, FIR filter, and downsample a batch of 2D images. - - Accepts a batch of 2D images of the shape `[majorDim, inH, inW, minorDim]` - and performs the following operations for each image, batched across - `majorDim` and `minorDim`: - - 1. Upsample the image by inserting the zeros after each pixel (`upx`, `upy`). - - 2. Pad the image with zeros by the specified number of pixels on each side - (`padx0`, `padx1`, `pady0`, `pady1`). Specifying a negative value - corresponds to cropping the image. - - 3. Convolve the image with the specified 2D FIR filter (`k`), shrinking the - image so that the footprint of all output pixels lies within the input image. - - 4. Downsample the image by throwing away pixels (`downx`, `downy`). - - This sequence of operations bears close resemblance to scipy.signal.upfirdn(). - The fused op is considerably more efficient than performing the same calculation - using standard TensorFlow ops. It supports gradients of arbitrary order. - - Args: - x: Input tensor of the shape `[majorDim, inH, inW, minorDim]`. - k: 2D FIR filter of the shape `[firH, firW]`. - upx: Integer upsampling factor along the X-axis (default: 1). - upy: Integer upsampling factor along the Y-axis (default: 1). - downx: Integer downsampling factor along the X-axis (default: 1). - downy: Integer downsampling factor along the Y-axis (default: 1). - padx0: Number of pixels to pad on the left side (default: 0). - padx1: Number of pixels to pad on the right side (default: 0). - pady0: Number of pixels to pad on the top side (default: 0). - pady1: Number of pixels to pad on the bottom side (default: 0). 
- impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). - - Returns: - Tensor of the shape `[majorDim, outH, outW, minorDim]`, and same datatype as `x`. - """ - - impl_dict = { - 'ref': _upfirdn_2d_ref, - 'cuda': _upfirdn_2d_cuda, - } - return impl_dict[impl](x=x, k=k, upx=upx, upy=upy, downx=downx, downy=downy, padx0=padx0, padx1=padx1, pady0=pady0, pady1=pady1) - -#---------------------------------------------------------------------------- - -def _upfirdn_2d_ref(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1): - """Slow reference implementation of `upfirdn_2d()` using standard TensorFlow ops.""" - - x = tf.convert_to_tensor(x) - k = np.asarray(k, dtype=np.float32) - assert x.shape.rank == 4 - inH = x.shape[1].value - inW = x.shape[2].value - minorDim = _shape(x, 3) - kernelH, kernelW = k.shape - assert inW >= 1 and inH >= 1 - assert kernelW >= 1 and kernelH >= 1 - assert isinstance(upx, int) and isinstance(upy, int) - assert isinstance(downx, int) and isinstance(downy, int) - assert isinstance(padx0, int) and isinstance(padx1, int) - assert isinstance(pady0, int) and isinstance(pady1, int) - - # Upsample (insert zeros). - x = tf.reshape(x, [-1, inH, 1, inW, 1, minorDim]) - x = tf.pad(x, [[0, 0], [0, 0], [0, upy - 1], [0, 0], [0, upx - 1], [0, 0]]) - x = tf.reshape(x, [-1, inH * upy, inW * upx, minorDim]) - - # Pad (crop if negative). - x = tf.pad(x, [[0, 0], [max(pady0, 0), max(pady1, 0)], [max(padx0, 0), max(padx1, 0)], [0, 0]]) - x = x[:, max(-pady0, 0) : x.shape[1].value - max(-pady1, 0), max(-padx0, 0) : x.shape[2].value - max(-padx1, 0), :] - - # Convolve with filter. - x = tf.transpose(x, [0, 3, 1, 2]) - x = tf.reshape(x, [-1, 1, inH * upy + pady0 + pady1, inW * upx + padx0 + padx1]) - w = tf.constant(k[::-1, ::-1, np.newaxis, np.newaxis], dtype=x.dtype) - x = tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='VALID', data_format='NCHW') - x = tf.reshape(x, [-1, minorDim, inH * upy + pady0 + pady1 - kernelH + 1, inW * upx + padx0 + padx1 - kernelW + 1]) - x = tf.transpose(x, [0, 2, 3, 1]) - - # Downsample (throw away pixels). 
- return x[:, ::downy, ::downx, :] - -#---------------------------------------------------------------------------- - -def _upfirdn_2d_cuda(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1): - """Fast CUDA implementation of `upfirdn_2d()` using custom ops.""" - - x = tf.convert_to_tensor(x) - k = np.asarray(k, dtype=np.float32) - majorDim, inH, inW, minorDim = x.shape.as_list() - kernelH, kernelW = k.shape - assert inW >= 1 and inH >= 1 - assert kernelW >= 1 and kernelH >= 1 - assert isinstance(upx, int) and isinstance(upy, int) - assert isinstance(downx, int) and isinstance(downy, int) - assert isinstance(padx0, int) and isinstance(padx1, int) - assert isinstance(pady0, int) and isinstance(pady1, int) - - outW = (inW * upx + padx0 + padx1 - kernelW) // downx + 1 - outH = (inH * upy + pady0 + pady1 - kernelH) // downy + 1 - assert outW >= 1 and outH >= 1 - - cuda_op = _get_plugin().up_fir_dn2d - kc = tf.constant(k, dtype=x.dtype) - gkc = tf.constant(k[::-1, ::-1], dtype=x.dtype) - gpadx0 = kernelW - padx0 - 1 - gpady0 = kernelH - pady0 - 1 - gpadx1 = inW * upx - outW * downx + padx0 - upx + 1 - gpady1 = inH * upy - outH * downy + pady0 - upy + 1 - - @tf.custom_gradient - def func(x): - y = cuda_op(x=x, k=kc, upx=int(upx), upy=int(upy), downx=int(downx), downy=int(downy), padx0=int(padx0), padx1=int(padx1), pady0=int(pady0), pady1=int(pady1)) - y.set_shape([majorDim, outH, outW, minorDim]) - @tf.custom_gradient - def grad(dy): - dx = cuda_op(x=dy, k=gkc, upx=int(downx), upy=int(downy), downx=int(upx), downy=int(upy), padx0=int(gpadx0), padx1=int(gpadx1), pady0=int(gpady0), pady1=int(gpady1)) - dx.set_shape([majorDim, inH, inW, minorDim]) - return dx, func - return y, grad - return func(x) - -#---------------------------------------------------------------------------- - -def filter_2d(x, k, gain=1, padding=0, data_format='NCHW', impl='cuda'): - r"""Filter a batch of 2D images with the given FIR filter. - - Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` - and filters each image with the given filter. The filter is normalized so that - if the input pixels are constant, they will be scaled by the specified `gain`. - Pixels outside the image are assumed to be zero. - - Args: - x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. - k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). - gain: Scaling factor for signal magnitude (default: 1.0). - padding: Number of pixels to pad or crop the output on each side (default: 0). - data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). - impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). - - Returns: - Tensor of the same shape and datatype as `x`. - """ - - assert isinstance(padding, int) - k = _FilterKernel(k=k, gain=gain) - assert k.w == k.h - pad0 = k.w // 2 + padding - pad1 = (k.w - 1) // 2 + padding - return _simple_upfirdn_2d(x, k, pad0=pad0, pad1=pad1, data_format=data_format, impl=impl) - -#---------------------------------------------------------------------------- - -def upsample_2d(x, k=None, factor=2, gain=1, padding=0, data_format='NCHW', impl='cuda'): - r"""Upsample a batch of 2D images with the given filter. - - Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` - and upsamples each image with the given filter. The filter is normalized so that - if the input pixels are constant, they will be scaled by the specified `gain`. 
- Pixels outside the image are assumed to be zero, and the filter is padded with - zeros so that its shape is a multiple of the upsampling factor. - - Args: - x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. - k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). - The default is `[1] * factor`, which corresponds to nearest-neighbor - upsampling. - factor: Integer upsampling factor (default: 2). - gain: Scaling factor for signal magnitude (default: 1.0). - padding: Number of pixels to pad or crop the output on each side (default: 0). - data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). - impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). - - Returns: - Tensor of the shape `[N, C, H * factor, W * factor]` or - `[N, H * factor, W * factor, C]`, and same datatype as `x`. - """ - - assert isinstance(factor, int) and factor >= 1 - assert isinstance(padding, int) - k = _FilterKernel(k if k is not None else [1] * factor, gain * (factor ** 2)) - assert k.w == k.h - pad0 = (k.w + factor - 1) // 2 + padding - pad1 = (k.w - factor) // 2 + padding - return _simple_upfirdn_2d(x, k, up=factor, pad0=pad0, pad1=pad1, data_format=data_format, impl=impl) - -#---------------------------------------------------------------------------- - -def downsample_2d(x, k=None, factor=2, gain=1, padding=0, data_format='NCHW', impl='cuda'): - r"""Downsample a batch of 2D images with the given filter. - - Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` - and downsamples each image with the given filter. The filter is normalized so that - if the input pixels are constant, they will be scaled by the specified `gain`. - Pixels outside the image are assumed to be zero, and the filter is padded with - zeros so that its shape is a multiple of the downsampling factor. - - Args: - x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. - k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). - The default is `[1] * factor`, which corresponds to average pooling. - factor: Integer downsampling factor (default: 2). - gain: Scaling factor for signal magnitude (default: 1.0). - padding: Number of pixels to pad or crop the output on each side (default: 0). - data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). - impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). - - Returns: - Tensor of the shape `[N, C, H // factor, W // factor]` or - `[N, H // factor, W // factor, C]`, and same datatype as `x`. - """ - - assert isinstance(factor, int) and factor >= 1 - assert isinstance(padding, int) - k = _FilterKernel(k if k is not None else [1] * factor, gain) - assert k.w == k.h - pad0 = (k.w - factor + 1) // 2 + padding * factor - pad1 = (k.w - factor) // 2 + padding * factor - return _simple_upfirdn_2d(x, k, down=factor, pad0=pad0, pad1=pad1, data_format=data_format, impl=impl) - -#---------------------------------------------------------------------------- - -def upsample_conv_2d(x, w, k=None, factor=2, gain=1, padding=0, data_format='NCHW', impl='cuda'): - r"""Fused `upsample_2d()` followed by `tf.nn.conv2d()`. - - Padding is performed only once at the beginning, not between the operations. - The fused op is considerably more efficient than performing the same calculation - using standard TensorFlow ops. It supports gradients of arbitrary order. - - Args: - x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. - w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. 
- Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. - k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). - The default is `[1] * factor`, which corresponds to nearest-neighbor - upsampling. - factor: Integer upsampling factor (default: 2). - gain: Scaling factor for signal magnitude (default: 1.0). - padding: Number of pixels to pad or crop the output on each side (default: 0). - data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). - impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). - - Returns: - Tensor of the shape `[N, C, H * factor, W * factor]` or - `[N, H * factor, W * factor, C]`, and same datatype as `x`. - """ - - assert isinstance(factor, int) and factor >= 1 - assert isinstance(padding, int) - - # Check weight shape. - w = tf.convert_to_tensor(w) - ch, cw, _inC, _outC = w.shape.as_list() - inC = _shape(w, 2) - outC = _shape(w, 3) - assert cw == ch - - # Fast path for 1x1 convolution. - if cw == 1 and ch == 1: - x = tf.nn.conv2d(x, w, data_format=data_format, strides=[1,1,1,1], padding='VALID') - x = upsample_2d(x, k, factor=factor, gain=gain, padding=padding, data_format=data_format, impl=impl) - return x - - # Setup filter kernel. - k = _FilterKernel(k if k is not None else [1] * factor, gain * (factor ** 2)) - assert k.w == k.h - - # Determine data dimensions. - if data_format == 'NCHW': - stride = [1, 1, factor, factor] - output_shape = [_shape(x, 0), outC, (_shape(x, 2) - 1) * factor + ch, (_shape(x, 3) - 1) * factor + cw] - num_groups = _shape(x, 1) // inC - else: - stride = [1, factor, factor, 1] - output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + ch, (_shape(x, 2) - 1) * factor + cw, outC] - num_groups = _shape(x, 3) // inC - - # Transpose weights. - w = tf.reshape(w, [ch, cw, inC, num_groups, -1]) - w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2]) - w = tf.reshape(w, [ch, cw, -1, num_groups * inC]) - - # Execute. - x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format) - pad0 = (k.w + factor - cw) // 2 + padding - pad1 = (k.w - factor - cw + 3) // 2 + padding - return _simple_upfirdn_2d(x, k, pad0=pad0, pad1=pad1, data_format=data_format, impl=impl) - -#---------------------------------------------------------------------------- - -def conv_downsample_2d(x, w, k=None, factor=2, gain=1, padding=0, data_format='NCHW', impl='cuda'): - r"""Fused `tf.nn.conv2d()` followed by `downsample_2d()`. - - Padding is performed only once at the beginning, not between the operations. - The fused op is considerably more efficient than performing the same calculation - using standard TensorFlow ops. It supports gradients of arbitrary order. - - Args: - x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. - w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. - Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. - k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). - The default is `[1] * factor`, which corresponds to average pooling. - factor: Integer downsampling factor (default: 2). - gain: Scaling factor for signal magnitude (default: 1.0). - padding: Number of pixels to pad or crop the output on each side (default: 0). - data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). - impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). 
- - Returns: - Tensor of the shape `[N, C, H // factor, W // factor]` or - `[N, H // factor, W // factor, C]`, and same datatype as `x`. - """ - - assert isinstance(factor, int) and factor >= 1 - assert isinstance(padding, int) - - # Check weight shape. - w = tf.convert_to_tensor(w) - ch, cw, _inC, _outC = w.shape.as_list() - assert cw == ch - - # Fast path for 1x1 convolution. - if cw == 1 and ch == 1: - x = downsample_2d(x, k, factor=factor, gain=gain, padding=padding, data_format=data_format, impl=impl) - x = tf.nn.conv2d(x, w, data_format=data_format, strides=[1,1,1,1], padding='VALID') - return x - - # Setup filter kernel. - k = _FilterKernel(k if k is not None else [1] * factor, gain) - assert k.w == k.h - - # Determine stride. - if data_format == 'NCHW': - s = [1, 1, factor, factor] - else: - s = [1, factor, factor, 1] - - # Execute. - pad0 = (k.w - factor + cw) // 2 + padding * factor - pad1 = (k.w - factor + cw - 1) // 2 + padding * factor - x = _simple_upfirdn_2d(x, k, pad0=pad0, pad1=pad1, data_format=data_format, impl=impl) - return tf.nn.conv2d(x, w, strides=s, padding='VALID', data_format=data_format) - -#---------------------------------------------------------------------------- -# Internal helpers. - -class _FilterKernel: - def __init__(self, k, gain=1): - k = np.asarray(k, dtype=np.float32) - k /= np.sum(k) - - # Separable. - if k.ndim == 1 and k.size >= 8: - self.w = k.size - self.h = k.size - self.kx = k[np.newaxis, :] - self.ky = k[:, np.newaxis] * gain - self.kxy = None - - # Non-separable. - else: - if k.ndim == 1: - k = np.outer(k, k) - assert k.ndim == 2 - self.w = k.shape[1] - self.h = k.shape[0] - self.kx = None - self.ky = None - self.kxy = k * gain - -def _simple_upfirdn_2d(x, k, up=1, down=1, pad0=0, pad1=0, data_format='NCHW', impl='cuda'): - assert isinstance(k, _FilterKernel) - assert data_format in ['NCHW', 'NHWC'] - assert x.shape.rank == 4 - y = x - if data_format == 'NCHW': - y = tf.reshape(y, [-1, _shape(y, 2), _shape(y, 3), 1]) - if k.kx is not None: - y = upfirdn_2d(y, k.kx, upx=up, downx=down, padx0=pad0, padx1=pad1, impl=impl) - if k.ky is not None: - y = upfirdn_2d(y, k.ky, upy=up, downy=down, pady0=pad0, pady1=pad1, impl=impl) - if k.kxy is not None: - y = upfirdn_2d(y, k.kxy, upx=up, upy=up, downx=down, downy=down, padx0=pad0, padx1=pad1, pady0=pad0, pady1=pad1, impl=impl) - if data_format == 'NCHW': - y = tf.reshape(y, [-1, _shape(x, 1), _shape(y, 1), _shape(y, 2)]) - return y - -def _shape(tf_expr, dim_idx): - if tf_expr.shape.rank is not None: - dim = tf_expr.shape[dim_idx].value - if dim is not None: - return dim - return tf.shape(tf_expr)[dim_idx] - -#---------------------------------------------------------------------------- diff --git a/spaces/h2oai/wave-tour/examples/meta_script_callback.py b/spaces/h2oai/wave-tour/examples/meta_script_callback.py deleted file mode 100644 index 7f629db304cf3b951fff20e1eda3ad6962ef0991..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/meta_script_callback.py +++ /dev/null @@ -1,76 +0,0 @@ -# Meta / Script / Callback -# Handle events from external Javascript libraries. 
-# --- -import json -import random -import math -from h2o_wave import main, app, Q, ui - -# Create some data for a random graph -node_count = 100 -edge_count = 500 -nodes = [ - dict(id=f'n{i}', label=f'Node {i}', x=random.random(), y=random.random(), size=random.random(), color='#ff0000') - for i in range(node_count)] - -edges = [dict(id=f'e{i}', source=f'n{math.floor(random.random() * node_count)}', - target=f'n{math.floor(random.random() * node_count)}', size=random.random(), color='#666') for i in - range(edge_count)] - -graph_data = dict(nodes=nodes, edges=edges) - -# Serialize graph data to Javascript / JSON. -graph_data_js = f'const graph = {json.dumps(graph_data)};' - -# Define a script that uses Sigma.js to render our graph. -render_graph = graph_data_js + ''' -const s = new sigma({ graph: graph, container: 'graph' }); -s.bind('clickNode', (e) => { - // Emit an event when a node is clicked. - // All three arguments are arbitrary. - // Here, we use: - // - 'graph' to indicate the source of the event. - // - 'node_clicked' to indicate the type of event. - // - the third argument can be a string, number, boolean or any complex structure, like { foo: 'bar', qux: 42 } - // In Python, q.events.graph.node_clicked will be set to the ID of the clicked node. - wave.emit('graph', 'node_clicked', e.data.node.id); -}); -''' - - -@app('/demo') -async def serve(q: Q): - if not q.client.initialized: - q.page['meta'] = ui.meta_card( - box='', - # Load Sigma.js - scripts=[ui.script(path='https://cdnjs.cloudflare.com/ajax/libs/sigma.js/1.2.1/sigma.min.js')], - # Call Javascript to render our graph using Sigma.js. - script=ui.inline_script( - content=render_graph, - # Ensure that Sigma.js is available before running our script. - requires=['sigma'], - # Ensure that the 'graph' element is available before running our script. - targets=['graph'] - ) - ) - # Add a placeholder named 'graph' to house our rendered graph. - q.page['vis'] = ui.markup_card( - box='1 1 6 8', - title='Select a node', - content='
        ' - ) - # Add another card to display which node was selected. - q.page['details'] = ui.markdown_card( - box='1 9 6 1', - title='', - content='The selected node will be displayed here.', - ) - q.client.initialized = True - else: - if q.events.graph: - selected_node = q.events.graph.node_clicked - if selected_node: - q.page['details'].content = f'You clicked on node {selected_node}' - - await q.page.save() diff --git a/spaces/h2oai/wave-tour/examples/table_tags.py b/spaces/h2oai/wave-tour/examples/table_tags.py deleted file mode 100644 index 775cdb22670f7392fc423ffdd6668201e1272ae9..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/table_tags.py +++ /dev/null @@ -1,43 +0,0 @@ -# Table / Tags -# Use tags in order to emphasize a specific value. For multiple tags in a single row use `,` as a delimiter. -# --- -from faker import Faker -from h2o_wave import main, app, Q, ui - -fake = Faker() - -_id = 0 - - -class Issue: - def __init__(self, text: str, tag: str): - global _id - _id += 1 - self.id = f'I{_id}' - self.text = text - self.tag = tag - - -# Create some issues -issues = [Issue(text=fake.sentence(), tag=('FAIL' if i % 2 == 0 else 'DONE,SUCCESS')) for i in range(10)] - -columns = [ - ui.table_column(name='text', label='Issue', min_width='400px'), - ui.table_column(name='tag', label='Badge', cell_type=ui.tag_table_cell_type(name='tags', tags=[ - ui.tag(label='FAIL', color='$red'), - ui.tag(label='DONE', color='#D2E3F8', label_color='#053975'), - ui.tag(label='SUCCESS', color='$mint'), - ])), -] - - -@app('/demo') -async def serve(q: Q): - q.page['example'] = ui.form_card(box='1 1 -1 -1', items=[ - ui.table( - name='issues', - columns=columns, - rows=[ui.table_row(name=issue.id, cells=[issue.text, issue.tag]) for issue in issues], - ) - ]) - await q.page.save() diff --git a/spaces/haakohu/deep_privacy2_face/configs/anonymizers/market1501/blackout.py b/spaces/haakohu/deep_privacy2_face/configs/anonymizers/market1501/blackout.py deleted file mode 100644 index 14da21e3c4b367a942f9a99796a1d9996b773522..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2_face/configs/anonymizers/market1501/blackout.py +++ /dev/null @@ -1,8 +0,0 @@ -from ..FB_cse_mask_face import anonymizer, detector, common - -detector.score_threshold = .1 -detector.face_detector_cfg.confidence_threshold = .5 -detector.cse_cfg.score_thres = 0.3 -anonymizer.generators.face_G_cfg = None -anonymizer.generators.person_G_cfg = "configs/generators/dummy/maskout.py" -anonymizer.generators.cse_person_G_cfg = "configs/generators/dummy/maskout.py" \ No newline at end of file diff --git a/spaces/hakanwkwjbwbs/stablediffusionapi-anime-diffusion/app.py b/spaces/hakanwkwjbwbs/stablediffusionapi-anime-diffusion/app.py deleted file mode 100644 index 369a757d6321b54780f51859277861d451268c79..0000000000000000000000000000000000000000 --- a/spaces/hakanwkwjbwbs/stablediffusionapi-anime-diffusion/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stablediffusionapi/anime-diffusion").launch() \ No newline at end of file diff --git a/spaces/hbestm/gpt-academic-play/request_llm/test_llms.py b/spaces/hbestm/gpt-academic-play/request_llm/test_llms.py deleted file mode 100644 index 14401680c217a6c980eaa156d40d2cab548d7511..0000000000000000000000000000000000000000 --- a/spaces/hbestm/gpt-academic-play/request_llm/test_llms.py +++ /dev/null @@ -1,77 +0,0 @@ -# """ -# 对各个llm模型进行单元测试 -# """ -def validate_path(): - import os, sys - dir_name = 
os.path.dirname(__file__) - root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume) - sys.path.append(root_dir_assume) - -validate_path() # validate path so you can run from base directory - -from request_llm.bridge_moss import predict_no_ui_long_connection -# from request_llm.bridge_jittorllms_pangualpha import predict_no_ui_long_connection -# from request_llm.bridge_jittorllms_llama import predict_no_ui_long_connection - -llm_kwargs = { - 'max_length': 512, - 'top_p': 1, - 'temperature': 1, -} - -result = predict_no_ui_long_connection(inputs="你好", - llm_kwargs=llm_kwargs, - history=[], - sys_prompt="") -print('final result:', result) - - -result = predict_no_ui_long_connection(inputs="what is a hero?", - llm_kwargs=llm_kwargs, - history=["hello world"], - sys_prompt="") -print('final result:', result) - -result = predict_no_ui_long_connection(inputs="如何理解传奇?", - llm_kwargs=llm_kwargs, - history=[], - sys_prompt="") -print('final result:', result) - -# # print(result) -# from multiprocessing import Process, Pipe -# class GetGLMHandle(Process): -# def __init__(self): -# super().__init__(daemon=True) -# pass -# def run(self): -# # 子进程执行 -# # 第一次运行,加载参数 -# def validate_path(): -# import os, sys -# dir_name = os.path.dirname(__file__) -# root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') -# os.chdir(root_dir_assume + '/request_llm/jittorllms') -# sys.path.append(root_dir_assume + '/request_llm/jittorllms') -# validate_path() # validate path so you can run from base directory - -# jittorllms_model = None -# import types -# try: -# if jittorllms_model is None: -# from models import get_model -# # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"] -# args_dict = {'model': 'chatrwkv'} -# print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))') -# jittorllms_model = get_model(types.SimpleNamespace(**args_dict)) -# print('done get model') -# except: -# # self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。') -# raise RuntimeError("不能正常加载jittorllms的参数!") - -# x = GetGLMHandle() -# x.start() - - -# input() \ No newline at end of file diff --git a/spaces/hf4all/bingo-async-task/Dockerfile b/spaces/hf4all/bingo-async-task/Dockerfile deleted file mode 100644 index 3a82cf7a80e25f3fce184a1a1ce9a91981421730..0000000000000000000000000000000000000000 --- a/spaces/hf4all/bingo-async-task/Dockerfile +++ /dev/null @@ -1,155 +0,0 @@ -FROM python:3.11-slim-bullseye - -ENV DEBIAN_FRONTEND=noninteractive \ - TZ=Aisa/Shanghai \ - LC_CTYPE=C.UTF-8 \ - LANG=C.UTF-8 - -# Remove any third-party apt sources to avoid issues with expiring keys. 
-# Install some basic utilities -RUN rm -f /etc/apt/sources.list.d/*.list && \ - apt-get update && apt-get install --no-install-recommends -y \ - curl \ - ca-certificates \ - sudo \ - git \ - git-lfs \ - zip \ - unzip \ - htop \ - bzip2 \ - libx11-6 \ - nginx \ - vim \ - lsof \ - telnet \ - wget \ - build-essential \ - libsndfile-dev \ - software-properties-common \ - chromium \ - chromium-common \ - chromium-driver \ - xvfb \ - dumb-init \ - procps \ - xauth \ - && rm -rf /var/lib/apt/lists/* - -ARG BUILD_DATE -ARG VERSION -ARG CODE_RELEASE -RUN \ - echo "**** install openvscode-server runtime dependencies ****" && \ - apt-get update && \ - apt-get install -y \ - jq \ - libatomic1 \ - nano \ - net-tools \ - netcat && \ - echo "**** install openvscode-server ****" && \ - if [ -z ${CODE_RELEASE+x} ]; then \ - CODE_RELEASE=$(curl -sX GET "https://api.github.com/repos/gitpod-io/openvscode-server/releases/latest" \ - | awk '/tag_name/{print $4;exit}' FS='[""]' \ - | sed 's|^openvscode-server-v||'); \ - fi && \ - mkdir -p /app/openvscode-server && \ - curl -o \ - /tmp/openvscode-server.tar.gz -L \ - "https://github.com/gitpod-io/openvscode-server/releases/download/openvscode-server-v${CODE_RELEASE}/openvscode-server-v${CODE_RELEASE}-linux-x64.tar.gz" && \ - tar xf \ - /tmp/openvscode-server.tar.gz -C \ - /app/openvscode-server/ --strip-components=1 - - -RUN echo "**** clean up ****" && \ - apt-get clean && \ - rm -rf \ - /tmp/* \ - /var/lib/apt/lists/* \ - /var/tmp/* - -# Create a working directory -WORKDIR /app - -# Create a non-root user and switch to it -RUN adduser --disabled-password --gecos '' --shell /bin/bash user \ - && chown -R user:user /app -RUN echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-user -USER user - -# All users can use /home/user as their home directory -ENV HOME=/home/user -RUN mkdir $HOME/.cache $HOME/.config \ - && chmod -R 777 $HOME - -ENV NVM_DIR $HOME/.nvm -ENV NODE_VERSION 18 - -# Install nvm with node and npm -RUN curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash \ - && . 
$NVM_DIR/nvm.sh \ - && nvm install $NODE_VERSION \ - && nvm alias default $NODE_VERSION \ - && nvm use default \ - && npm i -g tsx tslab http-server miniflare@2 pm2 - -ENV NODE_PATH $NVM_DIR/v$NODE_VERSION/lib/node_modules -ENV PATH $NVM_DIR/v$NODE_VERSION/bin:$PATH - -# Set up the Conda environment -ENV CONDA_AUTO_UPDATE_CONDA=false \ - PATH=$HOME/miniconda/bin:$PATH -RUN curl -sLo ~/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-py310_23.5.2-0-Linux-x86_64.sh \ - && chmod +x ~/miniconda.sh \ - && ~/miniconda.sh -b -p ~/miniconda \ - && rm ~/miniconda.sh \ - && conda clean -ya - -WORKDIR $HOME/app - -####################################### -# Start root user section -####################################### - -USER root - -# User Debian packages -## Security warning : Potential user code executed as root (build time) -RUN --mount=target=/root/packages.txt,source=packages.txt \ - apt-get update && \ - xargs -r -a /root/packages.txt apt-get install -y --no-install-recommends \ - && rm -rf /var/lib/apt/lists/* - -RUN --mount=target=/root/on_startup.sh,source=on_startup.sh,readwrite \ - bash /root/on_startup.sh - -####################################### -# End root user section -####################################### - -USER user - -# Python packages -RUN --mount=target=requirements.txt,source=requirements.txt \ - pip install --no-cache-dir --upgrade -r requirements.txt - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . $HOME/app - -WORKDIR $HOME/app - -RUN chmod +x start_server.sh - -ENV PYTHONUNBUFFERED=1 \ - GRADIO_ALLOW_FLAGGING=never \ - GRADIO_NUM_PORTS=1 \ - GRADIO_SERVER_NAME=0.0.0.0 \ - GRADIO_THEME=huggingface \ - SYSTEM=spaces \ - SHELL=/bin/bash - -EXPOSE 7860 3000 - -CMD ["./start_server.sh"] \ No newline at end of file diff --git a/spaces/hiDenorIYamano/srt-translator/README.md b/spaces/hiDenorIYamano/srt-translator/README.md deleted file mode 100644 index 36d90746d5a413b8ebab1d5c7afb0956a6f5438d..0000000000000000000000000000000000000000 --- a/spaces/hiDenorIYamano/srt-translator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Srt Translator -emoji: 🏢 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/RUN_CALVINGFRONT_DETECTION.sh b/spaces/ho11laqe/nnUNet_calvingfront_detection/RUN_CALVINGFRONT_DETECTION.sh deleted file mode 100644 index de31d188a0a4609ac4bf03d1c1f21c59e98bd2bc..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/RUN_CALVINGFRONT_DETECTION.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -l - -while getopts ":m:d:" opt; do - case $opt in - m) model="$OPTARG";; - d) data="$OPTARG";; - *) echo "Unknown error occurred." 
- exit 1;; - esac -done -# Point to the folder with the SAR images -export data_raw=$data - -# Folders for processing -export nnUNet_raw_data_base=$data_raw'/data_nnUNet_preprocessed/NIFTI/' -export nnUNet_preprocessed=$data_raw'/data_nnUNet_preprocessed/' -export RESULTS_FOLDER=$data_raw'/calvingfronts/' - -# Convert & Preprocess -python3 nnunet/dataset_conversion/Task500_Glacier_inference.py -data_percentage 100 -base $data_raw - -# Inference -python3 nnunet/inference/predict_simple.py -i $nnUNet_raw_data_base'nnUNet_raw_data/Task500_Glacier_zonefronts/imagesTs/' -o $RESULTS_FOLDER/fold_0 -t 500 -m 2d -f 0 -p nnUNetPlansv2.1 -tr nnUNetTrainerV2 -model_folder_name $model - -# Convert model output to PNG/TIF -python3 nnunet/dataset_conversion/Task500_Glacier_reverse.py -i $RESULTS_FOLDER'fold_0/' diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/documentation/expected_epoch_times.md b/spaces/ho11laqe/nnUNet_calvingfront_detection/documentation/expected_epoch_times.md deleted file mode 100644 index 6afaecc14317657c9d43df38448e59ddef39c257..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/documentation/expected_epoch_times.md +++ /dev/null @@ -1,173 +0,0 @@ -# Introduction -Trainings can take some time. A well-running training setup is essential to get the most of nnU-Net. nnU-Net does not -require any fancy hardware, just a well-balanced system. We recommend at least 32 GB of RAM, 6 CPU cores (12 threads), -SSD storage (this can be SATA and does not have to be PCIe. DO NOT use an external SSD connected via USB!) and a -2080 ti GPU. If your system has multiple GPUs, the -other components need to scale linearly with the number of GPUs. - -# Benchmark Details -To ensure your system is running as intended, we provide some benchmark numbers against which you can compare. Here -are the details about benchmarking: - -- We benchmark **2d**, **3d_fullres** and a modified 3d_fullres that uses 3x the default batch size (called **3d_fullres large** here) -- The datasets **Task002_Heart**, **Task005_Prostate** and **Task003_Liver** of the Medical Segmentation Decathlon are used -(they provide a good spectrum of dataset properties) -- we use the nnUNetTrainerV2_5epochs trainer. This will run only for 5 epochs and it will skip validation. -From the 5 epochs, we select the fastest one as the epoch time. -- We will also be running the nnUNetTrainerV2_5epochs_dummyLoad trainer on the 3d_fullres config (called **3d_fullres dummy**). This trainer does not use -the dataloader and instead uses random dummy inputs, bypassing all data augmentation (CPU) and I/O bottlenecks. -- All trainings are done with mixed precision. This is why Pascal GPUs (Titan Xp) are so slow (they do not have -tensor cores) - -# How to run the benchmark -First go into the folder where the preprocessed data and plans file of the task you would like to use are located. For me this is -`/home/fabian/data/nnUNet_preprocessed/Task002_Heart` - -Then run the following python snippet. This will create our custom **3d_fullres_large** configuration. Note that this -large configuration will only run on GPUs with 16GB or more! We included it in the test because some GPUs -(V100, and probably also A100) can shine when they get more work to do per iteration. 
-```python -from batchgenerators.utilities.file_and_folder_operations import * -plans = load_pickle('nnUNetPlansv2.1_plans_3D.pkl') -stage = max(plans['plans_per_stage'].keys()) -plans['plans_per_stage'][stage]['batch_size'] *= 3 -save_pickle(plans, 'nnUNetPlansv2.1_bs3x_plans_3D.pkl') -``` - -Now you can run the benchmarks. Each should only take a couple of minutes -```bash -nnUNet_train 2d nnUNetTrainerV2_5epochs TASKID 0 -nnUNet_train 3d_fullres nnUNetTrainerV2_5epochs TASKID 0 -nnUNet_train 3d_fullres nnUNetTrainerV2_5epochs_dummyLoad TASKID 0 -nnUNet_train 3d_fullres nnUNetTrainerV2_5epochs TASKID 0 -p nnUNetPlansv2.1_bs3x # optional, only for GPUs with more than 16GB of VRAM -``` - -The time we are interested in is the epoch time. You can find it in the text output (stdout) or the log file -located in your `RESULTS_FOLDER`. Note that the trainers used here run for 5 epochs. Select the fastest time from your -output as your benchmark time. - -# Results - -The following table shows the results we are getting on our servers/workstations. We are using pytorch 1.7.1 that we -compiled ourselves using the instrucutions found [here](https://github.com/pytorch/pytorch#from-source). The cuDNN -version we used is 8.1.0.77. You should be seeing similar numbers when you -run the benchmark on your server/workstation. Note that fluctuations of a couple of seconds are normal! - -IMPORTANT: Compiling pytorch from source is currently mandatory for best performance! Pytorch 1.8 does not have -working tensorcore acceleration for 3D convolutions when installed with pip or conda! - -IMPORTANT: A100 and V100 are very fast with the newer cuDNN versions and need more CPU workers to prevent bottlenecks, -set the environment variable `nnUNet_n_proc_DA=XX` -to increase the number of data augmentation workers. Recommended: 20 for V100, 32 for A100. 
Datasets with many input -modalities (BraTS: 4) require A LOT of CPU and should be used with even larger values for `nnUNet_n_proc_DA` - -## Pytorch 1.7.1 compiled with cuDNN 8.1.0.77 - -| | A100 40GB (DGX A100) 400W | V100 32GB SXM3 (DGX2) 350W | V100 32GB PCIe 250W | Quadro RTX6000 24GB 260W | Titan RTX 24GB 280W | RTX 2080 ti 11GB 250W | Titan Xp 12GB 250W | -|-----------------------------------|---------------------------|----------------------------|---------------------|--------------------------|---------------------|-----------------------|--------------------| -| Task002_Heart 2d | 40.06 | 66.03 | 76.19 | 78.01 | 79.78 | 98.49 | 177.87 | -| Task002_Heart 3d_fullres | 51.17 | 85.96 | 99.29 | 110.47 | 112.34 | 148.36 | 504.93 | -| Task002_Heart 3d_fullres dummy | 48.53 | 79 | 89.66 | 105.16 | 105.56 | 138.4 | 501.64 | -| Task002_Heart 3d_fullres large | 118.5 | 220.45 | 251.25 | 322.28 | 300.96 | OOM | OOM | -| | | | | | | | | -| Task003_Liver 2d | 39.71 | 60.69 | 69.65 | 72.29 | 76.17 | 92.54 | 183.73 | -| Task003_Liver 3d_fullres | 44.48 | 75.53 | 87.19 | 85.18 | 86.17 | 106.76 | 290.87 | -| Task003_Liver 3d_fullres dummy | 41.1 | 70.96 | 80.1 | 79.43 | 79.43 | 101.54 | 289.03 | -| Task003_Liver 3d_fullres large | 115.33 | 213.27 | 250.09 | 261.54 | 266.66 | OOM | OOM | -| | | | | | | | | -| Task005_Prostate 2d | 42.21 | 68.88 | 80.46 | 83.62 | 81.59 | 102.81 | 183.68 | -| Task005_Prostate 3d_fullres | 47.19 | 76.33 | 85.4 | 100 | 102.05 | 132.82 | 415.45 | -| Task005_Prostate 3d_fullres dummy | 43.87 | 70.58 | 81.32 | 97.48 | 98.99 | 124.73 | 410.12 | -| Task005_Prostate 3d_fullres large | 117.31 | 209.12 | 234.28 | 277.14 | 284.35 | OOM | OOM | - -# Troubleshooting -Your epoch times are substantially slower than ours? That's not good! This section will help you figure out what is -wrong. Note that each system is unique and we cannot help you find bottlenecks beyond providing the information -presented in this section! - -## First step: Make sure you have the right software! -In order to get maximum performance, you need to have pytorch compiled with a recent cuDNN version (8002 or newer is a must!). -Unfortunately the currently provided pip/conda installable pytorch versions have a bug which causes their performance -to be very low (see https://github.com/pytorch/pytorch/issues/57115 and https://github.com/pytorch/pytorch/issues/50153). -They are about 2x-3x slower than the numbers we report in the table above. -You need to have a pytorch version that was compiled from source to get maximum performance as shown in the table above. -The easiest way to get that is by using the [Nvidia pytorch Docker](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch). -If you cannot use docker, you will need to compile pytorch -yourself. For that, first download and install cuDNN from the [Nvidia homepage](https://developer.nvidia.com/cudnn), then follow the -[instructions on the pytorch website](https://github.com/pytorch/pytorch#from-source) on how to compile it. - -If you compiled pytorch yourself, you can check for the correct cuDNN version by running: -```bash -python -c 'import torch;print(torch.backends.cudnn.version())' -``` -If the output is `8002` or higher, then you are good to go. If not you may have to take action. IMPORTANT: this -only applies to pytorch that was compiled from source. pip/conda installed pytorch will report a new cuDNN version -but still have poor performance due to the bug linked above. 
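For illustration, the version checks described above can be bundled into one short script. This is only a sketch, assuming nothing beyond an importable `torch`; the 8002 threshold is the cuDNN version quoted in this section.

```python
# Sanity check for the software requirements described above.
# Prints the PyTorch and cuDNN versions and warns if cuDNN looks too old for fast 3D convolutions.
import torch

print("PyTorch:", torch.__version__)

cudnn_version = torch.backends.cudnn.version()
print("cuDNN:", cudnn_version)
if cudnn_version is None or cudnn_version < 8002:
    print("WARNING: cuDNN older than 8002, expect much slower epoch times.")

if torch.cuda.is_available():
    print("GPU:", torch.cuda.get_device_name(0))
else:
    print("WARNING: PyTorch cannot see a CUDA device.")
```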
- -## Identifying the bottleneck -If the software is up to date and you are still experiencing problems, this is how you can figure out what is going on: - -While a training is running, run `htop` and `watch -n 0.1 nvidia-smi` (depending on your region you may have to use -`0,1` instead). If you have physical access to the machine, also have a look at the LED indicating I/O activity. - -Here is what you can read from that: -- `nvidia-smi` shows the GPU activity. `watch -n 0.1` makes this command refresh every 0.1s. This will allow you to -see your GPU in action. A well running training will have your GPU pegged at 90-100% with no drops in GPU utilization. -Your power should also be close to the maximum (for example `237W / 250 W`) at all times. -- `htop` gives you an overview of the CPU usage. nnU-Net uses 12 processes for data augmentation + one main process. -This means that up to 13 processes should be running simultaneously. -- the I/O LED indicates that your system is reading/writing data from/to your hard drive/SSD. Whenever this is -blinking your system is doing something with your HDD/SSD. - -### GPU bottleneck -If `nvidia-smi` is constantly showing 90-100% GPU utilization and the reported power draw is near the maximum, your -GPU is the bottleneck. This is great! That means that your other components are not slowing it down. Your epochs times -should be the same as ours reported above. If they are not then you need to investigate your software stack (see cuDNN stuff above). - -What can you do about it? -1) There is nothing holding you back. Everything is fine! -2) If you need faster training, consider upgrading your GPU. Performance numbers are above, feel free to use them for guidance. -3) Think about whether you need more (slower) GPUs or less (faster) GPUs. Make sure to include Server/Workstation -costs into your calculations. Sometimes it is better to go with more cheaper but slower GPUs run run multiple trainings -in parallel. - -### CPU bottleneck -You can recognize a CPU bottleneck as follows: -1) htop is consistently showing 10+ processes that are associated with your nnU-Net training -2) nvidia-smi is reporting jumps of GPU activity with zeroes in between - -What can you do about it? -1) Depending on your single core performance, some datasets may require more than the default 12 processes for data -augmentation. The CPU requirements for DA increase roughly linearly with the number of input modalities. Most datasets -will train fine with much less than 12 (6 or even just 4). But datasets with for example 4 modalities may require more. -If you have more than 12 CPU threads available, set the environment variable `nnUNet_n_proc_DA` to a number higher than 12. -2) If your CPU has less than 12 threads in total, running 12 threads can overburden it. Try lowering `nnUNet_n_proc_DA` -to the number of threads you have available. -3) (sounds stupid, but this is the only other way) upgrade your CPU. I have seen Servers with 8 CPU cores (16 threads) - and 8 GPUs in them. That is not well balanced. CPUs are cheap compared to GPUs. On a 'workstation' (single or dual GPU) - you can get something like a Ryzen 3900X or 3950X. On a server you could consider Xeon 6226R or 6258R on the Intel - side or the EPYC 7302P, 7402P, 7502P or 7702P on the AMD side. Make sure to scale the number of cores according to your - number of GPUs and use case. Feel free to also use our nnU-net recommendations from above. 
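As a concrete illustration of remedy 1 in the CPU-bottleneck list above, the worker count can be raised before a training is launched. This is only a sketch: the task ID is a placeholder, the trainer name is the standard one, and 20 workers is simply the V100 recommendation quoted earlier in this document.

```python
# Sketch: launch an nnU-Net training with more data augmentation workers than the default 12.
# "TASKID" is a placeholder; adjust nnUNet_n_proc_DA to match your CPU/GPU balance.
import os
import subprocess

env = dict(os.environ, nnUNet_n_proc_DA="20")
subprocess.run(
    ["nnUNet_train", "3d_fullres", "nnUNetTrainerV2", "TASKID", "0"],
    env=env,
    check=True,
)
```

The same effect can be had by exporting `nnUNet_n_proc_DA` in the shell before calling `nnUNet_train`.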
- -### I/O bottleneck -On a workstation, I/O bottlenecks can be identified by looking at the LED indicating I/O activity. This is what an -I/O bottleneck looks like: -- nvidia-smi is reporting jumps of GPU activity with zeroes in between -- htop is not showing many active CPU processes -- I/O LED is blinking rapidly or turned on constantly - -Detecting I/O bottlenecks is difficult on servers where you may not have physical access. Tools like `iotop` are -difficult to read and can only be run with sudo. However, the presence of an I/O LED is not strictly necessary. If -- nvidia-smi is reporting jumps of GPU activity with zeroes in between -- htop is not showing many active CPU processes - -then the only possible issue to my knowledge is in fact an I/O bottleneck. - -Here is what you can do about an I/O bottleneck: -1) Make sure you are actually using an SSD to store the preprocessed data (`nnUNet_preprocessed`). Do not use an -SSD connected via USB! Never use a HDD. Do not use a network drive that was not specifically designed to handle fast I/O -(Note that you can use a network drive if it was designed for this purpose. At the DKFZ we use a -[flashblade](https://www.purestorage.com/products/file-and-object/flashblade.html) connected via ethernet and that works -great) -2) A SATA SSD is only enough to feed 1-2 GPUs. If you have more GPUs installed you may have to upgrade to an nvme -drive (make sure to get PCIe interface!). diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task056_Verse_normalize_orientation.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task056_Verse_normalize_orientation.py deleted file mode 100644 index 61988d4a2d0664cfdee7aded2ecf7d8de6ad62e1..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task056_Verse_normalize_orientation.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -""" -This code is copied from https://gist.github.com/nlessmann/24d405eaa82abba6676deb6be839266c. All credits go to the -original author (user nlessmann on GitHub) -""" - -import numpy as np -import SimpleITK as sitk - - -def reverse_axes(image): - return np.transpose(image, tuple(reversed(range(image.ndim)))) - - -def read_image(imagefile): - image = sitk.ReadImage(imagefile) - data = reverse_axes(sitk.GetArrayFromImage(image)) # switch from zyx to xyz - header = { - 'spacing': image.GetSpacing(), - 'origin': image.GetOrigin(), - 'direction': image.GetDirection() - } - return data, header - - -def save_image(img: np.ndarray, header: dict, output_file: str): - """ - CAREFUL you need to restore_original_slice_orientation before saving! 
- :param img: - :param header: - :return: - """ - # reverse back - img = reverse_axes(img) # switch from zyx to xyz - img_itk = sitk.GetImageFromArray(img) - img_itk.SetSpacing(header['spacing']) - img_itk.SetOrigin(header['origin']) - if not isinstance(header['direction'], tuple): - img_itk.SetDirection(header['direction'].flatten()) - else: - img_itk.SetDirection(header['direction']) - - sitk.WriteImage(img_itk, output_file) - - -def swap_flip_dimensions(cosine_matrix, image, header=None): - # Compute swaps and flips - swap = np.argmax(abs(cosine_matrix), axis=0) - flip = np.sum(cosine_matrix, axis=0) - - # Apply transformation to image volume - image = np.transpose(image, tuple(swap)) - image = image[tuple(slice(None, None, int(f)) for f in flip)] - - if header is None: - return image - - # Apply transformation to header - header['spacing'] = tuple(header['spacing'][s] for s in swap) - header['direction'] = np.eye(3) - - return image, header - - -def normalize_slice_orientation(image, header): - # Preserve original header so that we can easily transform back - header['original'] = header.copy() - - # Compute inverse of cosine (round first because we assume 0/1 values only) - # to determine how the image has to be transposed and flipped for cosine = identity - cosine = np.asarray(header['direction']).reshape(3, 3) - cosine_inv = np.linalg.inv(np.round(cosine)) - - return swap_flip_dimensions(cosine_inv, image, header) - - -def restore_original_slice_orientation(mask, header): - # Use original orientation for transformation because we assume the image to be in - # normalized orientation, i.e., identity cosine) - cosine = np.asarray(header['original']['direction']).reshape(3, 3) - cosine_rnd = np.round(cosine) - - # Apply transformations to both the image and the mask - return swap_flip_dimensions(cosine_rnd, mask), header['original'] diff --git a/spaces/hysts/ControlNet-v1-1/app.py b/spaces/hysts/ControlNet-v1-1/app.py deleted file mode 100644 index c1d19e1e6dce8cc41857aedf10b0b4847823220c..0000000000000000000000000000000000000000 --- a/spaces/hysts/ControlNet-v1-1/app.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import gradio as gr -import torch - -from app_canny import create_demo as create_demo_canny -from app_depth import create_demo as create_demo_depth -from app_ip2p import create_demo as create_demo_ip2p -from app_lineart import create_demo as create_demo_lineart -from app_mlsd import create_demo as create_demo_mlsd -from app_normal import create_demo as create_demo_normal -from app_openpose import create_demo as create_demo_openpose -from app_scribble import create_demo as create_demo_scribble -from app_scribble_interactive import create_demo as create_demo_scribble_interactive -from app_segmentation import create_demo as create_demo_segmentation -from app_shuffle import create_demo as create_demo_shuffle -from app_softedge import create_demo as create_demo_softedge -from model import Model -from settings import ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID, SHOW_DUPLICATE_BUTTON - -DESCRIPTION = "# ControlNet v1.1" - -if not torch.cuda.is_available(): - DESCRIPTION += "\n

        Running on CPU 🥶 This demo does not work on CPU.

        " - -model = Model(base_model_id=DEFAULT_MODEL_ID, task_name="Canny") - -with gr.Blocks(css="style.css") as demo: - gr.Markdown(DESCRIPTION) - gr.DuplicateButton( - value="Duplicate Space for private use", - elem_id="duplicate-button", - visible=SHOW_DUPLICATE_BUTTON, - ) - - with gr.Tabs(): - with gr.TabItem("Canny"): - create_demo_canny(model.process_canny) - with gr.TabItem("MLSD"): - create_demo_mlsd(model.process_mlsd) - with gr.TabItem("Scribble"): - create_demo_scribble(model.process_scribble) - with gr.TabItem("Scribble Interactive"): - create_demo_scribble_interactive(model.process_scribble_interactive) - with gr.TabItem("SoftEdge"): - create_demo_softedge(model.process_softedge) - with gr.TabItem("OpenPose"): - create_demo_openpose(model.process_openpose) - with gr.TabItem("Segmentation"): - create_demo_segmentation(model.process_segmentation) - with gr.TabItem("Depth"): - create_demo_depth(model.process_depth) - with gr.TabItem("Normal map"): - create_demo_normal(model.process_normal) - with gr.TabItem("Lineart"): - create_demo_lineart(model.process_lineart) - with gr.TabItem("Content Shuffle"): - create_demo_shuffle(model.process_shuffle) - with gr.TabItem("Instruct Pix2Pix"): - create_demo_ip2p(model.process_ip2p) - - with gr.Accordion(label="Base model", open=False): - with gr.Row(): - with gr.Column(scale=5): - current_base_model = gr.Text(label="Current base model") - with gr.Column(scale=1): - check_base_model_button = gr.Button("Check current base model") - with gr.Row(): - with gr.Column(scale=5): - new_base_model_id = gr.Text( - label="New base model", - max_lines=1, - placeholder="runwayml/stable-diffusion-v1-5", - info="The base model must be compatible with Stable Diffusion v1.5.", - interactive=ALLOW_CHANGING_BASE_MODEL, - ) - with gr.Column(scale=1): - change_base_model_button = gr.Button("Change base model", interactive=ALLOW_CHANGING_BASE_MODEL) - if not ALLOW_CHANGING_BASE_MODEL: - gr.Markdown( - """The base model is not allowed to be changed in this Space so as not to slow down the demo, but it can be changed if you duplicate the Space.""" - ) - - check_base_model_button.click( - fn=lambda: model.base_model_id, - outputs=current_base_model, - queue=False, - api_name="check_base_model", - ) - gr.on( - triggers=[new_base_model_id.submit, change_base_model_button.click], - fn=model.set_base_model, - inputs=new_base_model_id, - outputs=current_base_model, - api_name=False, - ) - -if __name__ == "__main__": - demo.queue(max_size=20).launch() diff --git a/spaces/hzwluoye/gpt4/client/css/field.css b/spaces/hzwluoye/gpt4/client/css/field.css deleted file mode 100644 index 914425a75d9e62e6428bdb8f5de2c66c91f10d33..0000000000000000000000000000000000000000 --- a/spaces/hzwluoye/gpt4/client/css/field.css +++ /dev/null @@ -1,11 +0,0 @@ -.field { - display: flex; - align-items: center; - padding: 4px; -} - -@media screen and (max-width: 990px) { - .field { - flex-wrap: nowrap; - } -} diff --git a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/docs/eval.md b/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/docs/eval.md deleted file mode 100644 index dd1d9e257367b6422680966198646c45e5a2671d..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/docs/eval.md +++ /dev/null @@ -1,31 +0,0 @@ -## Eval on ICCV2021-MFR - -coming soon. - - -## Eval IJBC -You can eval ijbc with pytorch or onnx. - - -1. 
Eval IJBC With Onnx -```shell -CUDA_VISIBLE_DEVICES=0 python onnx_ijbc.py --model-root ms1mv3_arcface_r50 --image-path IJB_release/IJBC --result-dir ms1mv3_arcface_r50 -``` - -2. Eval IJBC With Pytorch -```shell -CUDA_VISIBLE_DEVICES=0,1 python eval_ijbc.py \ ---model-prefix ms1mv3_arcface_r50/backbone.pth \ ---image-path IJB_release/IJBC \ ---result-dir ms1mv3_arcface_r50 \ ---batch-size 128 \ ---job ms1mv3_arcface_r50 \ ---target IJBC \ ---network iresnet50 -``` - -## Inference - -```shell -python inference.py --weight ms1mv3_arcface_r50/backbone.pth --network r50 -``` diff --git a/spaces/innat/Global.Wheat.Detection.MaskRCNN/README.md b/spaces/innat/Global.Wheat.Detection.MaskRCNN/README.md deleted file mode 100644 index ad59a88a66e4daef2c04ae8c71ff0996e2fdf17a..0000000000000000000000000000000000000000 --- a/spaces/innat/Global.Wheat.Detection.MaskRCNN/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Global-Wheat-Detection -emoji: 🌾 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.0.20 -python_version: 3.7 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Brush Up Your English By St Imam Pdf Free [TOP] Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Brush Up Your English By St Imam Pdf Free [TOP] Download.md deleted file mode 100644 index 9aa3669dba0a448413951e0f5011730697bd20cb..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Brush Up Your English By St Imam Pdf Free [TOP] Download.md +++ /dev/null @@ -1,9 +0,0 @@ -

        brush up your english by st imam pdf free download


DOWNLOAD 🌟 https://urlin.us/2uEww8



        - -January 28, 2019 - Refresh your English from ST Imam. Free book for all FPSC PPSC CSS competitive exams Download from this link: . January 28 - Memorial Day of the Prophet Idris al-Aswad, the birthday of the Prophet Muhammad, the birthday of the Prophet and the month of Rabbi ul-Awwal (Birthday of the Messenger of Allah), the birthday of the Prophet. -On the night of January 28, from 27 to 28, the Moon will be in the sign of the zodiac Capricorn. -The Moon will be in this sign for six hours, that is, from 03:00 to 07:00 in the morning. -Therefore, at this time, for those who are engaged in the study of the Quran, and those who intend to study the Quran these days, there will be an opportunity to engage in this good. 8a78ff9644
        -
        -
        -

        diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/FX Draw Tools 20.1.15 Portable.md b/spaces/inplisQlawa/anything-midjourney-v4-1/FX Draw Tools 20.1.15 Portable.md deleted file mode 100644 index b1784845d248dd8adbbf223562a3253e9d09da12..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/FX Draw Tools 20.1.15 Portable.md +++ /dev/null @@ -1,27 +0,0 @@ - -

        How to Use FX Draw Tools 20.1.15 Portable to Create Stunning Mathematical Diagrams

        -

        If you are a mathematics teacher or student who needs to create high-quality diagrams for tests, exams, worksheets, web sites, presentations and demonstrations, you might want to check out FX Draw Tools 20.1.15 Portable. This is a powerful and easy-to-use software that lets you draw any kind of mathematical shape, graph, equation or function with just a few clicks.

        -

        FX Draw Tools 20.1.15 Portable is a portable version of FX Draw Tools, which means you can run it from any USB drive or external device without installing it on your computer. This makes it convenient and flexible for using it on different machines or locations. You can also save your work as editable files that can be opened by any FX Draw Tools program.

        -

        FX Draw Tools 20.1.15 Portable


Download File https://urlin.us/2uEw62



        -

        In this article, we will show you some of the features and benefits of using FX Draw Tools 20.1.15 Portable to create stunning mathematical diagrams.

        -

        Features and Benefits of FX Draw Tools 20.1.15 Portable

        -

        FX Draw Tools 20.1.15 Portable has over sixty specialized tools that make drawing mathematics simple and efficient. Here are some of the features and benefits of using this software:

        -
          -
        • You can draw anything from basic shapes like circles, triangles, squares and polygons to complex shapes like Venn diagrams, normal distribution curves, statistical graphs, tree diagrams, number lines, bearings diagrams and more.
        • -
        • You can sketch freehand mathematical diagrams and have them automatically converted to professional quality drawings that can be edited using the power of FX Draw.
        • -
        • You can use the same graphing engine that you will find in FX Graph to draw Cartesian functions, polar functions, slope fields, Argand diagrams, vector diagrams, 3D volumes of revolution, parametric relations, integrals, tangents and normals to curves and more.
        • -
        • You can use FX Equation to write equations for Word, Pages, LibreOffice, LaTeX and Moodle with a high-speed interface.
        • -
        • You can use FX Graph and FX Stat to access FX Draw's powerful function and statistical graphing tools with simplified interfaces.
        • -
        • You can create graphics that can be placed just about anywhere with the new Efofex Image ID system that allows you to quickly edit graphics that you have created in just about any context.
        • -
        • You can export your graphics as EPS (vector), PNG (bitmap), EMF (Windows metafile) or GIF (web) formats.
        • -
        -

        How to Use FX Draw Tools 20.1.15 Portable

        -

        To use FX Draw Tools 20.1.15 Portable, you need to download the software from the official website[^1^] or from a trusted source[^2^]. The file size is about 100 MB and it does not require installation. You just need to unzip the file and run the executable file inside the folder.

        -

        Once you run the software, you will see a main window with a toolbar on the top and a canvas on the bottom. The toolbar contains different icons that represent different tools for drawing mathematics. You can hover your mouse over each icon to see its name and description.

        -

        To start drawing, you need to select a tool from the toolbar and click on the canvas. Depending on the tool you choose, you will see different options and settings on the right side of the window. You can adjust these options according to your preferences and needs.

        -

        For example, if you want to draw a circle, you need to select the circle tool from the toolbar and click on the canvas where you want the center of the circle to be. Then you need to drag your mouse until you get the desired radius of the circle. You can also change the color, line width, fill style and other properties of the circle on the right side of the window.

        -

        To edit or delete an existing shape, you need to select it with the select tool from the toolbar and then use the options on the right side of the window or press delete on your keyboard.

        -

        -

        To save your work, you need to go to File > Save As and choose a file format and a location for your file. You can also print your work by going to File > Print or

        d5da3c52bf
        -
        -
        \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Melodyne Studio For Mac Crack Torrent ((FREE)).md b/spaces/inplisQlawa/anything-midjourney-v4-1/Melodyne Studio For Mac Crack Torrent ((FREE)).md deleted file mode 100644 index 324f0745d8fc1f09251b3867256d53ea3f89064e..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Melodyne Studio For Mac Crack Torrent ((FREE)).md +++ /dev/null @@ -1,80 +0,0 @@ -
        -

        How to Download and Install Melodyne Studio For Mac Crack Torrent

        -

        Melodyne Studio is a professional audio editing software that allows you to edit vocals and instruments in a musical way. You can correct pitch, timing and volume, create harmonies, transpose, quantize and manipulate audio with amazing flexibility and accuracy.

        -

        Melodyne Studio For Mac Crack Torrent


        Download Zip ☆☆☆☆☆ https://urlin.us/2uEwfO



        -

        Melodyne Studio is compatible with Mac OS and can be used as a standalone application or as a plugin in your digital audio workstation (DAW). However, Melodyne Studio is not a cheap software. It costs $699 for the full version, which may be too expensive for some music producers and hobbyists.

        -

        That's why some people are looking for Melodyne Studio For Mac Crack Torrent, which is a pirated version of the software that can be downloaded and installed for free. But how do you download and install Melodyne Studio For Mac Crack Torrent? And what are the risks and benefits of using it? In this article, we will answer these questions and guide you through the process of downloading and installing Melodyne Studio For Mac Crack Torrent.

        -

        What is Melodyne Studio For Mac Crack Torrent?

        -

        Melodyne Studio For Mac Crack Torrent is a file that contains the cracked version of Melodyne Studio for Mac OS. A cracked version of a software is a modified version that bypasses the original license or activation system of the software. This means that you can use the software without paying for it or registering it.

        -

        Melodyne Studio For Mac Crack Torrent can be downloaded from various websites or torrent platforms that offer cracked software. You can use a torrent client such as uTorrent or BitTorrent to download the file from other users who have already downloaded it. The file may be in a compressed format such as ZIP or RAR, or in a disk image format such as DMG or ISO.

        -

        -

        Once you have downloaded the file, you can unzip or mount it and run the installer or follow the instructions provided by the crack file or the readme file. You may need to copy and paste a crack file or a serial number to activate the software. You may also need to disable your internet connection and firewall during the installation process to prevent any online verification or activation.

        -

        What are the risks and benefits of using Melodyne Studio For Mac Crack Torrent?

        -

        Using Melodyne Studio For Mac Crack Torrent may seem like a good idea at first, but it comes with many risks and drawbacks that you should be aware of before you decide to do it. Here are some of them:

        -
          -
        • Risks: -
            -
          • Legal issues: Using Melodyne Studio For Mac Crack Torrent is illegal, as it violates the copyright laws and the terms of service of Celemony, the developer of Melodyne Studio. You could face legal consequences if you are caught using or distributing a cracked software, such as fines or even jail time.
          • -
          • Viruses and malware: Downloading Melodyne Studio For Mac Crack Torrent from untrusted sources can expose your computer to viruses and malware that can harm your system, steal your personal information, or encrypt your files and demand ransom. You may also download fake or corrupted files that won't work or damage your computer.
          • -
          • Lack of updates and support: Using Melodyne Studio For Mac Crack Torrent means that you won't be able to access the official updates and support from Celemony. This means that you won't be able to enjoy the latest features and bug fixes of Melodyne Studio, and you won't be able to get help if you encounter any problems or issues with the software.
          • -
          • Poor performance and compatibility: Using Melodyne Studio For Mac Crack Torrent may result in poor performance and compatibility of the software. You may experience crashes, glitches, errors, or compatibility issues with your audio interface, DAW, or other plugins. You may also lose some functionality or quality of Melodyne Studio, such as the ARA integration or the sound editor.
          • -
          • Ethical issues: Using Melodyne Studio For Mac Crack Torrent is unethical, as it deprives Celemony of their rightful income and recognition for their hard work and innovation. You may also lose respect from other music producers and professionals who use legitimate software and pay for their licenses.
          • -
          -
        • -
        • Benefits: -
            -
          • Free access: The main benefit of using Melodyne Studio For Mac Crack Torrent is that you can access the full version of Melodyne Studio for free, without paying anything or registering anything.
          • -
          • Easy installation: Another benefit of using Melodyne Studio For Mac Crack Torrent is that you can install it easily by following some simple steps. You don't need to go through any complicated online verification or activation process.
          • -
          • Creative possibilities: The last benefit of using Melodyne Studio For Mac Crack Torrent is that you can enjoy the creative possibilities of Melodyne Studio, which is one of the most advanced and powerful audio editing software for vocals and instruments. You can edit audio in a musical way, correcting pitch, timing and volume, creating harmonies, transposing, quantizing and manipulating audio with amazing flexibility and accuracy.
          • -
          -
        • -
        -

        How to download and install Melodyne Studio For Mac Crack Torrent?

        -

        If you still want to download and install Melodyne Studio For Mac Crack Torrent despite the risks and drawbacks, here are some steps that you can follow to do it safely and easily:

        -
          -
        1. Find a reliable source: The first step is to find a reliable source where you can download Melodyne Studio For Mac Crack Torrent. You can use a torrent search engine or a website that specializes in cracked software. However, be careful not to click on any ads or pop-ups that may redirect you to malicious sites or download unwanted programs. You should also check the comments and reviews of other users who have downloaded the same file to see whether it is safe and working.
        2. Download the file: The next step is to download the file using a torrent client such as uTorrent or BitTorrent. Make sure that you have enough disk space on your computer to store the file, which may be several gigabytes in size. You should also scan the file with an antivirus program before opening it to make sure that it is clean and virus-free.
        3. Install the software: The final step is to install the software on your computer. You may need to unzip or mount the file first using a program such as WinRAR or Daemon Tools. Then, follow the instructions provided by the installer or the readme file. You may need to copy and paste a crack file or a serial number to activate the software. You should also disable your internet connection and firewall during the installation process to prevent any online verification or activation.

          How to use Melodyne Studio For Mac Crack Torrent?


          Once you have installed Melodyne Studio For Mac Crack Torrent on your computer, you can start using it to edit your audio files. Here are some tips on how to use Melodyne Studio For Mac Crack Torrent:

          • Open Melodyne Studio: You can open Melodyne Studio as a standalone application or as a plugin in your DAW. To open it as a standalone application, double-click the Melodyne Studio icon on your desktop or in your applications folder. To open it as a plugin in your DAW, insert it as an effect on an audio track or a bus.
          • Load an audio file: To load an audio file into Melodyne Studio, you can either drag and drop it from your computer or use the File menu and choose Open. You can also record audio directly into Melodyne Studio using your audio interface and microphone.
          • Edit the audio: To edit the audio in Melodyne Studio, you can use the tools and functions available in the toolbar and the menu bar. You can select notes, move them, stretch them, shorten them, transpose them, quantize them, copy them, paste them, delete them, split them, join them, and more. You can also use the macros to apply automatic corrections for pitch, timing and volume.
          • Save and export: To save your edits in Melodyne Studio, use the File menu and choose Save or Save As. You can also export your edited audio as a new file by using the File menu and choosing Export Audio. You can choose from various formats and settings for your exported file.

          Conclusion


          Melodyne Studio For Mac Crack Torrent is a way to download and install Melodyne Studio for free on your Mac computer. However, it comes with many risks and drawbacks that may outweigh its benefits. You may face legal consequences, viruses and malware, lack of updates and support, poor performance and compatibility, and ethical issues.


          If you still want to download and install Melodyne Studio For Mac Crack Torrent, you should follow some steps to do it safely and easily: find a reliable source, download the file, install the software, and use it with caution. However, if you want to enjoy the full potential of Melodyne Studio without any hassle or worry, you should buy the software from Celemony's official website or authorized dealers.


          How to optimize your audio with Melodyne Studio For Mac Crack Torrent?


          Melodyne Studio For Mac Crack Torrent can help you optimize your audio by improving its pitch, timing and volume. Here are some tips on how to optimize your audio with Melodyne Studio For Mac Crack Torrent:

          • Correct pitch: To correct the pitch of your audio, you can use the Pitch tool or the Pitch macro. The Pitch tool allows you to manually adjust the pitch of each note by dragging it up or down. The Pitch macro allows you to automatically correct the pitch of your audio by choosing a preset or adjusting the parameters.
          • Correct timing: To correct the timing of your audio, you can use the Time tool or the Timing macro. The Time tool allows you to manually adjust the timing of each note by dragging it left or right. The Timing macro allows you to automatically correct the timing of your audio by choosing a preset or adjusting the parameters.
          • Correct volume: To correct the volume of your audio, you can use the Amplitude tool or the Volume macro. The Amplitude tool allows you to manually adjust the volume of each note by dragging it up or down. The Volume macro allows you to automatically correct the volume of your audio by choosing a preset or adjusting the parameters.

          How to enhance your audio with Melodyne Studio For Mac Crack Torrent?


          Melodyne Studio For Mac Crack Torrent can help you enhance your audio by adding creative effects and transformations. Here are some tips on how to enhance your audio with Melodyne Studio For Mac Crack Torrent:

          • Create harmonies: To create harmonies for your audio, you can use the Harmonize tool or the Harmonize macro. The Harmonize tool allows you to manually create harmonies for each note by dragging it up or down. The Harmonize macro allows you to automatically create harmonies for your audio by choosing a preset or adjusting the parameters.
          • Transpose: To transpose your audio, you can use the Transpose tool or the Transpose macro. The Transpose tool allows you to manually transpose your audio by dragging it up or down. The Transpose macro allows you to automatically transpose your audio by choosing a preset or adjusting the parameters.
          • Quantize: To quantize your audio, you can use the Quantize tool or the Quantize macro. The Quantize tool allows you to manually quantize your audio by dragging it left or right. The Quantize macro allows you to automatically quantize your audio by choosing a preset or adjusting the parameters.
          • Manipulate: To manipulate your audio, you can use the Formant tool, the Vibrato tool, or the Sound Editor. The Formant tool allows you to change the formant of each note by dragging it left or right. The Vibrato tool allows you to change the vibrato of each note by dragging it up or down. The Sound Editor allows you to change various aspects of each note such as attack, decay, sustain and release.


          Melodyne Studio is professional audio editing software that allows you to edit vocals and instruments in a musical way. You can correct pitch, timing and volume, create harmonies, transpose, quantize and manipulate audio with amazing flexibility and accuracy.

          Melodyne Studio is compatible with Mac OS and can be used as a standalone application or as a plugin in your DAW. However, Melodyne Studio is not cheap: it costs $699 for the full version, which may be too expensive for some music producers and hobbyists.

          If you are looking for a way to download and install Melodyne Studio for free on your Mac computer, you may be tempted to use Melodyne Studio For Mac Crack Torrent, a pirated version of the software that can be downloaded and installed for free. But before you do that, you should be aware of the risks and drawbacks of using it.

          Using Melodyne Studio For Mac Crack Torrent is illegal, as it violates copyright law and the terms of service of Celemony, the developer of Melodyne Studio. You could face legal consequences, such as fines or even jail time, if you are caught using or distributing cracked software.

          Using Melodyne Studio For Mac Crack Torrent can also expose your computer to viruses and malware that can harm your system, steal your personal information, or encrypt your files and demand a ransom. You may also download fake or corrupted files that won't work or that will damage your computer.

          Using Melodyne Studio For Mac Crack Torrent also means that you won't be able to access the official updates and support from Celemony. You won't be able to enjoy the latest features and bug fixes of Melodyne Studio, and you won't be able to get help if you encounter any problems or issues with the software.

          Using Melodyne Studio For Mac Crack Torrent may also result in poor performance and compatibility issues. You may experience crashes, glitches, errors, or incompatibilities with your audio interface, DAW, or other plugins. You may also lose some functionality or quality of Melodyne Studio, such as the ARA integration or the sound editor.

          Using Melodyne Studio For Mac Crack Torrent is also unethical, as it deprives Celemony of their rightful income and recognition for their hard work and innovation. You may also lose the respect of other music producers and professionals who use legitimate software and pay for their licenses.


          We hope this article has helped you understand what Melodyne Studio For Mac Crack Torrent is, what the risks and benefits of using it are, and how to download and install it on your Mac. If you have any questions or comments, please feel free to leave them below. Thank you for reading!

          \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Crack Per Autocad 2013 Ita BEST.md b/spaces/inreVtussa/clothingai/Examples/Crack Per Autocad 2013 Ita BEST.md deleted file mode 100644 index 730e92a6205ff2b8b933a3d8ea003f16626670fd..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Crack Per Autocad 2013 Ita BEST.md +++ /dev/null @@ -1,6 +0,0 @@ -

          crack per autocad 2013 ita


          DOWNLOAD ►►► https://tiurll.com/2uCkfe



          -Free Autodesk Revit Architecture 2010 Crack Download ⚙⚙⚙ DOWNLOAD ... Product Version: Revit Architecture 2013. ... Autodesk Revit Mep 2014 Ita Torrent ---> DOWNLOAD (Mirror #1) autodesk revit ... Use the Revit software to produce complete, consistent and coordinated building designs and documents, based on models.

          diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/McIdasImagePlugin.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/McIdasImagePlugin.py deleted file mode 100644 index 17c008b9a6a1218f6e51add4fda83acb92ee06ce..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/McIdasImagePlugin.py +++ /dev/null @@ -1,75 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# Basic McIdas support for PIL -# -# History: -# 1997-05-05 fl Created (8-bit images only) -# 2009-03-08 fl Added 16/32-bit support. -# -# Thanks to Richard Jones and Craig Swank for specs and samples. -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1997. -# -# See the README file for information on usage and redistribution. -# - -import struct - -from . import Image, ImageFile - - -def _accept(s): - return s[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04" - - -## -# Image plugin for McIdas area images. - - -class McIdasImageFile(ImageFile.ImageFile): - format = "MCIDAS" - format_description = "McIdas area file" - - def _open(self): - # parse area file directory - s = self.fp.read(256) - if not _accept(s) or len(s) != 256: - msg = "not an McIdas area file" - raise SyntaxError(msg) - - self.area_descriptor_raw = s - self.area_descriptor = w = [0] + list(struct.unpack("!64i", s)) - - # get mode - if w[11] == 1: - mode = rawmode = "L" - elif w[11] == 2: - # FIXME: add memory map support - mode = "I" - rawmode = "I;16B" - elif w[11] == 4: - # FIXME: add memory map support - mode = "I" - rawmode = "I;32B" - else: - msg = "unsupported McIdas format" - raise SyntaxError(msg) - - self.mode = mode - self._size = w[10], w[9] - - offset = w[34] + w[15] - stride = w[15] + w[10] * w[11] * w[14] - - self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))] - - -# -------------------------------------------------------------------- -# registry - -Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept) - -# no default extension diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/PpmImagePlugin.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/PpmImagePlugin.py deleted file mode 100644 index 2cb1e56365dc369d6719717f0f6775c8c9e2fdd4..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/PpmImagePlugin.py +++ /dev/null @@ -1,347 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# PPM support for PIL -# -# History: -# 96-03-24 fl Created -# 98-03-06 fl Write RGBA images (as RGB, that is) -# -# Copyright (c) Secret Labs AB 1997-98. -# Copyright (c) Fredrik Lundh 1996. -# -# See the README file for information on usage and redistribution. -# - - -from . import Image, ImageFile -from ._binary import i16be as i16 -from ._binary import o8 -from ._binary import o32le as o32 - -# -# -------------------------------------------------------------------- - -b_whitespace = b"\x20\x09\x0a\x0b\x0c\x0d" - -MODES = { - # standard - b"P1": "1", - b"P2": "L", - b"P3": "RGB", - b"P4": "1", - b"P5": "L", - b"P6": "RGB", - # extensions - b"P0CMYK": "CMYK", - # PIL extensions (for test purposes only) - b"PyP": "P", - b"PyRGBA": "RGBA", - b"PyCMYK": "CMYK", -} - - -def _accept(prefix): - return prefix[0:1] == b"P" and prefix[1] in b"0123456y" - - -## -# Image plugin for PBM, PGM, and PPM images. 
- - -class PpmImageFile(ImageFile.ImageFile): - format = "PPM" - format_description = "Pbmplus image" - - def _read_magic(self): - magic = b"" - # read until whitespace or longest available magic number - for _ in range(6): - c = self.fp.read(1) - if not c or c in b_whitespace: - break - magic += c - return magic - - def _read_token(self): - token = b"" - while len(token) <= 10: # read until next whitespace or limit of 10 characters - c = self.fp.read(1) - if not c: - break - elif c in b_whitespace: # token ended - if not token: - # skip whitespace at start - continue - break - elif c == b"#": - # ignores rest of the line; stops at CR, LF or EOF - while self.fp.read(1) not in b"\r\n": - pass - continue - token += c - if not token: - # Token was not even 1 byte - msg = "Reached EOF while reading header" - raise ValueError(msg) - elif len(token) > 10: - msg = f"Token too long in file header: {token.decode()}" - raise ValueError(msg) - return token - - def _open(self): - magic_number = self._read_magic() - try: - mode = MODES[magic_number] - except KeyError: - msg = "not a PPM file" - raise SyntaxError(msg) - - if magic_number in (b"P1", b"P4"): - self.custom_mimetype = "image/x-portable-bitmap" - elif magic_number in (b"P2", b"P5"): - self.custom_mimetype = "image/x-portable-graymap" - elif magic_number in (b"P3", b"P6"): - self.custom_mimetype = "image/x-portable-pixmap" - - maxval = None - decoder_name = "raw" - if magic_number in (b"P1", b"P2", b"P3"): - decoder_name = "ppm_plain" - for ix in range(3): - token = int(self._read_token()) - if ix == 0: # token is the x size - xsize = token - elif ix == 1: # token is the y size - ysize = token - if mode == "1": - self.mode = "1" - rawmode = "1;I" - break - else: - self.mode = rawmode = mode - elif ix == 2: # token is maxval - maxval = token - if not 0 < maxval < 65536: - msg = "maxval must be greater than 0 and less than 65536" - raise ValueError(msg) - if maxval > 255 and mode == "L": - self.mode = "I" - - if decoder_name != "ppm_plain": - # If maxval matches a bit depth, use the raw decoder directly - if maxval == 65535 and mode == "L": - rawmode = "I;16B" - elif maxval != 255: - decoder_name = "ppm" - - args = (rawmode, 0, 1) if decoder_name == "raw" else (rawmode, maxval) - self._size = xsize, ysize - self.tile = [(decoder_name, (0, 0, xsize, ysize), self.fp.tell(), args)] - - -# -# -------------------------------------------------------------------- - - -class PpmPlainDecoder(ImageFile.PyDecoder): - _pulls_fd = True - - def _read_block(self): - return self.fd.read(ImageFile.SAFEBLOCK) - - def _find_comment_end(self, block, start=0): - a = block.find(b"\n", start) - b = block.find(b"\r", start) - return min(a, b) if a * b > 0 else max(a, b) # lowest nonnegative index (or -1) - - def _ignore_comments(self, block): - if self._comment_spans: - # Finish current comment - while block: - comment_end = self._find_comment_end(block) - if comment_end != -1: - # Comment ends in this block - # Delete tail of comment - block = block[comment_end + 1 :] - break - else: - # Comment spans whole block - # So read the next block, looking for the end - block = self._read_block() - - # Search for any further comments - self._comment_spans = False - while True: - comment_start = block.find(b"#") - if comment_start == -1: - # No comment found - break - comment_end = self._find_comment_end(block, comment_start) - if comment_end != -1: - # Comment ends in this block - # Delete comment - block = block[:comment_start] + block[comment_end + 1 :] - else: - # Comment 
continues to next block(s) - block = block[:comment_start] - self._comment_spans = True - break - return block - - def _decode_bitonal(self): - """ - This is a separate method because in the plain PBM format, all data tokens are - exactly one byte, so the inter-token whitespace is optional. - """ - data = bytearray() - total_bytes = self.state.xsize * self.state.ysize - - while len(data) != total_bytes: - block = self._read_block() # read next block - if not block: - # eof - break - - block = self._ignore_comments(block) - - tokens = b"".join(block.split()) - for token in tokens: - if token not in (48, 49): - msg = b"Invalid token for this mode: %s" % bytes([token]) - raise ValueError(msg) - data = (data + tokens)[:total_bytes] - invert = bytes.maketrans(b"01", b"\xFF\x00") - return data.translate(invert) - - def _decode_blocks(self, maxval): - data = bytearray() - max_len = 10 - out_byte_count = 4 if self.mode == "I" else 1 - out_max = 65535 if self.mode == "I" else 255 - bands = Image.getmodebands(self.mode) - total_bytes = self.state.xsize * self.state.ysize * bands * out_byte_count - - half_token = False - while len(data) != total_bytes: - block = self._read_block() # read next block - if not block: - if half_token: - block = bytearray(b" ") # flush half_token - else: - # eof - break - - block = self._ignore_comments(block) - - if half_token: - block = half_token + block # stitch half_token to new block - half_token = False - - tokens = block.split() - - if block and not block[-1:].isspace(): # block might split token - half_token = tokens.pop() # save half token for later - if len(half_token) > max_len: # prevent buildup of half_token - msg = ( - b"Token too long found in data: %s" % half_token[: max_len + 1] - ) - raise ValueError(msg) - - for token in tokens: - if len(token) > max_len: - msg = b"Token too long found in data: %s" % token[: max_len + 1] - raise ValueError(msg) - value = int(token) - if value > maxval: - msg = f"Channel value too large for this mode: {value}" - raise ValueError(msg) - value = round(value / maxval * out_max) - data += o32(value) if self.mode == "I" else o8(value) - if len(data) == total_bytes: # finished! 
- break - return data - - def decode(self, buffer): - self._comment_spans = False - if self.mode == "1": - data = self._decode_bitonal() - rawmode = "1;8" - else: - maxval = self.args[-1] - data = self._decode_blocks(maxval) - rawmode = "I;32" if self.mode == "I" else self.mode - self.set_as_raw(bytes(data), rawmode) - return -1, 0 - - -class PpmDecoder(ImageFile.PyDecoder): - _pulls_fd = True - - def decode(self, buffer): - data = bytearray() - maxval = self.args[-1] - in_byte_count = 1 if maxval < 256 else 2 - out_byte_count = 4 if self.mode == "I" else 1 - out_max = 65535 if self.mode == "I" else 255 - bands = Image.getmodebands(self.mode) - while len(data) < self.state.xsize * self.state.ysize * bands * out_byte_count: - pixels = self.fd.read(in_byte_count * bands) - if len(pixels) < in_byte_count * bands: - # eof - break - for b in range(bands): - value = ( - pixels[b] if in_byte_count == 1 else i16(pixels, b * in_byte_count) - ) - value = min(out_max, round(value / maxval * out_max)) - data += o32(value) if self.mode == "I" else o8(value) - rawmode = "I;32" if self.mode == "I" else self.mode - self.set_as_raw(bytes(data), rawmode) - return -1, 0 - - -# -# -------------------------------------------------------------------- - - -def _save(im, fp, filename): - if im.mode == "1": - rawmode, head = "1;I", b"P4" - elif im.mode == "L": - rawmode, head = "L", b"P5" - elif im.mode == "I": - rawmode, head = "I;16B", b"P5" - elif im.mode in ("RGB", "RGBA"): - rawmode, head = "RGB", b"P6" - else: - msg = f"cannot write mode {im.mode} as PPM" - raise OSError(msg) - fp.write(head + b"\n%d %d\n" % im.size) - if head == b"P6": - fp.write(b"255\n") - elif head == b"P5": - if rawmode == "L": - fp.write(b"255\n") - else: - fp.write(b"65535\n") - ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))]) - - # ALTERNATIVE: save via builtin debug function - # im._dump(filename) - - -# -# -------------------------------------------------------------------- - - -Image.register_open(PpmImageFile.format, PpmImageFile, _accept) -Image.register_save(PpmImageFile.format, _save) - -Image.register_decoder("ppm", PpmDecoder) -Image.register_decoder("ppm_plain", PpmPlainDecoder) - -Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm", ".pnm"]) - -Image.register_mime(PpmImageFile.format, "image/x-portable-anymap") diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/typedefs.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/typedefs.py deleted file mode 100644 index 84283d9a4634a4836cd50cabe34efd2ae5915f56..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/typedefs.py +++ /dev/null @@ -1,64 +0,0 @@ -import json -import os -import sys -from typing import ( - TYPE_CHECKING, - Any, - Awaitable, - Callable, - Iterable, - Mapping, - Tuple, - Union, -) - -from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy, istr -from yarl import URL - -# These are for other modules to use (to avoid repeating the conditional import). 
-if sys.version_info >= (3, 8): - from typing import Final as Final, Protocol as Protocol, TypedDict as TypedDict -else: - from typing_extensions import ( # noqa: F401 - Final, - Protocol as Protocol, - TypedDict as TypedDict, - ) - -DEFAULT_JSON_ENCODER = json.dumps -DEFAULT_JSON_DECODER = json.loads - -if TYPE_CHECKING: # pragma: no cover - _CIMultiDict = CIMultiDict[str] - _CIMultiDictProxy = CIMultiDictProxy[str] - _MultiDict = MultiDict[str] - _MultiDictProxy = MultiDictProxy[str] - from http.cookies import BaseCookie, Morsel - - from .web import Request, StreamResponse -else: - _CIMultiDict = CIMultiDict - _CIMultiDictProxy = CIMultiDictProxy - _MultiDict = MultiDict - _MultiDictProxy = MultiDictProxy - -Byteish = Union[bytes, bytearray, memoryview] -JSONEncoder = Callable[[Any], str] -JSONDecoder = Callable[[str], Any] -LooseHeaders = Union[Mapping[Union[str, istr], str], _CIMultiDict, _CIMultiDictProxy] -RawHeaders = Tuple[Tuple[bytes, bytes], ...] -StrOrURL = Union[str, URL] - -LooseCookiesMappings = Mapping[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]] -LooseCookiesIterables = Iterable[ - Tuple[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]] -] -LooseCookies = Union[ - LooseCookiesMappings, - LooseCookiesIterables, - "BaseCookie[str]", -] - -Handler = Callable[["Request"], Awaitable["StreamResponse"]] - -PathLike = Union[str, "os.PathLike[str]"] diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/schema/__init__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/schema/__init__.py deleted file mode 100644 index 1d4640565ae2765d9ca96a509dc9809217f62f2f..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/schema/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Init file.""" diff --git a/spaces/jonathanjordan21/ads-video-generator/app.py b/spaces/jonathanjordan21/ads-video-generator/app.py deleted file mode 100644 index 3b0a74f85eb5470a8c114429434d0fcc9fd6914d..0000000000000000000000000000000000000000 --- a/spaces/jonathanjordan21/ads-video-generator/app.py +++ /dev/null @@ -1,68 +0,0 @@ -from langchain.llms import HuggingFacePipeline -import torch -from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSeq2SeqLM - -from components import caption_chain, tag_chain -from components import pexels, utils -import os, gc -import gradio as gr - -model = AutoModelForSeq2SeqLM.from_pretrained("declare-lab/flan-alpaca-gpt4-xl") -tokenizer = AutoTokenizer.from_pretrained("declare-lab/flan-alpaca-gpt4-xl") - -pipe = pipeline( - 'text2text-generation', - model=model, - tokenizer= tokenizer, - max_length=120 -) - -local_llm = HuggingFacePipeline(pipeline=pipe) - -llm_chain = caption_chain.chain(llm=local_llm) -sum_llm_chain = tag_chain.chain(llm=local_llm) - -pexels_api_key = os.getenv('pexels_api_key') - -def pred(product_name, orientation): - if orientation == "Shorts/Reels/TikTok (1080 x 1920)": - orientation = "potrait" - height = 1920 - width = 1080 - elif orientation == "Youtube Videos (1920 x 1080)": - orientation = "landscape" - height = 1080 - width = 1920 - else : - orientation = "square" - height = 1080 - width = 1080 - folder_name, sentences = pexels.generate_videos(product_name, pexels_api_key, orientation, height, width, llm_chain, sum_llm_chain) - gc.collect() - utils.combine_videos(folder_name) - return ["\n".join(sentences), os.path.join(folder_name, "Final_Ad_Video.mp4")] - 
#{'video':os.path.join(folder_name, "Final_Ad_Video.mp4"), - # 'captions':"\n".join(sentences)} - - -with gr.Blocks() as demo: - gr.Markdown( - """ - # Ads Generator - Create video ads based on your product name using AI - ### Note : the video generation takes about 2-4 minutes - """ - ) - dimension = gr.Dropdown( - ["Shorts/Reels/TikTok (1080 x 1920)", "Facebook/Youtube Videos (1920 x 1080)", "Square (1080 x 1080)"], - label="Video Dimension", info="Choose dimension" - ) - product_name = gr.Textbox(label="product name") - captions = gr.Textbox(label="captions") - video = gr.Video() - btn = gr.Button("Submit") - btn.click(pred, inputs=[product_name, dimension], outputs=[captions,video]) - - - -demo.launch() \ No newline at end of file diff --git a/spaces/jskalbg/ChatDev01/camel/messages/__init__.py b/spaces/jskalbg/ChatDev01/camel/messages/__init__.py deleted file mode 100644 index 4fe78e32926614bdf70ae5df5e5a949d08e31c04..0000000000000000000000000000000000000000 --- a/spaces/jskalbg/ChatDev01/camel/messages/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -from typing import Dict, Union - -OpenAISystemMessage = Dict[str, str] -OpenAIAssistantMessage = Dict[str, str] -OpenAIUserMessage = Dict[str, str] -OpenAIChatMessage = Union[OpenAIUserMessage, OpenAIAssistantMessage] -OpenAIMessage = Union[OpenAISystemMessage, OpenAIChatMessage] - -from .base import BaseMessage # noqa: E402 -from .system_messages import ( # noqa: E402 - SystemMessage, AssistantSystemMessage, UserSystemMessage, -) -from .chat_messages import ( # noqa: E402 - ChatMessage, AssistantChatMessage, UserChatMessage, -) - -MessageType = Union[BaseMessage, SystemMessage, AssistantSystemMessage, - UserSystemMessage, ChatMessage, AssistantChatMessage, - UserChatMessage] -SystemMessageType = Union[SystemMessage, AssistantSystemMessage, - UserSystemMessage] -ChatMessageType = Union[ChatMessage, AssistantChatMessage, UserChatMessage] - -__all__ = [ - 'OpenAISystemMessage', - 'OpenAIAssistantMessage', - 'OpenAIUserMessage', - 'OpenAIChatMessage', - 'OpenAIMessage', - 'BaseMessage', - 'SystemMessage', - 'AssistantSystemMessage', - 'UserSystemMessage', - 'ChatMessage', - 'AssistantChatMessage', - 'UserChatMessage', - 'MessageType', - 'SystemMessageType', - 'ChatMessageType', -] diff --git a/spaces/kamaldeep132/pdfGPT/app.py b/spaces/kamaldeep132/pdfGPT/app.py deleted file mode 100644 index 08a19891ebfca3180a7adcb4c8f52e6e5817b605..0000000000000000000000000000000000000000 --- a/spaces/kamaldeep132/pdfGPT/app.py +++ /dev/null @@ -1,94 +0,0 @@ -import json -from tempfile import _TemporaryFileWrapper - -import gradio as gr -import requests - - -def ask_api( - lcserve_host: str, - url: str, - file: _TemporaryFileWrapper, - question: str, - openAI_key: str, -) -> str: - if not lcserve_host.startswith('http'): - return '[ERROR]: Invalid API Host' - - if 
url.strip() == '' and file == None: - return '[ERROR]: Both URL and PDF is empty. Provide atleast one.' - - if url.strip() != '' and file != None: - return '[ERROR]: Both URL and PDF is provided. Please provide only one (eiter URL or PDF).' - - if question.strip() == '': - return '[ERROR]: Question field is empty' - - _data = { - 'question': question, - 'envs': { - 'OPENAI_API_KEY': openAI_key, - }, - } - - if url.strip() != '': - r = requests.post( - f'{lcserve_host}/ask_url', - json={'url': url, **_data}, - ) - - else: - with open(file.name, 'rb') as f: - r = requests.post( - f'{lcserve_host}/ask_file', - params={'input_data': json.dumps(_data)}, - files={'file': f}, - ) - - if r.status_code != 200: - raise ValueError(f'[ERROR]: {r.text}') - - return r.json()['result'] - - -title = 'PDF GPT' -description = """ PDF GPT allows you to chat with your PDF file using Universal Sentence Encoder and Open AI. It gives hallucination free response than other tools as the embeddings are better than OpenAI. The returned response can even cite the page number in square brackets([]) where the information is located, adding credibility to the responses and helping to locate pertinent information quickly.""" - -with gr.Blocks() as demo: - gr.Markdown(f'

          {title}

          ') - gr.Markdown(description) - - with gr.Row(): - with gr.Group(): - lcserve_host = gr.Textbox( - label='Enter your API Host here', - value='https://langchain-80341177bb.wolf.jina.ai', - placeholder='http://localhost:8080', - ) - gr.Markdown( - f'

          Get your Open AI API key here

          ' - ) - openAI_key = gr.Textbox( - label='Enter your OpenAI API key here', type='password' - ) - pdf_url = gr.Textbox(label='Enter PDF URL here') - gr.Markdown("

          OR

          ") - file = gr.File( - label='Upload your PDF/ Research Paper / Book here', file_types=['.pdf'] - ) - question = gr.Textbox(label='Enter your question here') - btn = gr.Button(value='Submit') - btn.style(full_width=True) - - with gr.Group(): - answer = gr.Textbox(label='The answer to your question is :') - - btn.click( - ask_api, - inputs=[lcserve_host, pdf_url, file, question, openAI_key], - outputs=[answer], - ) - -# demo.app.servers.timeout = 60000 # Set the maximum return time for the results of accessing the upstream server - -demo.launch(server_port=7860, enable_queue=True) # `enable_queue=True` to ensure the validity of multi-user requests diff --git a/spaces/kcagle/AutoGPT/autogpt/commands/image_gen.py b/spaces/kcagle/AutoGPT/autogpt/commands/image_gen.py deleted file mode 100644 index 0809fcdd3e38b52a2ce09ca1444f2574813d40f9..0000000000000000000000000000000000000000 --- a/spaces/kcagle/AutoGPT/autogpt/commands/image_gen.py +++ /dev/null @@ -1,163 +0,0 @@ -""" Image Generation Module for AutoGPT.""" -import io -import os.path -import uuid -from base64 import b64decode - -import openai -import requests -from PIL import Image - -from autogpt.config import Config -from autogpt.workspace import path_in_workspace - -CFG = Config() - - -def generate_image(prompt: str, size: int = 256) -> str: - """Generate an image from a prompt. - - Args: - prompt (str): The prompt to use - size (int, optional): The size of the image. Defaults to 256. (Not supported by HuggingFace) - - Returns: - str: The filename of the image - """ - filename = f"{str(uuid.uuid4())}.jpg" - - # DALL-E - if CFG.image_provider == "dalle": - return generate_image_with_dalle(prompt, filename, size) - # HuggingFace - elif CFG.image_provider == "huggingface": - return generate_image_with_hf(prompt, filename) - # SD WebUI - elif CFG.image_provider == "sdwebui": - return generate_image_with_sd_webui(prompt, filename, size) - return "No Image Provider Set" - - -def generate_image_with_hf(prompt: str, filename: str) -> str: - """Generate an image with HuggingFace's API. - - Args: - prompt (str): The prompt to use - filename (str): The filename to save the image to - - Returns: - str: The filename of the image - """ - API_URL = ( - f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}" - ) - if CFG.huggingface_api_token is None: - raise ValueError( - "You need to set your Hugging Face API token in the config file." - ) - headers = { - "Authorization": f"Bearer {CFG.huggingface_api_token}", - "X-Use-Cache": "false", - } - - response = requests.post( - API_URL, - headers=headers, - json={ - "inputs": prompt, - }, - ) - - image = Image.open(io.BytesIO(response.content)) - print(f"Image Generated for prompt:{prompt}") - - image.save(path_in_workspace(filename)) - - return f"Saved to disk:{filename}" - - -def generate_image_with_dalle(prompt: str, filename: str) -> str: - """Generate an image with DALL-E. - - Args: - prompt (str): The prompt to use - filename (str): The filename to save the image to - - Returns: - str: The filename of the image - """ - openai.api_key = CFG.openai_api_key - - # Check for supported image sizes - if size not in [256, 512, 1024]: - closest = min([256, 512, 1024], key=lambda x: abs(x - size)) - print( - f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}." 
- ) - size = closest - - response = openai.Image.create( - prompt=prompt, - n=1, - size=f"{size}x{size}", - response_format="b64_json", - ) - - print(f"Image Generated for prompt:{prompt}") - - image_data = b64decode(response["data"][0]["b64_json"]) - - with open(path_in_workspace(filename), mode="wb") as png: - png.write(image_data) - - return f"Saved to disk:{filename}" - - -def generate_image_with_sd_webui( - prompt: str, - filename: str, - size: int = 512, - negative_prompt: str = "", - extra: dict = {}, -) -> str: - """Generate an image with Stable Diffusion webui. - Args: - prompt (str): The prompt to use - filename (str): The filename to save the image to - size (int, optional): The size of the image. Defaults to 256. - negative_prompt (str, optional): The negative prompt to use. Defaults to "". - extra (dict, optional): Extra parameters to pass to the API. Defaults to {}. - Returns: - str: The filename of the image - """ - # Create a session and set the basic auth if needed - s = requests.Session() - if CFG.sd_webui_auth: - username, password = CFG.sd_webui_auth.split(":") - s.auth = (username, password or "") - - # Generate the images - response = requests.post( - f"{CFG.sd_webui_url}/sdapi/v1/txt2img", - json={ - "prompt": prompt, - "negative_prompt": negative_prompt, - "sampler_index": "DDIM", - "steps": 20, - "cfg_scale": 7.0, - "width": size, - "height": size, - "n_iter": 1, - **extra, - }, - ) - - print(f"Image Generated for prompt:{prompt}") - - # Save the image to disk - response = response.json() - b64 = b64decode(response["images"][0].split(",", 1)[0]) - image = Image.open(io.BytesIO(b64)) - image.save(path_in_workspace(filename)) - - return f"Saved to disk:{filename}" diff --git a/spaces/kcagle/AutoGPT/autogpt/speech/macos_tts.py b/spaces/kcagle/AutoGPT/autogpt/speech/macos_tts.py deleted file mode 100644 index 4c072ce256782e83a578b5181abf1a7b524c621b..0000000000000000000000000000000000000000 --- a/spaces/kcagle/AutoGPT/autogpt/speech/macos_tts.py +++ /dev/null @@ -1,21 +0,0 @@ -""" MacOS TTS Voice. """ -import os - -from autogpt.speech.base import VoiceBase - - -class MacOSTTS(VoiceBase): - """MacOS TTS Voice.""" - - def _setup(self) -> None: - pass - - def _speech(self, text: str, voice_index: int = 0) -> bool: - """Play the given text.""" - if voice_index == 0: - os.system(f'say "{text}"') - elif voice_index == 1: - os.system(f'say -v "Ava (Premium)" "{text}"') - else: - os.system(f'say -v Samantha "{text}"') - return True diff --git a/spaces/keithhon/logo-generator/dalle/models/tokenizer.py b/spaces/keithhon/logo-generator/dalle/models/tokenizer.py deleted file mode 100644 index ffa3e7e3c5a5a6e22a9ac0fca1df7ccbb38c2846..0000000000000000000000000000000000000000 --- a/spaces/keithhon/logo-generator/dalle/models/tokenizer.py +++ /dev/null @@ -1,26 +0,0 @@ -# ------------------------------------------------------------------------------------ -# Minimal DALL-E -# Copyright (c) 2021 KakaoBrain. All Rights Reserved. 
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------ - -import os -from functools import partial -from tokenizers import CharBPETokenizer - - -def build_tokenizer(path: str, - context_length: int = 64, - *args, - **kwargs): - from_file = partial(CharBPETokenizer.from_file, - vocab_filename=os.path.join(path, 'bpe-16k-vocab.json'), - merges_filename=os.path.join(path, 'bpe-16k-merges.txt'), - unk_token='[UNK]') - tokenizer = from_file(*args, **kwargs) - tokenizer.add_special_tokens(['[PAD]']) - tokenizer.enable_padding(length=context_length, - pad_id=tokenizer.token_to_id('[PAD]')) - tokenizer.enable_truncation(max_length=context_length) - print(f'{path} successfully restored..') - return tokenizer diff --git a/spaces/ken4005/Uhi-ChatGPT/run_Linux.sh b/spaces/ken4005/Uhi-ChatGPT/run_Linux.sh deleted file mode 100644 index 62af07283093d8e580763d7acfe493c3d88e7b08..0000000000000000000000000000000000000000 --- a/spaces/ken4005/Uhi-ChatGPT/run_Linux.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# 获取脚本所在目录 -script_dir=$(dirname "$0") - -# 将工作目录更改为脚本所在目录 -cd "$script_dir" - -# 检查Git仓库是否有更新 -git remote update -pwd - -if ! git status -uno | grep 'up to date' > /dev/null; then - # 如果有更新,关闭当前运行的服务器 - pkill -f ChuanhuChatbot.py - - # 拉取最新更改 - git pull - - # 安装依赖 - pip3 install -r requirements.txt - - # 重新启动服务器 - nohup python3 ChuanhuChatbot.py & -fi diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/upernet_uniformer.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/upernet_uniformer.py deleted file mode 100644 index 41aa4db809dc6e2c508e98051f61807d07477903..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/upernet_uniformer.py +++ /dev/null @@ -1,43 +0,0 @@ -# model settings -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained=None, - backbone=dict( - type='UniFormer', - embed_dim=[64, 128, 320, 512], - layers=[3, 4, 8, 3], - head_dim=64, - mlp_ratio=4., - qkv_bias=True, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1), - decode_head=dict( - type='UPerHead', - in_channels=[64, 128, 320, 512], - in_index=[0, 1, 2, 3], - pool_scales=(1, 2, 3, 6), - channels=512, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=320, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) \ No newline at end of file diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/download_ted_and_extract.py b/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/download_ted_and_extract.py deleted file mode 100644 index eb756680fa7dc31a14ba45c216776a6d60c16b60..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/download_ted_and_extract.py +++ /dev/null @@ -1,338 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -import itertools -import os -import csv -from collections import defaultdict -from six.moves import zip -import io -import wget -import sys - -from subprocess import check_call, check_output - -# scripts and data locations -CWD = os.getcwd() -UTILS = f"{CWD}/utils" - -MOSES = f"{UTILS}/mosesdecoder" - -WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) - -if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): - print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') - sys.exit(-1) - - -# please donwload mosesdecoder here: -detok_cmd = f'{MOSES}/scripts/tokenizer/detokenizer.perl' - - -def call(cmd): - print(f"Executing: {cmd}") - check_call(cmd, shell=True) - -class MultiLingualAlignedCorpusReader(object): - """A class to read TED talk dataset - """ - - def __init__(self, corpus_path, delimiter='\t', - target_token=True, bilingual=True, corpus_type='file', - lang_dict={'source': ['fr'], 'target': ['en']}, - eval_lang_dict=None, zero_shot=False, - detok=True, - ): - - self.empty_line_flag = 'NULL' - self.corpus_path = corpus_path - self.delimiter = delimiter - self.bilingual = bilingual - self.lang_dict = lang_dict - self.lang_set = set() - self.target_token = target_token - self.zero_shot = zero_shot - self.eval_lang_dict = eval_lang_dict - self.corpus_type = corpus_type - self.detok = detok - - for list_ in self.lang_dict.values(): - for lang in list_: - self.lang_set.add(lang) - - self.data = dict() - self.data['train'] = self.read_aligned_corpus(split_type='train') - self.data['test'] = self.read_aligned_corpus(split_type='test') - self.data['dev'] = self.read_aligned_corpus(split_type='dev') - - def read_data(self, file_loc_): - data_list = list() - with io.open(file_loc_, 'r', encoding='utf8') as fp: - for line in fp: - try: - text = line.strip() - except IndexError: - text = self.empty_line_flag - data_list.append(text) - return data_list - - def filter_text(self, dict_): - if self.target_token: - field_index = 1 - else: - field_index = 0 - data_dict = defaultdict(list) - list1 = dict_['source'] - list2 = dict_['target'] - for sent1, sent2 in zip(list1, list2): - try: - src_sent = ' '.join(sent1.split()[field_index: ]) - except IndexError: - src_sent = 'NULL' - - if src_sent.find(self.empty_line_flag) != -1 or len(src_sent) == 0: - continue - - elif sent2.find(self.empty_line_flag) != -1 or len(sent2) == 0: - continue - - else: - data_dict['source'].append(sent1) - data_dict['target'].append(sent2) - return data_dict - - def read_file(self, split_type, data_type): - return self.data[split_type][data_type] - - def save_file(self, path_, split_type, data_type, lang): - tok_file = tok_file_name(path_, lang) - with io.open(tok_file, 'w', encoding='utf8') as fp: - for line in self.data[split_type][data_type]: - fp.write(line + '\n') - if self.detok: - de_tok(tok_file, lang) - - def add_target_token(self, list_, lang_id): - new_list = list() - token = '__' + lang_id + '__' - for sent in list_: - new_list.append(token + ' ' + sent) - return new_list - - def read_from_single_file(self, path_, s_lang, t_lang): - data_dict = defaultdict(list) - with io.open(path_, 'r', encoding='utf8') as fp: - reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE) - for row in reader: - data_dict['source'].append(row[s_lang]) - data_dict['target'].append(row[t_lang]) - - if self.target_token: - text = 
self.add_target_token(data_dict['source'], t_lang) - data_dict['source'] = text - - return data_dict['source'], data_dict['target'] - - def read_aligned_corpus(self, split_type='train'): - data_dict = defaultdict(list) - iterable = [] - s_list = [] - t_list = [] - - if self.zero_shot: - if split_type == "train": - iterable = zip(self.lang_dict['source'], self.lang_dict['target']) - else: - iterable = zip(self.eval_lang_dict['source'], self.eval_lang_dict['target']) - - elif self.bilingual: - iterable = itertools.product(self.lang_dict['source'], self.lang_dict['target']) - - for s_lang, t_lang in iterable: - if s_lang == t_lang: - continue - if self.corpus_type == 'file': - split_type_file_path = os.path.join(self.corpus_path, - "all_talks_{}.tsv".format(split_type)) - s_list, t_list = self.read_from_single_file(split_type_file_path, - s_lang=s_lang, - t_lang=t_lang) - data_dict['source'] += s_list - data_dict['target'] += t_list - new_data_dict = self.filter_text(data_dict) - return new_data_dict - - -def read_langs(corpus_path): - split_type_file_path = os.path.join(corpus_path, 'extracted', - "all_talks_dev.tsv") - with io.open(split_type_file_path, 'r', encoding='utf8') as fp: - reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE) - header = next(reader) - return [k for k in header.keys() if k != 'talk_name'] - -def extra_english(corpus_path, split): - split_type_file_path = os.path.join(corpus_path, - f"all_talks_{split}.tsv") - output_split_type_file_path = os.path.join(corpus_path, - f"all_talks_{split}.en") - with io.open(split_type_file_path, 'r', encoding='utf8') as fp, io.open(output_split_type_file_path, 'w', encoding='utf8') as fw: - reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE) - for row in reader: - line = row['en'] - fw.write(line + '\n') - de_tok(output_split_type_file_path, 'en') - - - -def tok_file_name(filename, lang): - seps = filename.split('.') - seps.insert(-1, 'tok') - tok_file = '.'.join(seps) - return tok_file - -def de_tok(tok_file, lang): - # seps = tok_file.split('.') - # seps.insert(-1, 'detok') - # de_tok_file = '.'.join(seps) - de_tok_file = tok_file.replace('.tok.', '.') - cmd = 'perl {detok_cmd} -l {lang} < {tok_file} > {de_tok_file}'.format( - detok_cmd=detok_cmd, tok_file=tok_file, - de_tok_file=de_tok_file, lang=lang[:2]) - call(cmd) - -def extra_bitex( - ted_data_path, - lsrc_lang, - ltrg_lang, - target_token, - output_data_path, -): - def get_ted_lang(lang): - long_langs = ['pt-br', 'zh-cn', 'zh-tw', 'fr-ca'] - if lang[:5] in long_langs: - return lang[:5] - elif lang[:4] =='calv': - return lang[:5] - elif lang in ['pt_BR', 'zh_CN', 'zh_TW', 'fr_CA']: - return lang.lower().replace('_', '-') - return lang[:2] - src_lang = get_ted_lang(lsrc_lang) - trg_lang = get_ted_lang(ltrg_lang) - train_lang_dict={'source': [src_lang], 'target': [trg_lang]} - eval_lang_dict = {'source': [src_lang], 'target': [trg_lang]} - - obj = MultiLingualAlignedCorpusReader(corpus_path=ted_data_path, - lang_dict=train_lang_dict, - target_token=target_token, - corpus_type='file', - eval_lang_dict=eval_lang_dict, - zero_shot=False, - bilingual=True) - - os.makedirs(output_data_path, exist_ok=True) - lsrc_lang = lsrc_lang.replace('-', '_') - ltrg_lang = ltrg_lang.replace('-', '_') - obj.save_file(output_data_path + f"/train.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}", - split_type='train', data_type='source', lang=src_lang) - obj.save_file(output_data_path + f"/train.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}", - split_type='train', data_type='target', 
lang=trg_lang) - - obj.save_file(output_data_path + f"/test.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}", - split_type='test', data_type='source', lang=src_lang) - obj.save_file(output_data_path + f"/test.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}", - split_type='test', data_type='target', lang=trg_lang) - - obj.save_file(output_data_path + f"/valid.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}", - split_type='dev', data_type='source', lang=src_lang) - obj.save_file(output_data_path + f"/valid.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}", - split_type='dev', data_type='target', lang=trg_lang) - - -def bar_custom(current, total, width=80): - print("Downloading: %d%% [%d / %d] Ks" % (current / total * 100, current / 1000, total / 1000), end='\r') - - -def download_and_extract(download_to, extract_to): - url = 'http://phontron.com/data/ted_talks.tar.gz' - filename = f"{download_to}/ted_talks.tar.gz" - if os.path.exists(filename): - print(f'{filename} has already been downloaded so skip') - else: - filename = wget.download(url, filename, bar=bar_custom) - if os.path.exists(f'{extract_to}/all_talks_train.tsv'): - print(f'Already extracted so skip') - else: - extract_cmd = f'tar xzfv "{filename}" -C "{extract_to}"' - call(extract_cmd) - - -if __name__ == "__main__": - import argparse - parser = argparse.ArgumentParser() - parser.add_argument('--ted_data_path', type=str, default=WORKDIR_ROOT, required=False) - parser.add_argument( - '--direction-list', - type=str, - # default=None, - #for ML50 - default=( - "bn_IN-en_XX,he_IL-en_XX,fa_IR-en_XX,id_ID-en_XX,sv_SE-en_XX,pt_XX-en_XX,ka_GE-en_XX,ka_GE-en_XX,th_TH-en_XX," - "mr_IN-en_XX,hr_HR-en_XX,uk_UA-en_XX,az_AZ-en_XX,mk_MK-en_XX,gl_ES-en_XX,sl_SI-en_XX,mn_MN-en_XX," - #non-english directions - # "fr_XX-de_DE," # replaced with wmt20 - # "ja_XX-ko_KR,es_XX-pt_XX,ru_RU-sv_SE,hi_IN-bn_IN,id_ID-ar_AR,cs_CZ-pl_PL,ar_AR-tr_TR" - ), - required=False) - parser.add_argument('--target-token', action='store_true', default=False) - parser.add_argument('--extract-all-english', action='store_true', default=False) - - args = parser.parse_args() - - import sys - import json - - # TED Talks data directory - ted_data_path = args.ted_data_path - - download_to = f'{ted_data_path}/downloads' - extract_to = f'{ted_data_path}/extracted' - - #DESTDIR=${WORKDIR_ROOT}/ML50/raw/ - output_path = f'{ted_data_path}/ML50/raw' - os.makedirs(download_to, exist_ok=True) - os.makedirs(extract_to, exist_ok=True) - os.makedirs(output_path, exist_ok=True) - download_and_extract(download_to, extract_to) - - - if args.extract_all_english: - for split in ['train', 'dev', 'test']: - extra_english(ted_data_path, split) - exit(0) - if args.direction_list is not None: - directions = args.direction_list.strip().split(',') - directions = [tuple(d.strip().split('-', 1)) for d in directions if d] - else: - langs = read_langs(ted_data_path) - # directions = [ - # '{}.{}'.format(src, tgt) - # for src in langs - # for tgt in langs - # if src < tgt - # ] - directions = [('en', tgt) for tgt in langs if tgt != 'en'] - print(f'num directions={len(directions)}: {directions}') - - for src_lang, trg_lang in directions: - print('--working on {}-{}'.format(src_lang, trg_lang)) - extra_bitex( - extract_to, - src_lang, - trg_lang, - target_token=args.target_token, - output_data_path=output_path - ) diff --git a/spaces/krazyxki/V-1488abed/src/proxy/rewriters/add-key.ts b/spaces/krazyxki/V-1488abed/src/proxy/rewriters/add-key.ts deleted file mode 100644 index 2b6554439d78332994882349a5ee7fa3cc81755f..0000000000000000000000000000000000000000 --- 
a/spaces/krazyxki/V-1488abed/src/proxy/rewriters/add-key.ts +++ /dev/null @@ -1,12 +0,0 @@ -import type { ExpressHttpProxyReqCallback } from "."; -import { Key, keys } from "../../keys"; - -/** Add an OpenAI key from the pool to the request. */ -export const addKey: ExpressHttpProxyReqCallback = (proxyReq, req) => { - if (!req.proxy) { - let assignedKey: Key; - assignedKey = keys.get(req.body?.model || "gpt-3.5")!; - req.key = assignedKey; - proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`); - } -}; diff --git a/spaces/kukuhtw/VToonify/vtoonify/model/raft/core/__init__.py b/spaces/kukuhtw/VToonify/vtoonify/model/raft/core/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/kxqt/Expedit-SAM/segment_anything/predictor.py b/spaces/kxqt/Expedit-SAM/segment_anything/predictor.py deleted file mode 100644 index 57c089d1fc4a6bbf5786e1ef62c59e22d582f5aa..0000000000000000000000000000000000000000 --- a/spaces/kxqt/Expedit-SAM/segment_anything/predictor.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch - -from segment_anything.modeling import Sam - -from typing import Optional, Tuple - -from .utils.transforms import ResizeLongestSide - - -class SamPredictor: - def __init__( - self, - sam_model: Sam, - ) -> None: - """ - Uses SAM to calculate the image embedding for an image, and then - allow repeated, efficient mask prediction given prompts. - - Arguments: - sam_model (Sam): The model to use for mask prediction. - """ - super().__init__() - self.model = sam_model - self.transform = ResizeLongestSide(sam_model.image_encoder.img_size) - self.reset_image() - - def set_image( - self, - image: np.ndarray, - image_format: str = "RGB", - ) -> None: - """ - Calculates the image embeddings for the provided image, allowing - masks to be predicted with the 'predict' method. - - Arguments: - image (np.ndarray): The image for calculating masks. Expects an - image in HWC uint8 format, with pixel values in [0, 255]. - image_format (str): The color format of the image, in ['RGB', 'BGR']. - """ - assert image_format in [ - "RGB", - "BGR", - ], f"image_format must be in ['RGB', 'BGR'], is {image_format}." - if image_format != self.model.image_format: - image = image[..., ::-1] - - # Transform the image to the form expected by the model - input_image = self.transform.apply_image(image) - input_image_torch = torch.as_tensor(input_image, device=self.device) - input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :] - - self.set_torch_image(input_image_torch, image.shape[:2]) - - @torch.no_grad() - def set_torch_image( - self, - transformed_image: torch.Tensor, - original_image_size: Tuple[int, ...], - ) -> None: - """ - Calculates the image embeddings for the provided image, allowing - masks to be predicted with the 'predict' method. Expects the input - image to be already transformed to the format expected by the model. - - Arguments: - transformed_image (torch.Tensor): The input image, with shape - 1x3xHxW, which has been transformed with ResizeLongestSide. - original_image_size (tuple(int, int)): The size of the image - before transformation, in (H, W) format. 
- """ - assert ( - len(transformed_image.shape) == 4 - and transformed_image.shape[1] == 3 - and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size - ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}." - self.reset_image() - - self.original_size = original_image_size - self.input_size = tuple(transformed_image.shape[-2:]) - input_image = self.model.preprocess(transformed_image) - self.features = self.model.image_encoder(input_image) - self.is_image_set = True - - def predict( - self, - point_coords: Optional[np.ndarray] = None, - point_labels: Optional[np.ndarray] = None, - box: Optional[np.ndarray] = None, - mask_input: Optional[np.ndarray] = None, - multimask_output: bool = True, - return_logits: bool = False, - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Predict masks for the given input prompts, using the currently set image. - - Arguments: - point_coords (np.ndarray or None): A Nx2 array of point prompts to the - model. Each point is in (X,Y) in pixels. - point_labels (np.ndarray or None): A length N array of labels for the - point prompts. 1 indicates a foreground point and 0 indicates a - background point. - box (np.ndarray or None): A length 4 array given a box prompt to the - model, in XYXY format. - mask_input (np.ndarray): A low resolution mask input to the model, typically - coming from a previous prediction iteration. Has form 1xHxW, where - for SAM, H=W=256. - multimask_output (bool): If true, the model will return three masks. - For ambiguous input prompts (such as a single click), this will often - produce better masks than a single prediction. If only a single - mask is needed, the model's predicted quality score can be used - to select the best mask. For non-ambiguous prompts, such as multiple - input prompts, multimask_output=False can give better results. - return_logits (bool): If true, returns un-thresholded masks logits - instead of a binary mask. - - Returns: - (np.ndarray): The output masks in CxHxW format, where C is the - number of masks, and (H, W) is the original image size. - (np.ndarray): An array of length C containing the model's - predictions for the quality of each mask. - (np.ndarray): An array of shape CxHxW, where C is the number - of masks and H=W=256. These low resolution logits can be passed to - a subsequent iteration as mask input. - """ - if not self.is_image_set: - raise RuntimeError("An image must be set with .set_image(...) before mask prediction.") - - # Transform input prompts - coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None - if point_coords is not None: - assert ( - point_labels is not None - ), "point_labels must be supplied if point_coords is supplied." 
- point_coords = self.transform.apply_coords(point_coords, self.original_size) - coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device) - labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) - coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :] - if box is not None: - box = self.transform.apply_boxes(box, self.original_size) - box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device) - box_torch = box_torch[None, :] - if mask_input is not None: - mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device) - mask_input_torch = mask_input_torch[None, :, :, :] - - masks, iou_predictions, low_res_masks = self.predict_torch( - coords_torch, - labels_torch, - box_torch, - mask_input_torch, - multimask_output, - return_logits=return_logits, - ) - - masks = masks[0].detach().cpu().numpy() - iou_predictions = iou_predictions[0].detach().cpu().numpy() - low_res_masks = low_res_masks[0].detach().cpu().numpy() - return masks, iou_predictions, low_res_masks - - @torch.no_grad() - def predict_torch( - self, - point_coords: Optional[torch.Tensor], - point_labels: Optional[torch.Tensor], - boxes: Optional[torch.Tensor] = None, - mask_input: Optional[torch.Tensor] = None, - multimask_output: bool = True, - return_logits: bool = False, - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Predict masks for the given input prompts, using the currently set image. - Input prompts are batched torch tensors and are expected to already be - transformed to the input frame using ResizeLongestSide. - - Arguments: - point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the - model. Each point is in (X,Y) in pixels. - point_labels (torch.Tensor or None): A BxN array of labels for the - point prompts. 1 indicates a foreground point and 0 indicates a - background point. - box (np.ndarray or None): A Bx4 array given a box prompt to the - model, in XYXY format. - mask_input (np.ndarray): A low resolution mask input to the model, typically - coming from a previous prediction iteration. Has form Bx1xHxW, where - for SAM, H=W=256. Masks returned by a previous iteration of the - predict method do not need further transformation. - multimask_output (bool): If true, the model will return three masks. - For ambiguous input prompts (such as a single click), this will often - produce better masks than a single prediction. If only a single - mask is needed, the model's predicted quality score can be used - to select the best mask. For non-ambiguous prompts, such as multiple - input prompts, multimask_output=False can give better results. - return_logits (bool): If true, returns un-thresholded masks logits - instead of a binary mask. - - Returns: - (torch.Tensor): The output masks in BxCxHxW format, where C is the - number of masks, and (H, W) is the original image size. - (torch.Tensor): An array of shape BxC containing the model's - predictions for the quality of each mask. - (torch.Tensor): An array of shape BxCxHxW, where C is the number - of masks and H=W=256. These low res logits can be passed to - a subsequent iteration as mask input. - """ - if not self.is_image_set: - raise RuntimeError("An image must be set with .set_image(...) 
before mask prediction.") - - if point_coords is not None: - points = (point_coords, point_labels) - else: - points = None - - # Embed prompts - sparse_embeddings, dense_embeddings = self.model.prompt_encoder( - points=points, - boxes=boxes, - masks=mask_input, - ) - - # Predict masks - low_res_masks, iou_predictions = self.model.mask_decoder( - image_embeddings=self.features, - image_pe=self.model.prompt_encoder.get_dense_pe(), - sparse_prompt_embeddings=sparse_embeddings, - dense_prompt_embeddings=dense_embeddings, - multimask_output=multimask_output, - ) - - # Upscale the masks to the original image resolution - masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size) - - if not return_logits: - masks = masks > self.model.mask_threshold - - return masks, iou_predictions, low_res_masks - - def get_image_embedding(self) -> torch.Tensor: - """ - Returns the image embeddings for the currently set image, with - shape 1xCxHxW, where C is the embedding dimension and (H,W) are - the embedding spatial dimension of SAM (typically C=256, H=W=64). - """ - if not self.is_image_set: - raise RuntimeError( - "An image must be set with .set_image(...) to generate an embedding." - ) - assert self.features is not None, "Features must exist if an image has been set." - return self.features - - @property - def device(self) -> torch.device: - return self.model.device - - def reset_image(self) -> None: - """Resets the currently set image.""" - self.is_image_set = False - self.features = None - self.orig_h = None - self.orig_w = None - self.input_h = None - self.input_w = None diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/click/core.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/click/core.py deleted file mode 100644 index 5abfb0f3c2f872275962732b370fed1202f1144a..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/click/core.py +++ /dev/null @@ -1,2998 +0,0 @@ -import enum -import errno -import inspect -import os -import sys -import typing as t -from collections import abc -from contextlib import contextmanager -from contextlib import ExitStack -from functools import partial -from functools import update_wrapper -from gettext import gettext as _ -from gettext import ngettext -from itertools import repeat - -from . 
import types -from .exceptions import Abort -from .exceptions import BadParameter -from .exceptions import ClickException -from .exceptions import Exit -from .exceptions import MissingParameter -from .exceptions import UsageError -from .formatting import HelpFormatter -from .formatting import join_options -from .globals import pop_context -from .globals import push_context -from .parser import _flag_needs_value -from .parser import OptionParser -from .parser import split_opt -from .termui import confirm -from .termui import prompt -from .termui import style -from .utils import _detect_program_name -from .utils import _expand_args -from .utils import echo -from .utils import make_default_short_help -from .utils import make_str -from .utils import PacifyFlushWrapper - -if t.TYPE_CHECKING: - import typing_extensions as te - from .shell_completion import CompletionItem - -F = t.TypeVar("F", bound=t.Callable[..., t.Any]) -V = t.TypeVar("V") - - -def _complete_visible_commands( - ctx: "Context", incomplete: str -) -> t.Iterator[t.Tuple[str, "Command"]]: - """List all the subcommands of a group that start with the - incomplete value and aren't hidden. - - :param ctx: Invocation context for the group. - :param incomplete: Value being completed. May be empty. - """ - multi = t.cast(MultiCommand, ctx.command) - - for name in multi.list_commands(ctx): - if name.startswith(incomplete): - command = multi.get_command(ctx, name) - - if command is not None and not command.hidden: - yield name, command - - -def _check_multicommand( - base_command: "MultiCommand", cmd_name: str, cmd: "Command", register: bool = False -) -> None: - if not base_command.chain or not isinstance(cmd, MultiCommand): - return - if register: - hint = ( - "It is not possible to add multi commands as children to" - " another multi command that is in chain mode." - ) - else: - hint = ( - "Found a multi command as subcommand to a multi command" - " that is in chain mode. This is not supported." - ) - raise RuntimeError( - f"{hint}. Command {base_command.name!r} is set to chain and" - f" {cmd_name!r} was added as a subcommand but it in itself is a" - f" multi command. ({cmd_name!r} is a {type(cmd).__name__}" - f" within a chained {type(base_command).__name__} named" - f" {base_command.name!r})." - ) - - -def batch(iterable: t.Iterable[V], batch_size: int) -> t.List[t.Tuple[V, ...]]: - return list(zip(*repeat(iter(iterable), batch_size))) - - -@contextmanager -def augment_usage_errors( - ctx: "Context", param: t.Optional["Parameter"] = None -) -> t.Iterator[None]: - """Context manager that attaches extra information to exceptions.""" - try: - yield - except BadParameter as e: - if e.ctx is None: - e.ctx = ctx - if param is not None and e.param is None: - e.param = param - raise - except UsageError as e: - if e.ctx is None: - e.ctx = ctx - raise - - -def iter_params_for_processing( - invocation_order: t.Sequence["Parameter"], - declaration_order: t.Sequence["Parameter"], -) -> t.List["Parameter"]: - """Given a sequence of parameters in the order as should be considered - for processing and an iterable of parameters that exist, this returns - a list in the correct order as they should be processed. 
- """ - - def sort_key(item: "Parameter") -> t.Tuple[bool, float]: - try: - idx: float = invocation_order.index(item) - except ValueError: - idx = float("inf") - - return not item.is_eager, idx - - return sorted(declaration_order, key=sort_key) - - -class ParameterSource(enum.Enum): - """This is an :class:`~enum.Enum` that indicates the source of a - parameter's value. - - Use :meth:`click.Context.get_parameter_source` to get the - source for a parameter by name. - - .. versionchanged:: 8.0 - Use :class:`~enum.Enum` and drop the ``validate`` method. - - .. versionchanged:: 8.0 - Added the ``PROMPT`` value. - """ - - COMMANDLINE = enum.auto() - """The value was provided by the command line args.""" - ENVIRONMENT = enum.auto() - """The value was provided with an environment variable.""" - DEFAULT = enum.auto() - """Used the default specified by the parameter.""" - DEFAULT_MAP = enum.auto() - """Used a default provided by :attr:`Context.default_map`.""" - PROMPT = enum.auto() - """Used a prompt to confirm a default or provide a value.""" - - -class Context: - """The context is a special internal object that holds state relevant - for the script execution at every single level. It's normally invisible - to commands unless they opt-in to getting access to it. - - The context is useful as it can pass internal objects around and can - control special execution features such as reading data from - environment variables. - - A context can be used as context manager in which case it will call - :meth:`close` on teardown. - - :param command: the command class for this context. - :param parent: the parent context. - :param info_name: the info name for this invocation. Generally this - is the most descriptive name for the script or - command. For the toplevel script it is usually - the name of the script, for commands below it it's - the name of the script. - :param obj: an arbitrary object of user data. - :param auto_envvar_prefix: the prefix to use for automatic environment - variables. If this is `None` then reading - from environment variables is disabled. This - does not affect manually set environment - variables which are always read. - :param default_map: a dictionary (like object) with default values - for parameters. - :param terminal_width: the width of the terminal. The default is - inherit from parent context. If no context - defines the terminal width then auto - detection will be applied. - :param max_content_width: the maximum width for content rendered by - Click (this currently only affects help - pages). This defaults to 80 characters if - not overridden. In other words: even if the - terminal is larger than that, Click will not - format things wider than 80 characters by - default. In addition to that, formatters might - add some safety mapping on the right. - :param resilient_parsing: if this flag is enabled then Click will - parse without any interactivity or callback - invocation. Default values will also be - ignored. This is useful for implementing - things such as completion support. - :param allow_extra_args: if this is set to `True` then extra arguments - at the end will not raise an error and will be - kept on the context. The default is to inherit - from the command. - :param allow_interspersed_args: if this is set to `False` then options - and arguments cannot be mixed. The - default is to inherit from the command. - :param ignore_unknown_options: instructs click to ignore options it does - not know and keeps them for later - processing. 
- :param help_option_names: optionally a list of strings that define how - the default help parameter is named. The - default is ``['--help']``. - :param token_normalize_func: an optional function that is used to - normalize tokens (options, choices, - etc.). This for instance can be used to - implement case insensitive behavior. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. This is only needed if ANSI - codes are used in texts that Click prints which is by - default not the case. This for instance would affect - help output. - :param show_default: Show the default value for commands. If this - value is not set, it defaults to the value from the parent - context. ``Command.show_default`` overrides this default for the - specific command. - - .. versionchanged:: 8.1 - The ``show_default`` parameter is overridden by - ``Command.show_default``, instead of the other way around. - - .. versionchanged:: 8.0 - The ``show_default`` parameter defaults to the value from the - parent context. - - .. versionchanged:: 7.1 - Added the ``show_default`` parameter. - - .. versionchanged:: 4.0 - Added the ``color``, ``ignore_unknown_options``, and - ``max_content_width`` parameters. - - .. versionchanged:: 3.0 - Added the ``allow_extra_args`` and ``allow_interspersed_args`` - parameters. - - .. versionchanged:: 2.0 - Added the ``resilient_parsing``, ``help_option_names``, and - ``token_normalize_func`` parameters. - """ - - #: The formatter class to create with :meth:`make_formatter`. - #: - #: .. versionadded:: 8.0 - formatter_class: t.Type["HelpFormatter"] = HelpFormatter - - def __init__( - self, - command: "Command", - parent: t.Optional["Context"] = None, - info_name: t.Optional[str] = None, - obj: t.Optional[t.Any] = None, - auto_envvar_prefix: t.Optional[str] = None, - default_map: t.Optional[t.Dict[str, t.Any]] = None, - terminal_width: t.Optional[int] = None, - max_content_width: t.Optional[int] = None, - resilient_parsing: bool = False, - allow_extra_args: t.Optional[bool] = None, - allow_interspersed_args: t.Optional[bool] = None, - ignore_unknown_options: t.Optional[bool] = None, - help_option_names: t.Optional[t.List[str]] = None, - token_normalize_func: t.Optional[t.Callable[[str], str]] = None, - color: t.Optional[bool] = None, - show_default: t.Optional[bool] = None, - ) -> None: - #: the parent context or `None` if none exists. - self.parent = parent - #: the :class:`Command` for this context. - self.command = command - #: the descriptive information name - self.info_name = info_name - #: Map of parameter names to their parsed values. Parameters - #: with ``expose_value=False`` are not stored. - self.params: t.Dict[str, t.Any] = {} - #: the leftover arguments. - self.args: t.List[str] = [] - #: protected arguments. These are arguments that are prepended - #: to `args` when certain parsing scenarios are encountered but - #: must be never propagated to another arguments. This is used - #: to implement nested parsing. - self.protected_args: t.List[str] = [] - #: the collected prefixes of the command's options. - self._opt_prefixes: t.Set[str] = set(parent._opt_prefixes) if parent else set() - - if obj is None and parent is not None: - obj = parent.obj - - #: the user object stored. - self.obj: t.Any = obj - self._meta: t.Dict[str, t.Any] = getattr(parent, "meta", {}) - - #: A dictionary (-like object) with defaults for parameters. 
- if ( - default_map is None - and info_name is not None - and parent is not None - and parent.default_map is not None - ): - default_map = parent.default_map.get(info_name) - - self.default_map: t.Optional[t.Dict[str, t.Any]] = default_map - - #: This flag indicates if a subcommand is going to be executed. A - #: group callback can use this information to figure out if it's - #: being executed directly or because the execution flow passes - #: onwards to a subcommand. By default it's None, but it can be - #: the name of the subcommand to execute. - #: - #: If chaining is enabled this will be set to ``'*'`` in case - #: any commands are executed. It is however not possible to - #: figure out which ones. If you require this knowledge you - #: should use a :func:`result_callback`. - self.invoked_subcommand: t.Optional[str] = None - - if terminal_width is None and parent is not None: - terminal_width = parent.terminal_width - - #: The width of the terminal (None is autodetection). - self.terminal_width: t.Optional[int] = terminal_width - - if max_content_width is None and parent is not None: - max_content_width = parent.max_content_width - - #: The maximum width of formatted content (None implies a sensible - #: default which is 80 for most things). - self.max_content_width: t.Optional[int] = max_content_width - - if allow_extra_args is None: - allow_extra_args = command.allow_extra_args - - #: Indicates if the context allows extra args or if it should - #: fail on parsing. - #: - #: .. versionadded:: 3.0 - self.allow_extra_args = allow_extra_args - - if allow_interspersed_args is None: - allow_interspersed_args = command.allow_interspersed_args - - #: Indicates if the context allows mixing of arguments and - #: options or not. - #: - #: .. versionadded:: 3.0 - self.allow_interspersed_args: bool = allow_interspersed_args - - if ignore_unknown_options is None: - ignore_unknown_options = command.ignore_unknown_options - - #: Instructs click to ignore options that a command does not - #: understand and will store it on the context for later - #: processing. This is primarily useful for situations where you - #: want to call into external programs. Generally this pattern is - #: strongly discouraged because it's not possibly to losslessly - #: forward all arguments. - #: - #: .. versionadded:: 4.0 - self.ignore_unknown_options: bool = ignore_unknown_options - - if help_option_names is None: - if parent is not None: - help_option_names = parent.help_option_names - else: - help_option_names = ["--help"] - - #: The names for the help options. - self.help_option_names: t.List[str] = help_option_names - - if token_normalize_func is None and parent is not None: - token_normalize_func = parent.token_normalize_func - - #: An optional normalization function for tokens. This is - #: options, choices, commands etc. - self.token_normalize_func: t.Optional[ - t.Callable[[str], str] - ] = token_normalize_func - - #: Indicates if resilient parsing is enabled. In that case Click - #: will do its best to not cause any failures and default values - #: will be ignored. Useful for completion. - self.resilient_parsing: bool = resilient_parsing - - # If there is no envvar prefix yet, but the parent has one and - # the command on this level has a name, we can expand the envvar - # prefix automatically. 
- if auto_envvar_prefix is None: - if ( - parent is not None - and parent.auto_envvar_prefix is not None - and self.info_name is not None - ): - auto_envvar_prefix = ( - f"{parent.auto_envvar_prefix}_{self.info_name.upper()}" - ) - else: - auto_envvar_prefix = auto_envvar_prefix.upper() - - if auto_envvar_prefix is not None: - auto_envvar_prefix = auto_envvar_prefix.replace("-", "_") - - self.auto_envvar_prefix: t.Optional[str] = auto_envvar_prefix - - if color is None and parent is not None: - color = parent.color - - #: Controls if styling output is wanted or not. - self.color: t.Optional[bool] = color - - if show_default is None and parent is not None: - show_default = parent.show_default - - #: Show option default values when formatting help text. - self.show_default: t.Optional[bool] = show_default - - self._close_callbacks: t.List[t.Callable[[], t.Any]] = [] - self._depth = 0 - self._parameter_source: t.Dict[str, ParameterSource] = {} - self._exit_stack = ExitStack() - - def to_info_dict(self) -> t.Dict[str, t.Any]: - """Gather information that could be useful for a tool generating - user-facing documentation. This traverses the entire CLI - structure. - - .. code-block:: python - - with Context(cli) as ctx: - info = ctx.to_info_dict() - - .. versionadded:: 8.0 - """ - return { - "command": self.command.to_info_dict(self), - "info_name": self.info_name, - "allow_extra_args": self.allow_extra_args, - "allow_interspersed_args": self.allow_interspersed_args, - "ignore_unknown_options": self.ignore_unknown_options, - "auto_envvar_prefix": self.auto_envvar_prefix, - } - - def __enter__(self) -> "Context": - self._depth += 1 - push_context(self) - return self - - def __exit__(self, exc_type, exc_value, tb): # type: ignore - self._depth -= 1 - if self._depth == 0: - self.close() - pop_context() - - @contextmanager - def scope(self, cleanup: bool = True) -> t.Iterator["Context"]: - """This helper method can be used with the context object to promote - it to the current thread local (see :func:`get_current_context`). - The default behavior of this is to invoke the cleanup functions which - can be disabled by setting `cleanup` to `False`. The cleanup - functions are typically used for things such as closing file handles. - - If the cleanup is intended the context object can also be directly - used as a context manager. - - Example usage:: - - with ctx.scope(): - assert get_current_context() is ctx - - This is equivalent:: - - with ctx: - assert get_current_context() is ctx - - .. versionadded:: 5.0 - - :param cleanup: controls if the cleanup functions should be run or - not. The default is to run these functions. In - some situations the context only wants to be - temporarily pushed in which case this can be disabled. - Nested pushes automatically defer the cleanup. - """ - if not cleanup: - self._depth += 1 - try: - with self as rv: - yield rv - finally: - if not cleanup: - self._depth -= 1 - - @property - def meta(self) -> t.Dict[str, t.Any]: - """This is a dictionary which is shared with all the contexts - that are nested. It exists so that click utilities can store some - state here if they need to. It is however the responsibility of - that code to manage this dictionary well. - - The keys are supposed to be unique dotted strings. For instance - module paths are a good choice for it. What is stored in there is - irrelevant for the operation of click. However what is important is - that code that places data here adheres to the general semantics of - the system. 
- - Example usage:: - - LANG_KEY = f'{__name__}.lang' - - def set_language(value): - ctx = get_current_context() - ctx.meta[LANG_KEY] = value - - def get_language(): - return get_current_context().meta.get(LANG_KEY, 'en_US') - - .. versionadded:: 5.0 - """ - return self._meta - - def make_formatter(self) -> HelpFormatter: - """Creates the :class:`~click.HelpFormatter` for the help and - usage output. - - To quickly customize the formatter class used without overriding - this method, set the :attr:`formatter_class` attribute. - - .. versionchanged:: 8.0 - Added the :attr:`formatter_class` attribute. - """ - return self.formatter_class( - width=self.terminal_width, max_width=self.max_content_width - ) - - def with_resource(self, context_manager: t.ContextManager[V]) -> V: - """Register a resource as if it were used in a ``with`` - statement. The resource will be cleaned up when the context is - popped. - - Uses :meth:`contextlib.ExitStack.enter_context`. It calls the - resource's ``__enter__()`` method and returns the result. When - the context is popped, it closes the stack, which calls the - resource's ``__exit__()`` method. - - To register a cleanup function for something that isn't a - context manager, use :meth:`call_on_close`. Or use something - from :mod:`contextlib` to turn it into a context manager first. - - .. code-block:: python - - @click.group() - @click.option("--name") - @click.pass_context - def cli(ctx): - ctx.obj = ctx.with_resource(connect_db(name)) - - :param context_manager: The context manager to enter. - :return: Whatever ``context_manager.__enter__()`` returns. - - .. versionadded:: 8.0 - """ - return self._exit_stack.enter_context(context_manager) - - def call_on_close(self, f: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: - """Register a function to be called when the context tears down. - - This can be used to close resources opened during the script - execution. Resources that support Python's context manager - protocol which would be used in a ``with`` statement should be - registered with :meth:`with_resource` instead. - - :param f: The function to execute on teardown. - """ - return self._exit_stack.callback(f) - - def close(self) -> None: - """Invoke all close callbacks registered with - :meth:`call_on_close`, and exit all context managers entered - with :meth:`with_resource`. - """ - self._exit_stack.close() - # In case the context is reused, create a new exit stack. - self._exit_stack = ExitStack() - - @property - def command_path(self) -> str: - """The computed command path. This is used for the ``usage`` - information on the help page. It's automatically created by - combining the info names of the chain of contexts to the root. 
- """ - rv = "" - if self.info_name is not None: - rv = self.info_name - if self.parent is not None: - parent_command_path = [self.parent.command_path] - - if isinstance(self.parent.command, Command): - for param in self.parent.command.get_params(self): - parent_command_path.extend(param.get_usage_pieces(self)) - - rv = f"{' '.join(parent_command_path)} {rv}" - return rv.lstrip() - - def find_root(self) -> "Context": - """Finds the outermost context.""" - node = self - while node.parent is not None: - node = node.parent - return node - - def find_object(self, object_type: t.Type[V]) -> t.Optional[V]: - """Finds the closest object of a given type.""" - node: t.Optional["Context"] = self - - while node is not None: - if isinstance(node.obj, object_type): - return node.obj - - node = node.parent - - return None - - def ensure_object(self, object_type: t.Type[V]) -> V: - """Like :meth:`find_object` but sets the innermost object to a - new instance of `object_type` if it does not exist. - """ - rv = self.find_object(object_type) - if rv is None: - self.obj = rv = object_type() - return rv - - @t.overload - def lookup_default( - self, name: str, call: "te.Literal[True]" = True - ) -> t.Optional[t.Any]: - ... - - @t.overload - def lookup_default( - self, name: str, call: "te.Literal[False]" = ... - ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: - ... - - def lookup_default(self, name: str, call: bool = True) -> t.Optional[t.Any]: - """Get the default for a parameter from :attr:`default_map`. - - :param name: Name of the parameter. - :param call: If the default is a callable, call it. Disable to - return the callable instead. - - .. versionchanged:: 8.0 - Added the ``call`` parameter. - """ - if self.default_map is not None: - value = self.default_map.get(name) - - if call and callable(value): - return value() - - return value - - return None - - def fail(self, message: str) -> "te.NoReturn": - """Aborts the execution of the program with a specific error - message. - - :param message: the error message to fail with. - """ - raise UsageError(message, self) - - def abort(self) -> "te.NoReturn": - """Aborts the script.""" - raise Abort() - - def exit(self, code: int = 0) -> "te.NoReturn": - """Exits the application with a given exit code.""" - raise Exit(code) - - def get_usage(self) -> str: - """Helper method to get formatted usage string for the current - context and command. - """ - return self.command.get_usage(self) - - def get_help(self) -> str: - """Helper method to get formatted help page for the current - context and command. - """ - return self.command.get_help(self) - - def _make_sub_context(self, command: "Command") -> "Context": - """Create a new context of the same type as this context, but - for a new command. - - :meta private: - """ - return type(self)(command, info_name=command.name, parent=self) - - def invoke( - __self, # noqa: B902 - __callback: t.Union["Command", t.Callable[..., t.Any]], - *args: t.Any, - **kwargs: t.Any, - ) -> t.Any: - """Invokes a command callback in exactly the way it expects. There - are two ways to invoke this method: - - 1. the first argument can be a callback and all other arguments and - keyword arguments are forwarded directly to the function. - 2. the first argument is a click command object. In that case all - arguments are forwarded as well but proper click parameters - (options and click arguments) must be keyword arguments and Click - will fill in defaults. 
- - Note that before Click 3.2 keyword arguments were not properly filled - in against the intention of this code and no context was created. For - more information about this change and why it was done in a bugfix - release see :ref:`upgrade-to-3.2`. - - .. versionchanged:: 8.0 - All ``kwargs`` are tracked in :attr:`params` so they will be - passed if :meth:`forward` is called at multiple levels. - """ - if isinstance(__callback, Command): - other_cmd = __callback - - if other_cmd.callback is None: - raise TypeError( - "The given command does not have a callback that can be invoked." - ) - else: - __callback = other_cmd.callback - - ctx = __self._make_sub_context(other_cmd) - - for param in other_cmd.params: - if param.name not in kwargs and param.expose_value: - kwargs[param.name] = param.type_cast_value( # type: ignore - ctx, param.get_default(ctx) - ) - - # Track all kwargs as params, so that forward() will pass - # them on in subsequent calls. - ctx.params.update(kwargs) - else: - ctx = __self - - with augment_usage_errors(__self): - with ctx: - return __callback(*args, **kwargs) - - def forward( - __self, __cmd: "Command", *args: t.Any, **kwargs: t.Any # noqa: B902 - ) -> t.Any: - """Similar to :meth:`invoke` but fills in default keyword - arguments from the current context if the other command expects - it. This cannot invoke callbacks directly, only other commands. - - .. versionchanged:: 8.0 - All ``kwargs`` are tracked in :attr:`params` so they will be - passed if ``forward`` is called at multiple levels. - """ - # Can only forward to other commands, not direct callbacks. - if not isinstance(__cmd, Command): - raise TypeError("Callback is not a command.") - - for param in __self.params: - if param not in kwargs: - kwargs[param] = __self.params[param] - - return __self.invoke(__cmd, *args, **kwargs) - - def set_parameter_source(self, name: str, source: ParameterSource) -> None: - """Set the source of a parameter. This indicates the location - from which the value of the parameter was obtained. - - :param name: The name of the parameter. - :param source: A member of :class:`~click.core.ParameterSource`. - """ - self._parameter_source[name] = source - - def get_parameter_source(self, name: str) -> t.Optional[ParameterSource]: - """Get the source of a parameter. This indicates the location - from which the value of the parameter was obtained. - - This can be useful for determining when a user specified a value - on the command line that is the same as the default value. It - will be :attr:`~click.core.ParameterSource.DEFAULT` only if the - value was actually taken from the default. - - :param name: The name of the parameter. - :rtype: ParameterSource - - .. versionchanged:: 8.0 - Returns ``None`` if the parameter was not provided from any - source. - """ - return self._parameter_source.get(name) - - -class BaseCommand: - """The base command implements the minimal API contract of commands. - Most code will never use this as it does not implement a lot of useful - functionality but it can act as the direct subclass of alternative - parsing methods that do not depend on the Click parser. - - For instance, this can be used to bridge Click and other systems like - argparse or docopt. - - Because base commands do not implement a lot of the API that other - parts of Click take for granted, they are not supported for all - operations. For instance, they cannot be used with the decorators - usually and they have no built-in callback system. - - .. 
versionchanged:: 2.0 - Added the `context_settings` parameter. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - """ - - #: The context class to create with :meth:`make_context`. - #: - #: .. versionadded:: 8.0 - context_class: t.Type[Context] = Context - #: the default for the :attr:`Context.allow_extra_args` flag. - allow_extra_args = False - #: the default for the :attr:`Context.allow_interspersed_args` flag. - allow_interspersed_args = True - #: the default for the :attr:`Context.ignore_unknown_options` flag. - ignore_unknown_options = False - - def __init__( - self, - name: t.Optional[str], - context_settings: t.Optional[t.Dict[str, t.Any]] = None, - ) -> None: - #: the name the command thinks it has. Upon registering a command - #: on a :class:`Group` the group will default the command name - #: with this information. You should instead use the - #: :class:`Context`\'s :attr:`~Context.info_name` attribute. - self.name = name - - if context_settings is None: - context_settings = {} - - #: an optional dictionary with defaults passed to the context. - self.context_settings: t.Dict[str, t.Any] = context_settings - - def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]: - """Gather information that could be useful for a tool generating - user-facing documentation. This traverses the entire structure - below this command. - - Use :meth:`click.Context.to_info_dict` to traverse the entire - CLI structure. - - :param ctx: A :class:`Context` representing this command. - - .. versionadded:: 8.0 - """ - return {"name": self.name} - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} {self.name}>" - - def get_usage(self, ctx: Context) -> str: - raise NotImplementedError("Base commands cannot get usage") - - def get_help(self, ctx: Context) -> str: - raise NotImplementedError("Base commands cannot get help") - - def make_context( - self, - info_name: t.Optional[str], - args: t.List[str], - parent: t.Optional[Context] = None, - **extra: t.Any, - ) -> Context: - """This function when given an info name and arguments will kick - off the parsing and create a new :class:`Context`. It does not - invoke the actual command callback though. - - To quickly customize the context class used without overriding - this method, set the :attr:`context_class` attribute. - - :param info_name: the info name for this invocation. Generally this - is the most descriptive name for the script or - command. For the toplevel script it's usually - the name of the script, for commands below it it's - the name of the command. - :param args: the arguments to parse as list of strings. - :param parent: the parent context if available. - :param extra: extra keyword arguments forwarded to the context - constructor. - - .. versionchanged:: 8.0 - Added the :attr:`context_class` attribute. - """ - for key, value in self.context_settings.items(): - if key not in extra: - extra[key] = value - - ctx = self.context_class( - self, info_name=info_name, parent=parent, **extra # type: ignore - ) - - with ctx.scope(cleanup=False): - self.parse_args(ctx, args) - return ctx - - def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: - """Given a context and a list of arguments this creates the parser - and parses the arguments, then modifies the context as necessary. - This is automatically invoked by :meth:`make_context`. 
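# Hedged sketch of the make_context()/invoke() flow documented above; `greet` and its
# --name option are invented examples, not commands from this repository.
import click

@click.command()
@click.option("--name", default="world")
def greet(name):
    click.echo(f"hello {name}")

# make_context() parses the args (via parse_args) without running the callback;
# invoke() then executes it, which is roughly what main() does further below.
with greet.make_context("greet", ["--name", "click"]) as ctx:
    greet.invoke(ctx)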
- """ - raise NotImplementedError("Base commands do not know how to parse arguments.") - - def invoke(self, ctx: Context) -> t.Any: - """Given a context, this invokes the command. The default - implementation is raising a not implemented error. - """ - raise NotImplementedError("Base commands are not invokable by default") - - def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: - """Return a list of completions for the incomplete value. Looks - at the names of chained multi-commands. - - Any command could be part of a chained multi-command, so sibling - commands are valid at any point during command completion. Other - command classes will return more completions. - - :param ctx: Invocation context for this command. - :param incomplete: Value being completed. May be empty. - - .. versionadded:: 8.0 - """ - from click.shell_completion import CompletionItem - - results: t.List["CompletionItem"] = [] - - while ctx.parent is not None: - ctx = ctx.parent - - if isinstance(ctx.command, MultiCommand) and ctx.command.chain: - results.extend( - CompletionItem(name, help=command.get_short_help_str()) - for name, command in _complete_visible_commands(ctx, incomplete) - if name not in ctx.protected_args - ) - - return results - - @t.overload - def main( - self, - args: t.Optional[t.Sequence[str]] = None, - prog_name: t.Optional[str] = None, - complete_var: t.Optional[str] = None, - standalone_mode: "te.Literal[True]" = True, - **extra: t.Any, - ) -> "te.NoReturn": - ... - - @t.overload - def main( - self, - args: t.Optional[t.Sequence[str]] = None, - prog_name: t.Optional[str] = None, - complete_var: t.Optional[str] = None, - standalone_mode: bool = ..., - **extra: t.Any, - ) -> t.Any: - ... - - def main( - self, - args: t.Optional[t.Sequence[str]] = None, - prog_name: t.Optional[str] = None, - complete_var: t.Optional[str] = None, - standalone_mode: bool = True, - windows_expand_args: bool = True, - **extra: t.Any, - ) -> t.Any: - """This is the way to invoke a script with all the bells and - whistles as a command line application. This will always terminate - the application after a call. If this is not wanted, ``SystemExit`` - needs to be caught. - - This method is also available by directly calling the instance of - a :class:`Command`. - - :param args: the arguments that should be used for parsing. If not - provided, ``sys.argv[1:]`` is used. - :param prog_name: the program name that should be used. By default - the program name is constructed by taking the file - name from ``sys.argv[0]``. - :param complete_var: the environment variable that controls the - bash completion support. The default is - ``"__COMPLETE"`` with prog_name in - uppercase. - :param standalone_mode: the default behavior is to invoke the script - in standalone mode. Click will then - handle exceptions and convert them into - error messages and the function will never - return but shut down the interpreter. If - this is set to `False` they will be - propagated to the caller and the return - value of this function is the return value - of :meth:`invoke`. - :param windows_expand_args: Expand glob patterns, user dir, and - env vars in command line args on Windows. - :param extra: extra keyword arguments are forwarded to the context - constructor. See :class:`Context` for more information. - - .. versionchanged:: 8.0.1 - Added the ``windows_expand_args`` parameter to allow - disabling command line arg expansion on Windows. - - .. 
versionchanged:: 8.0 - When taking arguments from ``sys.argv`` on Windows, glob - patterns, user dir, and env vars are expanded. - - .. versionchanged:: 3.0 - Added the ``standalone_mode`` parameter. - """ - if args is None: - args = sys.argv[1:] - - if os.name == "nt" and windows_expand_args: - args = _expand_args(args) - else: - args = list(args) - - if prog_name is None: - prog_name = _detect_program_name() - - # Process shell completion requests and exit early. - self._main_shell_completion(extra, prog_name, complete_var) - - try: - try: - with self.make_context(prog_name, args, **extra) as ctx: - rv = self.invoke(ctx) - if not standalone_mode: - return rv - # it's not safe to `ctx.exit(rv)` here! - # note that `rv` may actually contain data like "1" which - # has obvious effects - # more subtle case: `rv=[None, None]` can come out of - # chained commands which all returned `None` -- so it's not - # even always obvious that `rv` indicates success/failure - # by its truthiness/falsiness - ctx.exit() - except (EOFError, KeyboardInterrupt): - echo(file=sys.stderr) - raise Abort() from None - except ClickException as e: - if not standalone_mode: - raise - e.show() - sys.exit(e.exit_code) - except OSError as e: - if e.errno == errno.EPIPE: - sys.stdout = t.cast(t.TextIO, PacifyFlushWrapper(sys.stdout)) - sys.stderr = t.cast(t.TextIO, PacifyFlushWrapper(sys.stderr)) - sys.exit(1) - else: - raise - except Exit as e: - if standalone_mode: - sys.exit(e.exit_code) - else: - # in non-standalone mode, return the exit code - # note that this is only reached if `self.invoke` above raises - # an Exit explicitly -- thus bypassing the check there which - # would return its result - # the results of non-standalone execution may therefore be - # somewhat ambiguous: if there are codepaths which lead to - # `ctx.exit(1)` and to `return 1`, the caller won't be able to - # tell the difference between the two - return e.exit_code - except Abort: - if not standalone_mode: - raise - echo(_("Aborted!"), file=sys.stderr) - sys.exit(1) - - def _main_shell_completion( - self, - ctx_args: t.Dict[str, t.Any], - prog_name: str, - complete_var: t.Optional[str] = None, - ) -> None: - """Check if the shell is asking for tab completion, process - that, then exit early. Called from :meth:`main` before the - program is invoked. - - :param prog_name: Name of the executable in the shell. - :param complete_var: Name of the environment variable that holds - the completion instruction. Defaults to - ``_{PROG_NAME}_COMPLETE``. - """ - if complete_var is None: - complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper() - - instruction = os.environ.get(complete_var) - - if not instruction: - return - - from .shell_completion import shell_complete - - rv = shell_complete(self, ctx_args, prog_name, complete_var, instruction) - sys.exit(rv) - - def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Any: - """Alias for :meth:`main`.""" - return self.main(*args, **kwargs) - - -class Command(BaseCommand): - """Commands are the basic building block of command line interfaces in - Click. A basic command handles command line parsing and might dispatch - more parsing to commands nested below it. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - :param callback: the callback to invoke. This is optional. - :param params: the parameters to register with this command. 
This can - be either :class:`Option` or :class:`Argument` objects. - :param help: the help string to use for this command. - :param epilog: like the help string but it's printed at the end of the - help page after everything else. - :param short_help: the short help to use for this command. This is - shown on the command listing of the parent command. - :param add_help_option: by default each command registers a ``--help`` - option. This can be disabled by this parameter. - :param no_args_is_help: this controls what happens if no arguments are - provided. This option is disabled by default. - If enabled this will add ``--help`` as argument - if no arguments are passed - :param hidden: hide this command from help outputs. - - :param deprecated: issues a message indicating that - the command is deprecated. - - .. versionchanged:: 8.1 - ``help``, ``epilog``, and ``short_help`` are stored unprocessed, - all formatting is done when outputting help text, not at init, - and is done even if not using the ``@command`` decorator. - - .. versionchanged:: 8.0 - Added a ``repr`` showing the command name. - - .. versionchanged:: 7.1 - Added the ``no_args_is_help`` parameter. - - .. versionchanged:: 2.0 - Added the ``context_settings`` parameter. - """ - - def __init__( - self, - name: t.Optional[str], - context_settings: t.Optional[t.Dict[str, t.Any]] = None, - callback: t.Optional[t.Callable[..., t.Any]] = None, - params: t.Optional[t.List["Parameter"]] = None, - help: t.Optional[str] = None, - epilog: t.Optional[str] = None, - short_help: t.Optional[str] = None, - options_metavar: t.Optional[str] = "[OPTIONS]", - add_help_option: bool = True, - no_args_is_help: bool = False, - hidden: bool = False, - deprecated: bool = False, - ) -> None: - super().__init__(name, context_settings) - #: the callback to execute when the command fires. This might be - #: `None` in which case nothing happens. - self.callback = callback - #: the list of parameters for this command in the order they - #: should show up in the help page and execute. Eager parameters - #: will automatically be handled before non eager ones. - self.params: t.List["Parameter"] = params or [] - self.help = help - self.epilog = epilog - self.options_metavar = options_metavar - self.short_help = short_help - self.add_help_option = add_help_option - self.no_args_is_help = no_args_is_help - self.hidden = hidden - self.deprecated = deprecated - - def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]: - info_dict = super().to_info_dict(ctx) - info_dict.update( - params=[param.to_info_dict() for param in self.get_params(ctx)], - help=self.help, - epilog=self.epilog, - short_help=self.short_help, - hidden=self.hidden, - deprecated=self.deprecated, - ) - return info_dict - - def get_usage(self, ctx: Context) -> str: - """Formats the usage line into a string and returns it. - - Calls :meth:`format_usage` internally. - """ - formatter = ctx.make_formatter() - self.format_usage(ctx, formatter) - return formatter.getvalue().rstrip("\n") - - def get_params(self, ctx: Context) -> t.List["Parameter"]: - rv = self.params - help_option = self.get_help_option(ctx) - - if help_option is not None: - rv = [*rv, help_option] - - return rv - - def format_usage(self, ctx: Context, formatter: HelpFormatter) -> None: - """Writes the usage line into the formatter. - - This is a low-level method called by :meth:`get_usage`. 
- """ - pieces = self.collect_usage_pieces(ctx) - formatter.write_usage(ctx.command_path, " ".join(pieces)) - - def collect_usage_pieces(self, ctx: Context) -> t.List[str]: - """Returns all the pieces that go into the usage line and returns - it as a list of strings. - """ - rv = [self.options_metavar] if self.options_metavar else [] - - for param in self.get_params(ctx): - rv.extend(param.get_usage_pieces(ctx)) - - return rv - - def get_help_option_names(self, ctx: Context) -> t.List[str]: - """Returns the names for the help option.""" - all_names = set(ctx.help_option_names) - for param in self.params: - all_names.difference_update(param.opts) - all_names.difference_update(param.secondary_opts) - return list(all_names) - - def get_help_option(self, ctx: Context) -> t.Optional["Option"]: - """Returns the help option object.""" - help_options = self.get_help_option_names(ctx) - - if not help_options or not self.add_help_option: - return None - - def show_help(ctx: Context, param: "Parameter", value: str) -> None: - if value and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - return Option( - help_options, - is_flag=True, - is_eager=True, - expose_value=False, - callback=show_help, - help=_("Show this message and exit."), - ) - - def make_parser(self, ctx: Context) -> OptionParser: - """Creates the underlying option parser for this command.""" - parser = OptionParser(ctx) - for param in self.get_params(ctx): - param.add_to_parser(parser, ctx) - return parser - - def get_help(self, ctx: Context) -> str: - """Formats the help into a string and returns it. - - Calls :meth:`format_help` internally. - """ - formatter = ctx.make_formatter() - self.format_help(ctx, formatter) - return formatter.getvalue().rstrip("\n") - - def get_short_help_str(self, limit: int = 45) -> str: - """Gets short help for the command or makes it by shortening the - long help string. - """ - if self.short_help: - text = inspect.cleandoc(self.short_help) - elif self.help: - text = make_default_short_help(self.help, limit) - else: - text = "" - - if self.deprecated: - text = _("(Deprecated) {text}").format(text=text) - - return text.strip() - - def format_help(self, ctx: Context, formatter: HelpFormatter) -> None: - """Writes the help into the formatter if it exists. - - This is a low-level method called by :meth:`get_help`. 
- - This calls the following methods: - - - :meth:`format_usage` - - :meth:`format_help_text` - - :meth:`format_options` - - :meth:`format_epilog` - """ - self.format_usage(ctx, formatter) - self.format_help_text(ctx, formatter) - self.format_options(ctx, formatter) - self.format_epilog(ctx, formatter) - - def format_help_text(self, ctx: Context, formatter: HelpFormatter) -> None: - """Writes the help text to the formatter if it exists.""" - text = self.help if self.help is not None else "" - - if self.deprecated: - text = _("(Deprecated) {text}").format(text=text) - - if text: - text = inspect.cleandoc(text).partition("\f")[0] - formatter.write_paragraph() - - with formatter.indentation(): - formatter.write_text(text) - - def format_options(self, ctx: Context, formatter: HelpFormatter) -> None: - """Writes all the options into the formatter if they exist.""" - opts = [] - for param in self.get_params(ctx): - rv = param.get_help_record(ctx) - if rv is not None: - opts.append(rv) - - if opts: - with formatter.section(_("Options")): - formatter.write_dl(opts) - - def format_epilog(self, ctx: Context, formatter: HelpFormatter) -> None: - """Writes the epilog into the formatter if it exists.""" - if self.epilog: - epilog = inspect.cleandoc(self.epilog) - formatter.write_paragraph() - - with formatter.indentation(): - formatter.write_text(epilog) - - def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: - if not args and self.no_args_is_help and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - parser = self.make_parser(ctx) - opts, args, param_order = parser.parse_args(args=args) - - for param in iter_params_for_processing(param_order, self.get_params(ctx)): - value, args = param.handle_parse_result(ctx, opts, args) - - if args and not ctx.allow_extra_args and not ctx.resilient_parsing: - ctx.fail( - ngettext( - "Got unexpected extra argument ({args})", - "Got unexpected extra arguments ({args})", - len(args), - ).format(args=" ".join(map(str, args))) - ) - - ctx.args = args - ctx._opt_prefixes.update(parser._opt_prefixes) - return args - - def invoke(self, ctx: Context) -> t.Any: - """Given a context, this invokes the attached callback (if it exists) - in the right way. - """ - if self.deprecated: - message = _( - "DeprecationWarning: The command {name!r} is deprecated." - ).format(name=self.name) - echo(style(message, fg="red"), err=True) - - if self.callback is not None: - return ctx.invoke(self.callback, **ctx.params) - - def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: - """Return a list of completions for the incomplete value. Looks - at the names of options and chained multi-commands. - - :param ctx: Invocation context for this command. - :param incomplete: Value being completed. May be empty. - - .. 
versionadded:: 8.0 - """ - from click.shell_completion import CompletionItem - - results: t.List["CompletionItem"] = [] - - if incomplete and not incomplete[0].isalnum(): - for param in self.get_params(ctx): - if ( - not isinstance(param, Option) - or param.hidden - or ( - not param.multiple - and ctx.get_parameter_source(param.name) # type: ignore - is ParameterSource.COMMANDLINE - ) - ): - continue - - results.extend( - CompletionItem(name, help=param.help) - for name in [*param.opts, *param.secondary_opts] - if name.startswith(incomplete) - ) - - results.extend(super().shell_complete(ctx, incomplete)) - return results - - -class MultiCommand(Command): - """A multi command is the basic implementation of a command that - dispatches to subcommands. The most common version is the - :class:`Group`. - - :param invoke_without_command: this controls how the multi command itself - is invoked. By default it's only invoked - if a subcommand is provided. - :param no_args_is_help: this controls what happens if no arguments are - provided. This option is enabled by default if - `invoke_without_command` is disabled or disabled - if it's enabled. If enabled this will add - ``--help`` as argument if no arguments are - passed. - :param subcommand_metavar: the string that is used in the documentation - to indicate the subcommand place. - :param chain: if this is set to `True` chaining of multiple subcommands - is enabled. This restricts the form of commands in that - they cannot have optional arguments but it allows - multiple commands to be chained together. - :param result_callback: The result callback to attach to this multi - command. This can be set or changed later with the - :meth:`result_callback` decorator. - """ - - allow_extra_args = True - allow_interspersed_args = False - - def __init__( - self, - name: t.Optional[str] = None, - invoke_without_command: bool = False, - no_args_is_help: t.Optional[bool] = None, - subcommand_metavar: t.Optional[str] = None, - chain: bool = False, - result_callback: t.Optional[t.Callable[..., t.Any]] = None, - **attrs: t.Any, - ) -> None: - super().__init__(name, **attrs) - - if no_args_is_help is None: - no_args_is_help = not invoke_without_command - - self.no_args_is_help = no_args_is_help - self.invoke_without_command = invoke_without_command - - if subcommand_metavar is None: - if chain: - subcommand_metavar = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..." - else: - subcommand_metavar = "COMMAND [ARGS]..." - - self.subcommand_metavar = subcommand_metavar - self.chain = chain - # The result callback that is stored. This can be set or - # overridden with the :func:`result_callback` decorator. - self._result_callback = result_callback - - if self.chain: - for param in self.params: - if isinstance(param, Argument) and not param.required: - raise RuntimeError( - "Multi commands in chain mode cannot have" - " optional arguments." 
- ) - - def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]: - info_dict = super().to_info_dict(ctx) - commands = {} - - for name in self.list_commands(ctx): - command = self.get_command(ctx, name) - - if command is None: - continue - - sub_ctx = ctx._make_sub_context(command) - - with sub_ctx.scope(cleanup=False): - commands[name] = command.to_info_dict(sub_ctx) - - info_dict.update(commands=commands, chain=self.chain) - return info_dict - - def collect_usage_pieces(self, ctx: Context) -> t.List[str]: - rv = super().collect_usage_pieces(ctx) - rv.append(self.subcommand_metavar) - return rv - - def format_options(self, ctx: Context, formatter: HelpFormatter) -> None: - super().format_options(ctx, formatter) - self.format_commands(ctx, formatter) - - def result_callback(self, replace: bool = False) -> t.Callable[[F], F]: - """Adds a result callback to the command. By default if a - result callback is already registered this will chain them but - this can be disabled with the `replace` parameter. The result - callback is invoked with the return value of the subcommand - (or the list of return values from all subcommands if chaining - is enabled) as well as the parameters as they would be passed - to the main callback. - - Example:: - - @click.group() - @click.option('-i', '--input', default=23) - def cli(input): - return 42 - - @cli.result_callback() - def process_result(result, input): - return result + input - - :param replace: if set to `True` an already existing result - callback will be removed. - - .. versionchanged:: 8.0 - Renamed from ``resultcallback``. - - .. versionadded:: 3.0 - """ - - def decorator(f: F) -> F: - old_callback = self._result_callback - - if old_callback is None or replace: - self._result_callback = f - return f - - def function(__value, *args, **kwargs): # type: ignore - inner = old_callback(__value, *args, **kwargs) # type: ignore - return f(inner, *args, **kwargs) - - self._result_callback = rv = update_wrapper(t.cast(F, function), f) - return rv - - return decorator - - def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None: - """Extra format methods for multi methods that adds all the commands - after the options. - """ - commands = [] - for subcommand in self.list_commands(ctx): - cmd = self.get_command(ctx, subcommand) - # What is this, the tool lied about a command. 
Ignore it - if cmd is None: - continue - if cmd.hidden: - continue - - commands.append((subcommand, cmd)) - - # allow for 3 times the default spacing - if len(commands): - limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands) - - rows = [] - for subcommand, cmd in commands: - help = cmd.get_short_help_str(limit) - rows.append((subcommand, help)) - - if rows: - with formatter.section(_("Commands")): - formatter.write_dl(rows) - - def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: - if not args and self.no_args_is_help and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - rest = super().parse_args(ctx, args) - - if self.chain: - ctx.protected_args = rest - ctx.args = [] - elif rest: - ctx.protected_args, ctx.args = rest[:1], rest[1:] - - return ctx.args - - def invoke(self, ctx: Context) -> t.Any: - def _process_result(value: t.Any) -> t.Any: - if self._result_callback is not None: - value = ctx.invoke(self._result_callback, value, **ctx.params) - return value - - if not ctx.protected_args: - if self.invoke_without_command: - # No subcommand was invoked, so the result callback is - # invoked with the group return value for regular - # groups, or an empty list for chained groups. - with ctx: - rv = super().invoke(ctx) - return _process_result([] if self.chain else rv) - ctx.fail(_("Missing command.")) - - # Fetch args back out - args = [*ctx.protected_args, *ctx.args] - ctx.args = [] - ctx.protected_args = [] - - # If we're not in chain mode, we only allow the invocation of a - # single command but we also inform the current context about the - # name of the command to invoke. - if not self.chain: - # Make sure the context is entered so we do not clean up - # resources until the result processor has worked. - with ctx: - cmd_name, cmd, args = self.resolve_command(ctx, args) - assert cmd is not None - ctx.invoked_subcommand = cmd_name - super().invoke(ctx) - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) - with sub_ctx: - return _process_result(sub_ctx.command.invoke(sub_ctx)) - - # In chain mode we create the contexts step by step, but after the - # base command has been invoked. Because at that point we do not - # know the subcommands yet, the invoked subcommand attribute is - # set to ``*`` to inform the command that subcommands are executed - # but nothing else. - with ctx: - ctx.invoked_subcommand = "*" if args else None - super().invoke(ctx) - - # Otherwise we make every single context and invoke them in a - # chain. In that case the return value to the result processor - # is the list of all invoked subcommand's results. - contexts = [] - while args: - cmd_name, cmd, args = self.resolve_command(ctx, args) - assert cmd is not None - sub_ctx = cmd.make_context( - cmd_name, - args, - parent=ctx, - allow_extra_args=True, - allow_interspersed_args=False, - ) - contexts.append(sub_ctx) - args, sub_ctx.args = sub_ctx.args, [] - - rv = [] - for sub_ctx in contexts: - with sub_ctx: - rv.append(sub_ctx.command.invoke(sub_ctx)) - return _process_result(rv) - - def resolve_command( - self, ctx: Context, args: t.List[str] - ) -> t.Tuple[t.Optional[str], t.Optional[Command], t.List[str]]: - cmd_name = make_str(args[0]) - original_cmd_name = cmd_name - - # Get the command - cmd = self.get_command(ctx, cmd_name) - - # If we can't find the command but there is a normalization - # function available, we try with that one. 
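# Illustrative sketch of a chained group with a result callback, matching the chain-mode
# behaviour implemented in MultiCommand.invoke() above (command names are invented).
import click

@click.group(chain=True)
def cli():
    pass

@cli.command()
def lint():
    return "lint ok"

@cli.command()
def test():
    return "tests ok"

@cli.result_callback()
def summarize(results):
    # In chain mode the callback receives the list of every subcommand's return value.
    click.echo("; ".join(results))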
- if cmd is None and ctx.token_normalize_func is not None: - cmd_name = ctx.token_normalize_func(cmd_name) - cmd = self.get_command(ctx, cmd_name) - - # If we don't find the command we want to show an error message - # to the user that it was not provided. However, there is - # something else we should do: if the first argument looks like - # an option we want to kick off parsing again for arguments to - # resolve things like --help which now should go to the main - # place. - if cmd is None and not ctx.resilient_parsing: - if split_opt(cmd_name)[0]: - self.parse_args(ctx, ctx.args) - ctx.fail(_("No such command {name!r}.").format(name=original_cmd_name)) - return cmd_name if cmd else None, cmd, args[1:] - - def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]: - """Given a context and a command name, this returns a - :class:`Command` object if it exists or returns `None`. - """ - raise NotImplementedError - - def list_commands(self, ctx: Context) -> t.List[str]: - """Returns a list of subcommand names in the order they should - appear. - """ - return [] - - def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: - """Return a list of completions for the incomplete value. Looks - at the names of options, subcommands, and chained - multi-commands. - - :param ctx: Invocation context for this command. - :param incomplete: Value being completed. May be empty. - - .. versionadded:: 8.0 - """ - from click.shell_completion import CompletionItem - - results = [ - CompletionItem(name, help=command.get_short_help_str()) - for name, command in _complete_visible_commands(ctx, incomplete) - ] - results.extend(super().shell_complete(ctx, incomplete)) - return results - - -class Group(MultiCommand): - """A group allows a command to have subcommands attached. This is - the most common way to implement nesting in Click. - - :param name: The name of the group command. - :param commands: A dict mapping names to :class:`Command` objects. - Can also be a list of :class:`Command`, which will use - :attr:`Command.name` to create the dict. - :param attrs: Other command arguments described in - :class:`MultiCommand`, :class:`Command`, and - :class:`BaseCommand`. - - .. versionchanged:: 8.0 - The ``commmands`` argument can be a list of command objects. - """ - - #: If set, this is used by the group's :meth:`command` decorator - #: as the default :class:`Command` class. This is useful to make all - #: subcommands use a custom command class. - #: - #: .. versionadded:: 8.0 - command_class: t.Optional[t.Type[Command]] = None - - #: If set, this is used by the group's :meth:`group` decorator - #: as the default :class:`Group` class. This is useful to make all - #: subgroups use a custom group class. - #: - #: If set to the special value :class:`type` (literally - #: ``group_class = type``), this group's class will be used as the - #: default class. This makes a custom group class continue to make - #: custom groups. - #: - #: .. 
versionadded:: 8.0 - group_class: t.Optional[t.Union[t.Type["Group"], t.Type[type]]] = None - # Literal[type] isn't valid, so use Type[type] - - def __init__( - self, - name: t.Optional[str] = None, - commands: t.Optional[t.Union[t.Dict[str, Command], t.Sequence[Command]]] = None, - **attrs: t.Any, - ) -> None: - super().__init__(name, **attrs) - - if commands is None: - commands = {} - elif isinstance(commands, abc.Sequence): - commands = {c.name: c for c in commands if c.name is not None} - - #: The registered subcommands by their exported names. - self.commands: t.Dict[str, Command] = commands - - def add_command(self, cmd: Command, name: t.Optional[str] = None) -> None: - """Registers another :class:`Command` with this group. If the name - is not provided, the name of the command is used. - """ - name = name or cmd.name - if name is None: - raise TypeError("Command has no name.") - _check_multicommand(self, name, cmd, register=True) - self.commands[name] = cmd - - @t.overload - def command(self, __func: t.Callable[..., t.Any]) -> Command: - ... - - @t.overload - def command( - self, *args: t.Any, **kwargs: t.Any - ) -> t.Callable[[t.Callable[..., t.Any]], Command]: - ... - - def command( - self, *args: t.Any, **kwargs: t.Any - ) -> t.Union[t.Callable[[t.Callable[..., t.Any]], Command], Command]: - """A shortcut decorator for declaring and attaching a command to - the group. This takes the same arguments as :func:`command` and - immediately registers the created command with this group by - calling :meth:`add_command`. - - To customize the command class used, set the - :attr:`command_class` attribute. - - .. versionchanged:: 8.1 - This decorator can be applied without parentheses. - - .. versionchanged:: 8.0 - Added the :attr:`command_class` attribute. - """ - from .decorators import command - - if self.command_class and kwargs.get("cls") is None: - kwargs["cls"] = self.command_class - - func: t.Optional[t.Callable] = None - - if args and callable(args[0]): - assert ( - len(args) == 1 and not kwargs - ), "Use 'command(**kwargs)(callable)' to provide arguments." - (func,) = args - args = () - - def decorator(f: t.Callable[..., t.Any]) -> Command: - cmd: Command = command(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - - if func is not None: - return decorator(func) - - return decorator - - @t.overload - def group(self, __func: t.Callable[..., t.Any]) -> "Group": - ... - - @t.overload - def group( - self, *args: t.Any, **kwargs: t.Any - ) -> t.Callable[[t.Callable[..., t.Any]], "Group"]: - ... - - def group( - self, *args: t.Any, **kwargs: t.Any - ) -> t.Union[t.Callable[[t.Callable[..., t.Any]], "Group"], "Group"]: - """A shortcut decorator for declaring and attaching a group to - the group. This takes the same arguments as :func:`group` and - immediately registers the created group with this group by - calling :meth:`add_command`. - - To customize the group class used, set the :attr:`group_class` - attribute. - - .. versionchanged:: 8.1 - This decorator can be applied without parentheses. - - .. versionchanged:: 8.0 - Added the :attr:`group_class` attribute. - """ - from .decorators import group - - func: t.Optional[t.Callable] = None - - if args and callable(args[0]): - assert ( - len(args) == 1 and not kwargs - ), "Use 'group(**kwargs)(callable)' to provide arguments." 
- (func,) = args - args = () - - if self.group_class is not None and kwargs.get("cls") is None: - if self.group_class is type: - kwargs["cls"] = type(self) - else: - kwargs["cls"] = self.group_class - - def decorator(f: t.Callable[..., t.Any]) -> "Group": - cmd: Group = group(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - - if func is not None: - return decorator(func) - - return decorator - - def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]: - return self.commands.get(cmd_name) - - def list_commands(self, ctx: Context) -> t.List[str]: - return sorted(self.commands) - - -class CommandCollection(MultiCommand): - """A command collection is a multi command that merges multiple multi - commands together into one. This is a straightforward implementation - that accepts a list of different multi commands as sources and - provides all the commands for each of them. - """ - - def __init__( - self, - name: t.Optional[str] = None, - sources: t.Optional[t.List[MultiCommand]] = None, - **attrs: t.Any, - ) -> None: - super().__init__(name, **attrs) - #: The list of registered multi commands. - self.sources: t.List[MultiCommand] = sources or [] - - def add_source(self, multi_cmd: MultiCommand) -> None: - """Adds a new multi command to the chain dispatcher.""" - self.sources.append(multi_cmd) - - def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]: - for source in self.sources: - rv = source.get_command(ctx, cmd_name) - - if rv is not None: - if self.chain: - _check_multicommand(self, cmd_name, rv) - - return rv - - return None - - def list_commands(self, ctx: Context) -> t.List[str]: - rv: t.Set[str] = set() - - for source in self.sources: - rv.update(source.list_commands(ctx)) - - return sorted(rv) - - -def _check_iter(value: t.Any) -> t.Iterator[t.Any]: - """Check if the value is iterable but not a string. Raises a type - error, or return an iterator over the value. - """ - if isinstance(value, str): - raise TypeError - - return iter(value) - - -class Parameter: - r"""A parameter to a command comes in two versions: they are either - :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently - not supported by design as some of the internals for parsing are - intentionally not finalized. - - Some settings are supported by both options and arguments. - - :param param_decls: the parameter declarations for this option or - argument. This is a list of flags or argument - names. - :param type: the type that should be used. Either a :class:`ParamType` - or a Python type. The later is converted into the former - automatically if supported. - :param required: controls if this is optional or not. - :param default: the default value if omitted. This can also be a callable, - in which case it's invoked when the default is needed - without any arguments. - :param callback: A function to further process or validate the value - after type conversion. It is called as ``f(ctx, param, value)`` - and must return the value. It is called for all sources, - including prompts. - :param nargs: the number of arguments to match. If not ``1`` the return - value is a tuple instead of single value. The default for - nargs is ``1`` (except if the type is a tuple, then it's - the arity of the tuple). If ``nargs=-1``, all remaining - parameters are collected. - :param metavar: how the value is represented in the help page. 
- :param expose_value: if this is `True` then the value is passed onwards - to the command callback and stored on the context, - otherwise it's skipped. - :param is_eager: eager values are processed before non eager ones. This - should not be set for arguments or it will inverse the - order of processing. - :param envvar: a string or list of strings that are environment variables - that should be checked. - :param shell_complete: A function that returns custom shell - completions. Used instead of the param's type completion if - given. Takes ``ctx, param, incomplete`` and must return a list - of :class:`~click.shell_completion.CompletionItem` or a list of - strings. - - .. versionchanged:: 8.0 - ``process_value`` validates required parameters and bounded - ``nargs``, and invokes the parameter callback before returning - the value. This allows the callback to validate prompts. - ``full_process_value`` is removed. - - .. versionchanged:: 8.0 - ``autocompletion`` is renamed to ``shell_complete`` and has new - semantics described above. The old name is deprecated and will - be removed in 8.1, until then it will be wrapped to match the - new requirements. - - .. versionchanged:: 8.0 - For ``multiple=True, nargs>1``, the default must be a list of - tuples. - - .. versionchanged:: 8.0 - Setting a default is no longer required for ``nargs>1``, it will - default to ``None``. ``multiple=True`` or ``nargs=-1`` will - default to ``()``. - - .. versionchanged:: 7.1 - Empty environment variables are ignored rather than taking the - empty string value. This makes it possible for scripts to clear - variables if they can't unset them. - - .. versionchanged:: 2.0 - Changed signature for parameter callback to also be passed the - parameter. The old callback format will still work, but it will - raise a warning to give you a chance to migrate the code easier. - """ - - param_type_name = "parameter" - - def __init__( - self, - param_decls: t.Optional[t.Sequence[str]] = None, - type: t.Optional[t.Union[types.ParamType, t.Any]] = None, - required: bool = False, - default: t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]] = None, - callback: t.Optional[t.Callable[[Context, "Parameter", t.Any], t.Any]] = None, - nargs: t.Optional[int] = None, - multiple: bool = False, - metavar: t.Optional[str] = None, - expose_value: bool = True, - is_eager: bool = False, - envvar: t.Optional[t.Union[str, t.Sequence[str]]] = None, - shell_complete: t.Optional[ - t.Callable[ - [Context, "Parameter", str], - t.Union[t.List["CompletionItem"], t.List[str]], - ] - ] = None, - ) -> None: - self.name, self.opts, self.secondary_opts = self._parse_decls( - param_decls or (), expose_value - ) - self.type = types.convert_type(type, default) - - # Default nargs to what the type tells us if we have that - # information available. - if nargs is None: - if self.type.is_composite: - nargs = self.type.arity - else: - nargs = 1 - - self.required = required - self.callback = callback - self.nargs = nargs - self.multiple = multiple - self.expose_value = expose_value - self.default = default - self.is_eager = is_eager - self.metavar = metavar - self.envvar = envvar - self._custom_shell_complete = shell_complete - - if __debug__: - if self.type.is_composite and nargs != self.type.arity: - raise ValueError( - f"'nargs' must be {self.type.arity} (or None) for" - f" type {self.type!r}, but it was {nargs}." - ) - - # Skip no default or callable default. 
- check_default = default if not callable(default) else None - - if check_default is not None: - if multiple: - try: - # Only check the first value against nargs. - check_default = next(_check_iter(check_default), None) - except TypeError: - raise ValueError( - "'default' must be a list when 'multiple' is true." - ) from None - - # Can be None for multiple with empty default. - if nargs != 1 and check_default is not None: - try: - _check_iter(check_default) - except TypeError: - if multiple: - message = ( - "'default' must be a list of lists when 'multiple' is" - " true and 'nargs' != 1." - ) - else: - message = "'default' must be a list when 'nargs' != 1." - - raise ValueError(message) from None - - if nargs > 1 and len(check_default) != nargs: - subject = "item length" if multiple else "length" - raise ValueError( - f"'default' {subject} must match nargs={nargs}." - ) - - def to_info_dict(self) -> t.Dict[str, t.Any]: - """Gather information that could be useful for a tool generating - user-facing documentation. - - Use :meth:`click.Context.to_info_dict` to traverse the entire - CLI structure. - - .. versionadded:: 8.0 - """ - return { - "name": self.name, - "param_type_name": self.param_type_name, - "opts": self.opts, - "secondary_opts": self.secondary_opts, - "type": self.type.to_info_dict(), - "required": self.required, - "nargs": self.nargs, - "multiple": self.multiple, - "default": self.default, - "envvar": self.envvar, - } - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} {self.name}>" - - def _parse_decls( - self, decls: t.Sequence[str], expose_value: bool - ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: - raise NotImplementedError() - - @property - def human_readable_name(self) -> str: - """Returns the human readable name of this parameter. This is the - same as the name for options, but the metavar for arguments. - """ - return self.name # type: ignore - - def make_metavar(self) -> str: - if self.metavar is not None: - return self.metavar - - metavar = self.type.get_metavar(self) - - if metavar is None: - metavar = self.type.name.upper() - - if self.nargs != 1: - metavar += "..." - - return metavar - - @t.overload - def get_default( - self, ctx: Context, call: "te.Literal[True]" = True - ) -> t.Optional[t.Any]: - ... - - @t.overload - def get_default( - self, ctx: Context, call: bool = ... - ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: - ... - - def get_default( - self, ctx: Context, call: bool = True - ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: - """Get the default for the parameter. Tries - :meth:`Context.lookup_default` first, then the local default. - - :param ctx: Current context. - :param call: If the default is a callable, call it. Disable to - return the callable instead. - - .. versionchanged:: 8.0.2 - Type casting is no longer performed when getting a default. - - .. versionchanged:: 8.0.1 - Type casting can fail in resilient parsing mode. Invalid - defaults will not prevent showing help text. - - .. versionchanged:: 8.0 - Looks at ``ctx.default_map`` first. - - .. versionchanged:: 8.0 - Added the ``call`` parameter. 
- """ - value = ctx.lookup_default(self.name, call=False) # type: ignore - - if value is None: - value = self.default - - if call and callable(value): - value = value() - - return value - - def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: - raise NotImplementedError() - - def consume_value( - self, ctx: Context, opts: t.Mapping[str, t.Any] - ) -> t.Tuple[t.Any, ParameterSource]: - value = opts.get(self.name) # type: ignore - source = ParameterSource.COMMANDLINE - - if value is None: - value = self.value_from_envvar(ctx) - source = ParameterSource.ENVIRONMENT - - if value is None: - value = ctx.lookup_default(self.name) # type: ignore - source = ParameterSource.DEFAULT_MAP - - if value is None: - value = self.get_default(ctx) - source = ParameterSource.DEFAULT - - return value, source - - def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any: - """Convert and validate a value against the option's - :attr:`type`, :attr:`multiple`, and :attr:`nargs`. - """ - if value is None: - return () if self.multiple or self.nargs == -1 else None - - def check_iter(value: t.Any) -> t.Iterator: - try: - return _check_iter(value) - except TypeError: - # This should only happen when passing in args manually, - # the parser should construct an iterable when parsing - # the command line. - raise BadParameter( - _("Value must be an iterable."), ctx=ctx, param=self - ) from None - - if self.nargs == 1 or self.type.is_composite: - convert: t.Callable[[t.Any], t.Any] = partial( - self.type, param=self, ctx=ctx - ) - elif self.nargs == -1: - - def convert(value: t.Any) -> t.Tuple: - return tuple(self.type(x, self, ctx) for x in check_iter(value)) - - else: # nargs > 1 - - def convert(value: t.Any) -> t.Tuple: - value = tuple(check_iter(value)) - - if len(value) != self.nargs: - raise BadParameter( - ngettext( - "Takes {nargs} values but 1 was given.", - "Takes {nargs} values but {len} were given.", - len(value), - ).format(nargs=self.nargs, len=len(value)), - ctx=ctx, - param=self, - ) - - return tuple(self.type(x, self, ctx) for x in value) - - if self.multiple: - return tuple(convert(x) for x in check_iter(value)) - - return convert(value) - - def value_is_missing(self, value: t.Any) -> bool: - if value is None: - return True - - if (self.nargs != 1 or self.multiple) and value == (): - return True - - return False - - def process_value(self, ctx: Context, value: t.Any) -> t.Any: - value = self.type_cast_value(ctx, value) - - if self.required and self.value_is_missing(value): - raise MissingParameter(ctx=ctx, param=self) - - if self.callback is not None: - value = self.callback(ctx, self, value) - - return value - - def resolve_envvar_value(self, ctx: Context) -> t.Optional[str]: - if self.envvar is None: - return None - - if isinstance(self.envvar, str): - rv = os.environ.get(self.envvar) - - if rv: - return rv - else: - for envvar in self.envvar: - rv = os.environ.get(envvar) - - if rv: - return rv - - return None - - def value_from_envvar(self, ctx: Context) -> t.Optional[t.Any]: - rv: t.Optional[t.Any] = self.resolve_envvar_value(ctx) - - if rv is not None and self.nargs != 1: - rv = self.type.split_envvar_value(rv) - - return rv - - def handle_parse_result( - self, ctx: Context, opts: t.Mapping[str, t.Any], args: t.List[str] - ) -> t.Tuple[t.Any, t.List[str]]: - with augment_usage_errors(ctx, param=self): - value, source = self.consume_value(ctx, opts) - ctx.set_parameter_source(self.name, source) # type: ignore - - try: - value = self.process_value(ctx, value) - except Exception: 
- if not ctx.resilient_parsing: - raise - - value = None - - if self.expose_value: - ctx.params[self.name] = value # type: ignore - - return value, args - - def get_help_record(self, ctx: Context) -> t.Optional[t.Tuple[str, str]]: - pass - - def get_usage_pieces(self, ctx: Context) -> t.List[str]: - return [] - - def get_error_hint(self, ctx: Context) -> str: - """Get a stringified version of the param for use in error messages to - indicate which param caused the error. - """ - hint_list = self.opts or [self.human_readable_name] - return " / ".join(f"'{x}'" for x in hint_list) - - def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: - """Return a list of completions for the incomplete value. If a - ``shell_complete`` function was given during init, it is used. - Otherwise, the :attr:`type` - :meth:`~click.types.ParamType.shell_complete` function is used. - - :param ctx: Invocation context for this command. - :param incomplete: Value being completed. May be empty. - - .. versionadded:: 8.0 - """ - if self._custom_shell_complete is not None: - results = self._custom_shell_complete(ctx, self, incomplete) - - if results and isinstance(results[0], str): - from click.shell_completion import CompletionItem - - results = [CompletionItem(c) for c in results] - - return t.cast(t.List["CompletionItem"], results) - - return self.type.shell_complete(ctx, self, incomplete) - - -class Option(Parameter): - """Options are usually optional values on the command line and - have some extra features that arguments don't have. - - All other parameters are passed onwards to the parameter constructor. - - :param show_default: Show the default value for this option in its - help text. Values are not shown by default, unless - :attr:`Context.show_default` is ``True``. If this value is a - string, it shows that string in parentheses instead of the - actual value. This is particularly useful for dynamic options. - For single option boolean flags, the default remains hidden if - its value is ``False``. - :param show_envvar: Controls if an environment variable should be - shown on the help page. Normally, environment variables are not - shown. - :param prompt: If set to ``True`` or a non empty string then the - user will be prompted for input. If set to ``True`` the prompt - will be the option name capitalized. - :param confirmation_prompt: Prompt a second time to confirm the - value if it was prompted for. Can be set to a string instead of - ``True`` to customize the message. - :param prompt_required: If set to ``False``, the user will be - prompted for input only when the option was specified as a flag - without a value. - :param hide_input: If this is ``True`` then the input on the prompt - will be hidden from the user. This is useful for password input. - :param is_flag: forces this option to act as a flag. The default is - auto detection. - :param flag_value: which value should be used for this flag if it's - enabled. This is set to a boolean automatically if - the option string contains a slash to mark two options. - :param multiple: if this is set to `True` then the argument is accepted - multiple times and recorded. This is similar to ``nargs`` - in how it works but supports arbitrary number of - arguments. - :param count: this flag makes an option increment an integer. - :param allow_from_autoenv: if this is enabled then the value of this - parameter will be pulled from an environment - variable in case a prefix is defined on the - context. - :param help: the help string. 
- :param hidden: hide this option from help outputs. - - .. versionchanged:: 8.1.0 - Help text indentation is cleaned here instead of only in the - ``@option`` decorator. - - .. versionchanged:: 8.1.0 - The ``show_default`` parameter overrides - ``Context.show_default``. - - .. versionchanged:: 8.1.0 - The default of a single option boolean flag is not shown if the - default value is ``False``. - - .. versionchanged:: 8.0.1 - ``type`` is detected from ``flag_value`` if given. - """ - - param_type_name = "option" - - def __init__( - self, - param_decls: t.Optional[t.Sequence[str]] = None, - show_default: t.Union[bool, str, None] = None, - prompt: t.Union[bool, str] = False, - confirmation_prompt: t.Union[bool, str] = False, - prompt_required: bool = True, - hide_input: bool = False, - is_flag: t.Optional[bool] = None, - flag_value: t.Optional[t.Any] = None, - multiple: bool = False, - count: bool = False, - allow_from_autoenv: bool = True, - type: t.Optional[t.Union[types.ParamType, t.Any]] = None, - help: t.Optional[str] = None, - hidden: bool = False, - show_choices: bool = True, - show_envvar: bool = False, - **attrs: t.Any, - ) -> None: - if help: - help = inspect.cleandoc(help) - - default_is_missing = "default" not in attrs - super().__init__(param_decls, type=type, multiple=multiple, **attrs) - - if prompt is True: - if self.name is None: - raise TypeError("'name' is required with 'prompt=True'.") - - prompt_text: t.Optional[str] = self.name.replace("_", " ").capitalize() - elif prompt is False: - prompt_text = None - else: - prompt_text = prompt - - self.prompt = prompt_text - self.confirmation_prompt = confirmation_prompt - self.prompt_required = prompt_required - self.hide_input = hide_input - self.hidden = hidden - - # If prompt is enabled but not required, then the option can be - # used as a flag to indicate using prompt or flag_value. - self._flag_needs_value = self.prompt is not None and not self.prompt_required - - if is_flag is None: - if flag_value is not None: - # Implicitly a flag because flag_value was set. - is_flag = True - elif self._flag_needs_value: - # Not a flag, but when used as a flag it shows a prompt. - is_flag = False - else: - # Implicitly a flag because flag options were given. - is_flag = bool(self.secondary_opts) - elif is_flag is False and not self._flag_needs_value: - # Not a flag, and prompt is not enabled, can be used as a - # flag if flag_value is set. - self._flag_needs_value = flag_value is not None - - if is_flag and default_is_missing and not self.required: - self.default: t.Union[t.Any, t.Callable[[], t.Any]] = False - - if flag_value is None: - flag_value = not self.default - - if is_flag and type is None: - # Re-guess the type from the flag value instead of the - # default. 
- self.type = types.convert_type(None, flag_value) - - self.is_flag: bool = is_flag - self.is_bool_flag = is_flag and isinstance(self.type, types.BoolParamType) - self.flag_value: t.Any = flag_value - - # Counting - self.count = count - if count: - if type is None: - self.type = types.IntRange(min=0) - if default_is_missing: - self.default = 0 - - self.allow_from_autoenv = allow_from_autoenv - self.help = help - self.show_default = show_default - self.show_choices = show_choices - self.show_envvar = show_envvar - - if __debug__: - if self.nargs == -1: - raise TypeError("nargs=-1 is not supported for options.") - - if self.prompt and self.is_flag and not self.is_bool_flag: - raise TypeError("'prompt' is not valid for non-boolean flag.") - - if not self.is_bool_flag and self.secondary_opts: - raise TypeError("Secondary flag is not valid for non-boolean flag.") - - if self.is_bool_flag and self.hide_input and self.prompt is not None: - raise TypeError( - "'prompt' with 'hide_input' is not valid for boolean flag." - ) - - if self.count: - if self.multiple: - raise TypeError("'count' is not valid with 'multiple'.") - - if self.is_flag: - raise TypeError("'count' is not valid with 'is_flag'.") - - if self.multiple and self.is_flag: - raise TypeError("'multiple' is not valid with 'is_flag', use 'count'.") - - def to_info_dict(self) -> t.Dict[str, t.Any]: - info_dict = super().to_info_dict() - info_dict.update( - help=self.help, - prompt=self.prompt, - is_flag=self.is_flag, - flag_value=self.flag_value, - count=self.count, - hidden=self.hidden, - ) - return info_dict - - def _parse_decls( - self, decls: t.Sequence[str], expose_value: bool - ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: - opts = [] - secondary_opts = [] - name = None - possible_names = [] - - for decl in decls: - if decl.isidentifier(): - if name is not None: - raise TypeError(f"Name '{name}' defined twice") - name = decl - else: - split_char = ";" if decl[:1] == "/" else "/" - if split_char in decl: - first, second = decl.split(split_char, 1) - first = first.rstrip() - if first: - possible_names.append(split_opt(first)) - opts.append(first) - second = second.lstrip() - if second: - secondary_opts.append(second.lstrip()) - if first == second: - raise ValueError( - f"Boolean option {decl!r} cannot use the" - " same flag for true/false." - ) - else: - possible_names.append(split_opt(decl)) - opts.append(decl) - - if name is None and possible_names: - possible_names.sort(key=lambda x: -len(x[0])) # group long options first - name = possible_names[0][1].replace("-", "_").lower() - if not name.isidentifier(): - name = None - - if name is None: - if not expose_value: - return None, opts, secondary_opts - raise TypeError("Could not determine name for option") - - if not opts and not secondary_opts: - raise TypeError( - f"No options defined but a name was passed ({name})." - " Did you mean to declare an argument instead? Did" - f" you mean to pass '--{name}'?" 
- ) - - return name, opts, secondary_opts - - def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: - if self.multiple: - action = "append" - elif self.count: - action = "count" - else: - action = "store" - - if self.is_flag: - action = f"{action}_const" - - if self.is_bool_flag and self.secondary_opts: - parser.add_option( - obj=self, opts=self.opts, dest=self.name, action=action, const=True - ) - parser.add_option( - obj=self, - opts=self.secondary_opts, - dest=self.name, - action=action, - const=False, - ) - else: - parser.add_option( - obj=self, - opts=self.opts, - dest=self.name, - action=action, - const=self.flag_value, - ) - else: - parser.add_option( - obj=self, - opts=self.opts, - dest=self.name, - action=action, - nargs=self.nargs, - ) - - def get_help_record(self, ctx: Context) -> t.Optional[t.Tuple[str, str]]: - if self.hidden: - return None - - any_prefix_is_slash = False - - def _write_opts(opts: t.Sequence[str]) -> str: - nonlocal any_prefix_is_slash - - rv, any_slashes = join_options(opts) - - if any_slashes: - any_prefix_is_slash = True - - if not self.is_flag and not self.count: - rv += f" {self.make_metavar()}" - - return rv - - rv = [_write_opts(self.opts)] - - if self.secondary_opts: - rv.append(_write_opts(self.secondary_opts)) - - help = self.help or "" - extra = [] - - if self.show_envvar: - envvar = self.envvar - - if envvar is None: - if ( - self.allow_from_autoenv - and ctx.auto_envvar_prefix is not None - and self.name is not None - ): - envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" - - if envvar is not None: - var_str = ( - envvar - if isinstance(envvar, str) - else ", ".join(str(d) for d in envvar) - ) - extra.append(_("env var: {var}").format(var=var_str)) - - # Temporarily enable resilient parsing to avoid type casting - # failing for the default. Might be possible to extend this to - # help formatting in general. - resilient = ctx.resilient_parsing - ctx.resilient_parsing = True - - try: - default_value = self.get_default(ctx, call=False) - finally: - ctx.resilient_parsing = resilient - - show_default = False - show_default_is_str = False - - if self.show_default is not None: - if isinstance(self.show_default, str): - show_default_is_str = show_default = True - else: - show_default = self.show_default - elif ctx.show_default is not None: - show_default = ctx.show_default - - if show_default_is_str or (show_default and (default_value is not None)): - if show_default_is_str: - default_string = f"({self.show_default})" - elif isinstance(default_value, (list, tuple)): - default_string = ", ".join(str(d) for d in default_value) - elif inspect.isfunction(default_value): - default_string = _("(dynamic)") - elif self.is_bool_flag and self.secondary_opts: - # For boolean flags that have distinct True/False opts, - # use the opt without prefix instead of the value. 
- default_string = split_opt( - (self.opts if self.default else self.secondary_opts)[0] - )[1] - elif self.is_bool_flag and not self.secondary_opts and not default_value: - default_string = "" - else: - default_string = str(default_value) - - if default_string: - extra.append(_("default: {default}").format(default=default_string)) - - if ( - isinstance(self.type, types._NumberRangeBase) - # skip count with default range type - and not (self.count and self.type.min == 0 and self.type.max is None) - ): - range_str = self.type._describe_range() - - if range_str: - extra.append(range_str) - - if self.required: - extra.append(_("required")) - - if extra: - extra_str = "; ".join(extra) - help = f"{help} [{extra_str}]" if help else f"[{extra_str}]" - - return ("; " if any_prefix_is_slash else " / ").join(rv), help - - @t.overload - def get_default( - self, ctx: Context, call: "te.Literal[True]" = True - ) -> t.Optional[t.Any]: - ... - - @t.overload - def get_default( - self, ctx: Context, call: bool = ... - ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: - ... - - def get_default( - self, ctx: Context, call: bool = True - ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: - # If we're a non boolean flag our default is more complex because - # we need to look at all flags in the same group to figure out - # if we're the default one in which case we return the flag - # value as default. - if self.is_flag and not self.is_bool_flag: - for param in ctx.command.params: - if param.name == self.name and param.default: - return param.flag_value # type: ignore - - return None - - return super().get_default(ctx, call=call) - - def prompt_for_value(self, ctx: Context) -> t.Any: - """This is an alternative flow that can be activated in the full - value processing if a value does not exist. It will prompt the - user until a valid value exists and then returns the processed - value as result. - """ - assert self.prompt is not None - - # Calculate the default before prompting anything to be stable. - default = self.get_default(ctx) - - # If this is a prompt for a flag we need to handle this - # differently. - if self.is_bool_flag: - return confirm(self.prompt, default) - - return prompt( - self.prompt, - default=default, - type=self.type, - hide_input=self.hide_input, - show_choices=self.show_choices, - confirmation_prompt=self.confirmation_prompt, - value_proc=lambda x: self.process_value(ctx, x), - ) - - def resolve_envvar_value(self, ctx: Context) -> t.Optional[str]: - rv = super().resolve_envvar_value(ctx) - - if rv is not None: - return rv - - if ( - self.allow_from_autoenv - and ctx.auto_envvar_prefix is not None - and self.name is not None - ): - envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" - rv = os.environ.get(envvar) - - if rv: - return rv - - return None - - def value_from_envvar(self, ctx: Context) -> t.Optional[t.Any]: - rv: t.Optional[t.Any] = self.resolve_envvar_value(ctx) - - if rv is None: - return None - - value_depth = (self.nargs != 1) + bool(self.multiple) - - if value_depth > 0: - rv = self.type.split_envvar_value(rv) - - if self.multiple and self.nargs != 1: - rv = batch(rv, self.nargs) - - return rv - - def consume_value( - self, ctx: Context, opts: t.Mapping[str, "Parameter"] - ) -> t.Tuple[t.Any, ParameterSource]: - value, source = super().consume_value(ctx, opts) - - # The parser will emit a sentinel value if the option can be - # given as a flag without a value. This is different from None - # to distinguish from the flag not being given at all. 
- if value is _flag_needs_value: - if self.prompt is not None and not ctx.resilient_parsing: - value = self.prompt_for_value(ctx) - source = ParameterSource.PROMPT - else: - value = self.flag_value - source = ParameterSource.COMMANDLINE - - elif ( - self.multiple - and value is not None - and any(v is _flag_needs_value for v in value) - ): - value = [self.flag_value if v is _flag_needs_value else v for v in value] - source = ParameterSource.COMMANDLINE - - # The value wasn't set, or used the param's default, prompt if - # prompting is enabled. - elif ( - source in {None, ParameterSource.DEFAULT} - and self.prompt is not None - and (self.required or self.prompt_required) - and not ctx.resilient_parsing - ): - value = self.prompt_for_value(ctx) - source = ParameterSource.PROMPT - - return value, source - - -class Argument(Parameter): - """Arguments are positional parameters to a command. They generally - provide fewer features than options but can have infinite ``nargs`` - and are required by default. - - All parameters are passed onwards to the parameter constructor. - """ - - param_type_name = "argument" - - def __init__( - self, - param_decls: t.Sequence[str], - required: t.Optional[bool] = None, - **attrs: t.Any, - ) -> None: - if required is None: - if attrs.get("default") is not None: - required = False - else: - required = attrs.get("nargs", 1) > 0 - - if "multiple" in attrs: - raise TypeError("__init__() got an unexpected keyword argument 'multiple'.") - - super().__init__(param_decls, required=required, **attrs) - - if __debug__: - if self.default is not None and self.nargs == -1: - raise TypeError("'default' is not supported for nargs=-1.") - - @property - def human_readable_name(self) -> str: - if self.metavar is not None: - return self.metavar - return self.name.upper() # type: ignore - - def make_metavar(self) -> str: - if self.metavar is not None: - return self.metavar - var = self.type.get_metavar(self) - if not var: - var = self.name.upper() # type: ignore - if not self.required: - var = f"[{var}]" - if self.nargs != 1: - var += "..." - return var - - def _parse_decls( - self, decls: t.Sequence[str], expose_value: bool - ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: - if not decls: - if not expose_value: - return None, [], [] - raise TypeError("Could not determine name for argument") - if len(decls) == 1: - name = arg = decls[0] - name = name.replace("-", "_").lower() - else: - raise TypeError( - "Arguments take exactly one parameter declaration, got" - f" {len(decls)}." 
- ) - return name, [arg], [] - - def get_usage_pieces(self, ctx: Context) -> t.List[str]: - return [self.make_metavar()] - - def get_error_hint(self, ctx: Context) -> str: - return f"'{self.make_metavar()}'" - - def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: - parser.add_argument(dest=self.name, nargs=self.nargs, obj=self) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/pens/svgPathPen.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/pens/svgPathPen.py deleted file mode 100644 index 38b3e6bec6f5857221d9d5029b764bdd983414b6..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/pens/svgPathPen.py +++ /dev/null @@ -1,286 +0,0 @@ -from typing import Callable -from fontTools.pens.basePen import BasePen - - -def pointToString(pt, ntos=str): - return " ".join(ntos(i) for i in pt) - - -class SVGPathPen(BasePen): - """Pen to draw SVG path d commands. - - Example:: - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((1, 1)) - >>> pen.curveTo((2, 2), (3, 3), (4, 4)) - >>> pen.closePath() - >>> pen.getCommands() - 'M0 0 1 1C2 2 3 3 4 4Z' - - Args: - glyphSet: a dictionary of drawable glyph objects keyed by name - used to resolve component references in composite glyphs. - ntos: a callable that takes a number and returns a string, to - customize how numbers are formatted (default: str). - - Note: - Fonts have a coordinate system where Y grows up, whereas in SVG, - Y grows down. As such, rendering path data from this pen in - SVG typically results in upside-down glyphs. You can fix this - by wrapping the data from this pen in an SVG group element with - transform, or wrap this pen in a transform pen. 
For example: - - spen = svgPathPen.SVGPathPen(glyphset) - pen= TransformPen(spen , (1, 0, 0, -1, 0, 0)) - glyphset[glyphname].draw(pen) - print(tpen.getCommands()) - """ - - def __init__(self, glyphSet, ntos: Callable[[float], str] = str): - BasePen.__init__(self, glyphSet) - self._commands = [] - self._lastCommand = None - self._lastX = None - self._lastY = None - self._ntos = ntos - - def _handleAnchor(self): - """ - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen.moveTo((10, 10)) - >>> pen._commands - ['M10 10'] - """ - if self._lastCommand == "M": - self._commands.pop(-1) - - def _moveTo(self, pt): - """ - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen._commands - ['M0 0'] - - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 0)) - >>> pen._commands - ['M10 0'] - - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 10)) - >>> pen._commands - ['M0 10'] - """ - self._handleAnchor() - t = "M%s" % (pointToString(pt, self._ntos)) - self._commands.append(t) - self._lastCommand = "M" - self._lastX, self._lastY = pt - - def _lineTo(self, pt): - """ - # duplicate point - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 10)) - >>> pen.lineTo((10, 10)) - >>> pen._commands - ['M10 10'] - - # vertical line - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 10)) - >>> pen.lineTo((10, 0)) - >>> pen._commands - ['M10 10', 'V0'] - - # horizontal line - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 10)) - >>> pen.lineTo((0, 10)) - >>> pen._commands - ['M10 10', 'H0'] - - # basic - >>> pen = SVGPathPen(None) - >>> pen.lineTo((70, 80)) - >>> pen._commands - ['L70 80'] - - # basic following a moveto - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((10, 10)) - >>> pen._commands - ['M0 0', ' 10 10'] - """ - x, y = pt - # duplicate point - if x == self._lastX and y == self._lastY: - return - # vertical line - elif x == self._lastX: - cmd = "V" - pts = self._ntos(y) - # horizontal line - elif y == self._lastY: - cmd = "H" - pts = self._ntos(x) - # previous was a moveto - elif self._lastCommand == "M": - cmd = None - pts = " " + pointToString(pt, self._ntos) - # basic - else: - cmd = "L" - pts = pointToString(pt, self._ntos) - # write the string - t = "" - if cmd: - t += cmd - self._lastCommand = cmd - t += pts - self._commands.append(t) - # store for future reference - self._lastX, self._lastY = pt - - def _curveToOne(self, pt1, pt2, pt3): - """ - >>> pen = SVGPathPen(None) - >>> pen.curveTo((10, 20), (30, 40), (50, 60)) - >>> pen._commands - ['C10 20 30 40 50 60'] - """ - t = "C" - t += pointToString(pt1, self._ntos) + " " - t += pointToString(pt2, self._ntos) + " " - t += pointToString(pt3, self._ntos) - self._commands.append(t) - self._lastCommand = "C" - self._lastX, self._lastY = pt3 - - def _qCurveToOne(self, pt1, pt2): - """ - >>> pen = SVGPathPen(None) - >>> pen.qCurveTo((10, 20), (30, 40)) - >>> pen._commands - ['Q10 20 30 40'] - >>> from fontTools.misc.roundTools import otRound - >>> pen = SVGPathPen(None, ntos=lambda v: str(otRound(v))) - >>> pen.qCurveTo((3, 3), (7, 5), (11, 4)) - >>> pen._commands - ['Q3 3 5 4', 'Q7 5 11 4'] - """ - assert pt2 is not None - t = "Q" - t += pointToString(pt1, self._ntos) + " " - t += pointToString(pt2, self._ntos) - self._commands.append(t) - self._lastCommand = "Q" - self._lastX, self._lastY = pt2 - - def _closePath(self): - """ - >>> pen = SVGPathPen(None) - >>> pen.closePath() - >>> pen._commands - ['Z'] - """ - self._commands.append("Z") - self._lastCommand = "Z" - self._lastX = self._lastY = None - - def 
_endPath(self): - """ - >>> pen = SVGPathPen(None) - >>> pen.endPath() - >>> pen._commands - [] - """ - self._lastCommand = None - self._lastX = self._lastY = None - - def getCommands(self): - return "".join(self._commands) - - -def main(args=None): - """Generate per-character SVG from font and text""" - - if args is None: - import sys - - args = sys.argv[1:] - - from fontTools.ttLib import TTFont - import argparse - - parser = argparse.ArgumentParser( - "fonttools pens.svgPathPen", description="Generate SVG from text" - ) - parser.add_argument("font", metavar="font.ttf", help="Font file.") - parser.add_argument("text", metavar="text", help="Text string.") - parser.add_argument( - "--variations", - metavar="AXIS=LOC", - default="", - help="List of space separated locations. A location consist in " - "the name of a variation axis, followed by '=' and a number. E.g.: " - "wght=700 wdth=80. The default is the location of the base master.", - ) - - options = parser.parse_args(args) - - font = TTFont(options.font) - text = options.text - - location = {} - for tag_v in options.variations.split(): - fields = tag_v.split("=") - tag = fields[0].strip() - v = int(fields[1]) - location[tag] = v - - hhea = font["hhea"] - ascent, descent = hhea.ascent, hhea.descent - - glyphset = font.getGlyphSet(location=location) - cmap = font["cmap"].getBestCmap() - - s = "" - width = 0 - for u in text: - g = cmap[ord(u)] - glyph = glyphset[g] - - pen = SVGPathPen(glyphset) - glyph.draw(pen) - commands = pen.getCommands() - - s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % ( - width, - ascent, - commands, - ) - - width += glyph.width - - print('<?xml version="1.0" encoding="UTF-8"?>') - print( - '<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">' - % (width, ascent - descent) - ) - print(s, end="") - print("</svg>") - - -if __name__ == "__main__": - import sys - - if len(sys.argv) == 1: - import doctest - - sys.exit(doctest.testmod().failed) - - sys.exit(main()) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py deleted file mode 100644 index 324ffd016515f0f96e6505e53ffc5c50b149be49..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py +++ /dev/null @@ -1,1037 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.fixedTools import floatToFixedToStr -from fontTools.misc.textTools import byteord, safeEval - -# from itertools import * -from . import DefaultTable -from . 
import grUtils -from array import array -from functools import reduce -import struct, re, sys - -Silf_hdr_format = """ - > - version: 16.16F -""" - -Silf_hdr_format_3 = """ - > - version: 16.16F - compilerVersion: L - numSilf: H - x - x -""" - -Silf_part1_format_v3 = """ - > - ruleVersion: 16.16F - passOffset: H - pseudosOffset: H -""" - -Silf_part1_format = """ - > - maxGlyphID: H - extraAscent: h - extraDescent: h - numPasses: B - iSubst: B - iPos: B - iJust: B - iBidi: B - flags: B - maxPreContext: B - maxPostContext: B - attrPseudo: B - attrBreakWeight: B - attrDirectionality: B - attrMirroring: B - attrSkipPasses: B - numJLevels: B -""" - -Silf_justify_format = """ - > - attrStretch: B - attrShrink: B - attrStep: B - attrWeight: B - runto: B - x - x - x -""" - -Silf_part2_format = """ - > - numLigComp: H - numUserDefn: B - maxCompPerLig: B - direction: B - attCollisions: B - x - x - x - numCritFeatures: B -""" - -Silf_pseudomap_format = """ - > - unicode: L - nPseudo: H -""" - -Silf_pseudomap_format_h = """ - > - unicode: H - nPseudo: H -""" - -Silf_classmap_format = """ - > - numClass: H - numLinear: H -""" - -Silf_lookupclass_format = """ - > - numIDs: H - searchRange: H - entrySelector: H - rangeShift: H -""" - -Silf_lookuppair_format = """ - > - glyphId: H - index: H -""" - -Silf_pass_format = """ - > - flags: B - maxRuleLoop: B - maxRuleContext: B - maxBackup: B - numRules: H - fsmOffset: H - pcCode: L - rcCode: L - aCode: L - oDebug: L - numRows: H - numTransitional: H - numSuccess: H - numColumns: H -""" - -aCode_info = ( - ("NOP", 0), - ("PUSH_BYTE", "b"), - ("PUSH_BYTE_U", "B"), - ("PUSH_SHORT", ">h"), - ("PUSH_SHORT_U", ">H"), - ("PUSH_LONG", ">L"), - ("ADD", 0), - ("SUB", 0), - ("MUL", 0), - ("DIV", 0), - ("MIN", 0), - ("MAX", 0), - ("NEG", 0), - ("TRUNC8", 0), - ("TRUNC16", 0), - ("COND", 0), - ("AND", 0), # x10 - ("OR", 0), - ("NOT", 0), - ("EQUAL", 0), - ("NOT_EQ", 0), - ("LESS", 0), - ("GTR", 0), - ("LESS_EQ", 0), - ("GTR_EQ", 0), - ("NEXT", 0), - ("NEXT_N", "b"), - ("COPY_NEXT", 0), - ("PUT_GLYPH_8BIT_OBS", "B"), - ("PUT_SUBS_8BIT_OBS", "bBB"), - ("PUT_COPY", "b"), - ("INSERT", 0), - ("DELETE", 0), # x20 - ("ASSOC", -1), - ("CNTXT_ITEM", "bB"), - ("ATTR_SET", "B"), - ("ATTR_ADD", "B"), - ("ATTR_SUB", "B"), - ("ATTR_SET_SLOT", "B"), - ("IATTR_SET_SLOT", "BB"), - ("PUSH_SLOT_ATTR", "Bb"), - ("PUSH_GLYPH_ATTR_OBS", "Bb"), - ("PUSH_GLYPH_METRIC", "Bbb"), - ("PUSH_FEAT", "Bb"), - ("PUSH_ATT_TO_GATTR_OBS", "Bb"), - ("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"), - ("PUSH_ISLOT_ATTR", "Bbb"), - ("PUSH_IGLYPH_ATTR", "Bbb"), - ("POP_RET", 0), # x30 - ("RET_ZERO", 0), - ("RET_TRUE", 0), - ("IATTR_SET", "BB"), - ("IATTR_ADD", "BB"), - ("IATTR_SUB", "BB"), - ("PUSH_PROC_STATE", "B"), - ("PUSH_VERSION", 0), - ("PUT_SUBS", ">bHH"), - ("PUT_SUBS2", 0), - ("PUT_SUBS3", 0), - ("PUT_GLYPH", ">H"), - ("PUSH_GLYPH_ATTR", ">Hb"), - ("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"), - ("BITOR", 0), - ("BITAND", 0), - ("BITNOT", 0), # x40 - ("BITSET", ">HH"), - ("SET_FEAT", "Bb"), -) -aCode_map = dict([(x[0], (i, x[1])) for i, x in enumerate(aCode_info)]) - - -def disassemble(aCode): - codelen = len(aCode) - pc = 0 - res = [] - while pc < codelen: - opcode = byteord(aCode[pc : pc + 1]) - if opcode > len(aCode_info): - instr = aCode_info[0] - else: - instr = aCode_info[opcode] - pc += 1 - if instr[1] != 0 and pc >= codelen: - return res - if instr[1] == -1: - count = byteord(aCode[pc]) - fmt = "%dB" % count - pc += 1 - elif instr[1] == 0: - fmt = "" - else: - fmt = instr[1] - if fmt == "": - res.append(instr[0]) - 
continue - parms = struct.unpack_from(fmt, aCode[pc:]) - res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")") - pc += struct.calcsize(fmt) - return res - - -instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?") - - -def assemble(instrs): - res = b"" - for inst in instrs: - m = instre.match(inst) - if not m or not m.group(1) in aCode_map: - continue - opcode, parmfmt = aCode_map[m.group(1)] - res += struct.pack("B", opcode) - if m.group(2): - if parmfmt == 0: - continue - parms = [int(x) for x in re.split(r",\s*", m.group(2))] - if parmfmt == -1: - l = len(parms) - res += struct.pack(("%dB" % (l + 1)), l, *parms) - else: - res += struct.pack(parmfmt, *parms) - return res - - -def writecode(tag, writer, instrs): - writer.begintag(tag) - writer.newline() - for l in disassemble(instrs): - writer.write(l) - writer.newline() - writer.endtag(tag) - writer.newline() - - -def readcode(content): - res = [] - for e in content_string(content).split("\n"): - e = e.strip() - if not len(e): - continue - res.append(e) - return assemble(res) - - -attrs_info = ( - "flags", - "extraAscent", - "extraDescent", - "maxGlyphID", - "numLigComp", - "numUserDefn", - "maxCompPerLig", - "direction", - "lbGID", -) -attrs_passindexes = ("iSubst", "iPos", "iJust", "iBidi") -attrs_contexts = ("maxPreContext", "maxPostContext") -attrs_attributes = ( - "attrPseudo", - "attrBreakWeight", - "attrDirectionality", - "attrMirroring", - "attrSkipPasses", - "attCollisions", -) -pass_attrs_info = ( - "flags", - "maxRuleLoop", - "maxRuleContext", - "maxBackup", - "minRulePreContext", - "maxRulePreContext", - "collisionThreshold", -) -pass_attrs_fsm = ("numRows", "numTransitional", "numSuccess", "numColumns") - - -def writesimple(tag, self, writer, *attrkeys): - attrs = dict([(k, getattr(self, k)) for k in attrkeys]) - writer.simpletag(tag, **attrs) - writer.newline() - - -def getSimple(self, attrs, *attr_list): - for k in attr_list: - if k in attrs: - setattr(self, k, int(safeEval(attrs[k]))) - - -def content_string(contents): - res = "" - for element in contents: - if isinstance(element, tuple): - continue - res += element - return res.strip() - - -def wrapline(writer, dat, length=80): - currline = "" - for d in dat: - if len(currline) > length: - writer.write(currline[:-1]) - writer.newline() - currline = "" - currline += d + " " - if len(currline): - writer.write(currline[:-1]) - writer.newline() - - -class _Object: - pass - - -class table_S__i_l_f(DefaultTable.DefaultTable): - """Silf table support""" - - def __init__(self, tag=None): - DefaultTable.DefaultTable.__init__(self, tag) - self.silfs = [] - - def decompile(self, data, ttFont): - sstruct.unpack2(Silf_hdr_format, data, self) - self.version = float(floatToFixedToStr(self.version, precisionBits=16)) - if self.version >= 5.0: - (data, self.scheme) = grUtils.decompress(data) - sstruct.unpack2(Silf_hdr_format_3, data, self) - base = sstruct.calcsize(Silf_hdr_format_3) - elif self.version < 3.0: - self.numSilf = struct.unpack(">H", data[4:6]) - self.scheme = 0 - self.compilerVersion = 0 - base = 8 - else: - self.scheme = 0 - sstruct.unpack2(Silf_hdr_format_3, data, self) - base = sstruct.calcsize(Silf_hdr_format_3) - - silfoffsets = struct.unpack_from((">%dL" % self.numSilf), data[base:]) - for offset in silfoffsets: - s = Silf() - self.silfs.append(s) - s.decompile(data[offset:], ttFont, self.version) - - def compile(self, ttFont): - self.numSilf = len(self.silfs) - if self.version < 3.0: - hdr = sstruct.pack(Silf_hdr_format, self) - hdr += struct.pack(">HH", 
self.numSilf, 0) - else: - hdr = sstruct.pack(Silf_hdr_format_3, self) - offset = len(hdr) + 4 * self.numSilf - data = b"" - for s in self.silfs: - hdr += struct.pack(">L", offset) - subdata = s.compile(ttFont, self.version) - offset += len(subdata) - data += subdata - if self.version >= 5.0: - return grUtils.compress(self.scheme, hdr + data) - return hdr + data - - def toXML(self, writer, ttFont): - writer.comment("Attributes starting with _ are informative only") - writer.newline() - writer.simpletag( - "version", - version=self.version, - compilerVersion=self.compilerVersion, - compressionScheme=self.scheme, - ) - writer.newline() - for s in self.silfs: - writer.begintag("silf") - writer.newline() - s.toXML(writer, ttFont, self.version) - writer.endtag("silf") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": - self.scheme = int(safeEval(attrs["compressionScheme"])) - self.version = float(safeEval(attrs["version"])) - self.compilerVersion = int(safeEval(attrs["compilerVersion"])) - return - if name == "silf": - s = Silf() - self.silfs.append(s) - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - s.fromXML(tag, attrs, subcontent, ttFont, self.version) - - -class Silf(object): - """A particular Silf subtable""" - - def __init__(self): - self.passes = [] - self.scriptTags = [] - self.critFeatures = [] - self.jLevels = [] - self.pMap = {} - - def decompile(self, data, ttFont, version=2.0): - if version >= 3.0: - _, data = sstruct.unpack2(Silf_part1_format_v3, data, self) - self.ruleVersion = float( - floatToFixedToStr(self.ruleVersion, precisionBits=16) - ) - _, data = sstruct.unpack2(Silf_part1_format, data, self) - for jlevel in range(self.numJLevels): - j, data = sstruct.unpack2(Silf_justify_format, data, _Object()) - self.jLevels.append(j) - _, data = sstruct.unpack2(Silf_part2_format, data, self) - if self.numCritFeatures: - self.critFeatures = struct.unpack_from( - (">%dH" % self.numCritFeatures), data - ) - data = data[self.numCritFeatures * 2 + 1 :] - (numScriptTag,) = struct.unpack_from("B", data) - if numScriptTag: - self.scriptTags = [ - struct.unpack("4s", data[x : x + 4])[0].decode("ascii") - for x in range(1, 1 + 4 * numScriptTag, 4) - ] - data = data[1 + 4 * numScriptTag :] - (self.lbGID,) = struct.unpack(">H", data[:2]) - if self.numPasses: - self.oPasses = struct.unpack( - (">%dL" % (self.numPasses + 1)), data[2 : 6 + 4 * self.numPasses] - ) - data = data[6 + 4 * self.numPasses :] - (numPseudo,) = struct.unpack(">H", data[:2]) - for i in range(numPseudo): - if version >= 3.0: - pseudo = sstruct.unpack( - Silf_pseudomap_format, data[8 + 6 * i : 14 + 6 * i], _Object() - ) - else: - pseudo = sstruct.unpack( - Silf_pseudomap_format_h, data[8 + 4 * i : 12 + 4 * i], _Object() - ) - self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo) - data = data[8 + 6 * numPseudo :] - currpos = ( - sstruct.calcsize(Silf_part1_format) - + sstruct.calcsize(Silf_justify_format) * self.numJLevels - + sstruct.calcsize(Silf_part2_format) - + 2 * self.numCritFeatures - + 1 - + 1 - + 4 * numScriptTag - + 6 - + 4 * self.numPasses - + 8 - + 6 * numPseudo - ) - if version >= 3.0: - currpos += sstruct.calcsize(Silf_part1_format_v3) - self.classes = Classes() - self.classes.decompile(data, ttFont, version) - for i in range(self.numPasses): - p = Pass() - self.passes.append(p) - p.decompile( - data[self.oPasses[i] - currpos : self.oPasses[i + 1] - currpos], - ttFont, - version, - ) - - def 
compile(self, ttFont, version=2.0): - self.numPasses = len(self.passes) - self.numJLevels = len(self.jLevels) - self.numCritFeatures = len(self.critFeatures) - numPseudo = len(self.pMap) - data = b"" - if version >= 3.0: - hdroffset = sstruct.calcsize(Silf_part1_format_v3) - else: - hdroffset = 0 - data += sstruct.pack(Silf_part1_format, self) - for j in self.jLevels: - data += sstruct.pack(Silf_justify_format, j) - data += sstruct.pack(Silf_part2_format, self) - if self.numCritFeatures: - data += struct.pack((">%dH" % self.numCritFeaturs), *self.critFeatures) - data += struct.pack("BB", 0, len(self.scriptTags)) - if len(self.scriptTags): - tdata = [struct.pack("4s", x.encode("ascii")) for x in self.scriptTags] - data += b"".join(tdata) - data += struct.pack(">H", self.lbGID) - self.passOffset = len(data) - - data1 = grUtils.bininfo(numPseudo, 6) - currpos = hdroffset + len(data) + 4 * (self.numPasses + 1) - self.pseudosOffset = currpos + len(data1) - for u, p in sorted(self.pMap.items()): - data1 += struct.pack( - (">LH" if version >= 3.0 else ">HH"), u, ttFont.getGlyphID(p) - ) - data1 += self.classes.compile(ttFont, version) - currpos += len(data1) - data2 = b"" - datao = b"" - for i, p in enumerate(self.passes): - base = currpos + len(data2) - datao += struct.pack(">L", base) - data2 += p.compile(ttFont, base, version) - datao += struct.pack(">L", currpos + len(data2)) - - if version >= 3.0: - data3 = sstruct.pack(Silf_part1_format_v3, self) - else: - data3 = b"" - return data3 + data + datao + data1 + data2 - - def toXML(self, writer, ttFont, version=2.0): - if version >= 3.0: - writer.simpletag("version", ruleVersion=self.ruleVersion) - writer.newline() - writesimple("info", self, writer, *attrs_info) - writesimple("passindexes", self, writer, *attrs_passindexes) - writesimple("contexts", self, writer, *attrs_contexts) - writesimple("attributes", self, writer, *attrs_attributes) - if len(self.jLevels): - writer.begintag("justifications") - writer.newline() - jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format) - for i, j in enumerate(self.jLevels): - attrs = dict([(k, getattr(j, k)) for k in jnames]) - writer.simpletag("justify", **attrs) - writer.newline() - writer.endtag("justifications") - writer.newline() - if len(self.critFeatures): - writer.begintag("critFeatures") - writer.newline() - writer.write(" ".join(map(str, self.critFeatures))) - writer.newline() - writer.endtag("critFeatures") - writer.newline() - if len(self.scriptTags): - writer.begintag("scriptTags") - writer.newline() - writer.write(" ".join(self.scriptTags)) - writer.newline() - writer.endtag("scriptTags") - writer.newline() - if self.pMap: - writer.begintag("pseudoMap") - writer.newline() - for k, v in sorted(self.pMap.items()): - writer.simpletag("pseudo", unicode=hex(k), pseudo=v) - writer.newline() - writer.endtag("pseudoMap") - writer.newline() - self.classes.toXML(writer, ttFont, version) - if len(self.passes): - writer.begintag("passes") - writer.newline() - for i, p in enumerate(self.passes): - writer.begintag("pass", _index=i) - writer.newline() - p.toXML(writer, ttFont, version) - writer.endtag("pass") - writer.newline() - writer.endtag("passes") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont, version=2.0): - if name == "version": - self.ruleVersion = float(safeEval(attrs.get("ruleVersion", "0"))) - if name == "info": - getSimple(self, attrs, *attrs_info) - elif name == "passindexes": - getSimple(self, attrs, *attrs_passindexes) - elif name == "contexts": - 
getSimple(self, attrs, *attrs_contexts) - elif name == "attributes": - getSimple(self, attrs, *attrs_attributes) - elif name == "justifications": - for element in content: - if not isinstance(element, tuple): - continue - (tag, attrs, subcontent) = element - if tag == "justify": - j = _Object() - for k, v in attrs.items(): - setattr(j, k, int(v)) - self.jLevels.append(j) - elif name == "critFeatures": - self.critFeatures = [] - element = content_string(content) - self.critFeatures.extend(map(int, element.split())) - elif name == "scriptTags": - self.scriptTags = [] - element = content_string(content) - for n in element.split(): - self.scriptTags.append(n) - elif name == "pseudoMap": - self.pMap = {} - for element in content: - if not isinstance(element, tuple): - continue - (tag, attrs, subcontent) = element - if tag == "pseudo": - k = int(attrs["unicode"], 16) - v = attrs["pseudo"] - self.pMap[k] = v - elif name == "classes": - self.classes = Classes() - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - self.classes.fromXML(tag, attrs, subcontent, ttFont, version) - elif name == "passes": - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - if tag == "pass": - p = Pass() - for e in subcontent: - if not isinstance(e, tuple): - continue - p.fromXML(e[0], e[1], e[2], ttFont, version) - self.passes.append(p) - - -class Classes(object): - def __init__(self): - self.linear = [] - self.nonLinear = [] - - def decompile(self, data, ttFont, version=2.0): - sstruct.unpack2(Silf_classmap_format, data, self) - if version >= 4.0: - oClasses = struct.unpack( - (">%dL" % (self.numClass + 1)), data[4 : 8 + 4 * self.numClass] - ) - else: - oClasses = struct.unpack( - (">%dH" % (self.numClass + 1)), data[4 : 6 + 2 * self.numClass] - ) - for s, e in zip(oClasses[: self.numLinear], oClasses[1 : self.numLinear + 1]): - self.linear.append( - ttFont.getGlyphName(x) - for x in struct.unpack((">%dH" % ((e - s) / 2)), data[s:e]) - ) - for s, e in zip( - oClasses[self.numLinear : self.numClass], - oClasses[self.numLinear + 1 : self.numClass + 1], - ): - nonLinids = [ - struct.unpack(">HH", data[x : x + 4]) for x in range(s + 8, e, 4) - ] - nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids]) - self.nonLinear.append(nonLin) - - def compile(self, ttFont, version=2.0): - data = b"" - oClasses = [] - if version >= 4.0: - offset = 8 + 4 * (len(self.linear) + len(self.nonLinear)) - else: - offset = 6 + 2 * (len(self.linear) + len(self.nonLinear)) - for l in self.linear: - oClasses.append(len(data) + offset) - gs = [ttFont.getGlyphID(x) for x in l] - data += struct.pack((">%dH" % len(l)), *gs) - for l in self.nonLinear: - oClasses.append(len(data) + offset) - gs = [(ttFont.getGlyphID(x[0]), x[1]) for x in l.items()] - data += grUtils.bininfo(len(gs)) - data += b"".join([struct.pack(">HH", *x) for x in sorted(gs)]) - oClasses.append(len(data) + offset) - self.numClass = len(oClasses) - 1 - self.numLinear = len(self.linear) - return ( - sstruct.pack(Silf_classmap_format, self) - + struct.pack( - ((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)), *oClasses - ) - + data - ) - - def toXML(self, writer, ttFont, version=2.0): - writer.begintag("classes") - writer.newline() - writer.begintag("linearClasses") - writer.newline() - for i, l in enumerate(self.linear): - writer.begintag("linear", _index=i) - writer.newline() - wrapline(writer, l) - writer.endtag("linear") - writer.newline() - 
writer.endtag("linearClasses") - writer.newline() - writer.begintag("nonLinearClasses") - writer.newline() - for i, l in enumerate(self.nonLinear): - writer.begintag("nonLinear", _index=i + self.numLinear) - writer.newline() - for inp, ind in l.items(): - writer.simpletag("map", glyph=inp, index=ind) - writer.newline() - writer.endtag("nonLinear") - writer.newline() - writer.endtag("nonLinearClasses") - writer.newline() - writer.endtag("classes") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont, version=2.0): - if name == "linearClasses": - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - if tag == "linear": - l = content_string(subcontent).split() - self.linear.append(l) - elif name == "nonLinearClasses": - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - if tag == "nonLinear": - l = {} - for e in subcontent: - if not isinstance(e, tuple): - continue - tag, attrs, subsubcontent = e - if tag == "map": - l[attrs["glyph"]] = int(safeEval(attrs["index"])) - self.nonLinear.append(l) - - -class Pass(object): - def __init__(self): - self.colMap = {} - self.rules = [] - self.rulePreContexts = [] - self.ruleSortKeys = [] - self.ruleConstraints = [] - self.passConstraints = b"" - self.actions = [] - self.stateTrans = [] - self.startStates = [] - - def decompile(self, data, ttFont, version=2.0): - _, data = sstruct.unpack2(Silf_pass_format, data, self) - (numRange, _, _, _) = struct.unpack(">4H", data[:8]) - data = data[8:] - for i in range(numRange): - (first, last, col) = struct.unpack(">3H", data[6 * i : 6 * i + 6]) - for g in range(first, last + 1): - self.colMap[ttFont.getGlyphName(g)] = col - data = data[6 * numRange :] - oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data) - data = data[2 + 2 * self.numSuccess :] - rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data) - self.rules = [rules[s:e] for (s, e) in zip(oRuleMap, oRuleMap[1:])] - data = data[2 * oRuleMap[-1] :] - (self.minRulePreContext, self.maxRulePreContext) = struct.unpack("BB", data[:2]) - numStartStates = self.maxRulePreContext - self.minRulePreContext + 1 - self.startStates = struct.unpack( - (">%dH" % numStartStates), data[2 : 2 + numStartStates * 2] - ) - data = data[2 + numStartStates * 2 :] - self.ruleSortKeys = struct.unpack( - (">%dH" % self.numRules), data[: 2 * self.numRules] - ) - data = data[2 * self.numRules :] - self.rulePreContexts = struct.unpack( - ("%dB" % self.numRules), data[: self.numRules] - ) - data = data[self.numRules :] - (self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3]) - oConstraints = list( - struct.unpack( - (">%dH" % (self.numRules + 1)), data[3 : 5 + self.numRules * 2] - ) - ) - data = data[5 + self.numRules * 2 :] - oActions = list( - struct.unpack((">%dH" % (self.numRules + 1)), data[: 2 + self.numRules * 2]) - ) - data = data[2 * self.numRules + 2 :] - for i in range(self.numTransitional): - a = array( - "H", data[i * self.numColumns * 2 : (i + 1) * self.numColumns * 2] - ) - if sys.byteorder != "big": - a.byteswap() - self.stateTrans.append(a) - data = data[self.numTransitional * self.numColumns * 2 + 1 :] - self.passConstraints = data[:pConstraint] - data = data[pConstraint:] - for i in range(len(oConstraints) - 2, -1, -1): - if oConstraints[i] == 0: - oConstraints[i] = oConstraints[i + 1] - self.ruleConstraints = [ - (data[s:e] if (e - s > 1) else b"") - for (s, e) in zip(oConstraints, oConstraints[1:]) - ] - 
data = data[oConstraints[-1] :] - self.actions = [ - (data[s:e] if (e - s > 1) else "") for (s, e) in zip(oActions, oActions[1:]) - ] - data = data[oActions[-1] :] - # not using debug - - def compile(self, ttFont, base, version=2.0): - # build it all up backwards - oActions = reduce( - lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.actions + [b""], (0, []) - )[1] - oConstraints = reduce( - lambda a, x: (a[0] + len(x), a[1] + [a[0]]), - self.ruleConstraints + [b""], - (1, []), - )[1] - constraintCode = b"\000" + b"".join(self.ruleConstraints) - transes = [] - for t in self.stateTrans: - if sys.byteorder != "big": - t.byteswap() - transes.append(t.tobytes()) - if sys.byteorder != "big": - t.byteswap() - if not len(transes): - self.startStates = [0] - oRuleMap = reduce( - lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.rules + [[]], (0, []) - )[1] - passRanges = [] - gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()]) - for e in grUtils.entries(gidcolmap, sameval=True): - if e[1]: - passRanges.append((e[0], e[0] + e[1] - 1, e[2][0])) - self.numRules = len(self.actions) - self.fsmOffset = ( - sstruct.calcsize(Silf_pass_format) - + 8 - + len(passRanges) * 6 - + len(oRuleMap) * 2 - + 2 * oRuleMap[-1] - + 2 - + 2 * len(self.startStates) - + 3 * self.numRules - + 3 - + 4 * self.numRules - + 4 - ) - self.pcCode = ( - self.fsmOffset + 2 * self.numTransitional * self.numColumns + 1 + base - ) - self.rcCode = self.pcCode + len(self.passConstraints) - self.aCode = self.rcCode + len(constraintCode) - self.oDebug = 0 - # now generate output - data = sstruct.pack(Silf_pass_format, self) - data += grUtils.bininfo(len(passRanges), 6) - data += b"".join(struct.pack(">3H", *p) for p in passRanges) - data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap) - flatrules = reduce(lambda a, x: a + x, self.rules, []) - data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules) - data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext) - data += struct.pack((">%dH" % len(self.startStates)), *self.startStates) - data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys) - data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts) - data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints)) - data += struct.pack((">%dH" % (self.numRules + 1)), *oConstraints) - data += struct.pack((">%dH" % (self.numRules + 1)), *oActions) - return ( - data - + b"".join(transes) - + struct.pack("B", 0) - + self.passConstraints - + constraintCode - + b"".join(self.actions) - ) - - def toXML(self, writer, ttFont, version=2.0): - writesimple("info", self, writer, *pass_attrs_info) - writesimple("fsminfo", self, writer, *pass_attrs_fsm) - writer.begintag("colmap") - writer.newline() - wrapline( - writer, - [ - "{}={}".format(*x) - for x in sorted( - self.colMap.items(), key=lambda x: ttFont.getGlyphID(x[0]) - ) - ], - ) - writer.endtag("colmap") - writer.newline() - writer.begintag("staterulemap") - writer.newline() - for i, r in enumerate(self.rules): - writer.simpletag( - "state", - number=self.numRows - self.numSuccess + i, - rules=" ".join(map(str, r)), - ) - writer.newline() - writer.endtag("staterulemap") - writer.newline() - writer.begintag("rules") - writer.newline() - for i in range(len(self.actions)): - writer.begintag( - "rule", - index=i, - precontext=self.rulePreContexts[i], - sortkey=self.ruleSortKeys[i], - ) - writer.newline() - if len(self.ruleConstraints[i]): - writecode("constraint", writer, self.ruleConstraints[i]) - writecode("action", 
writer, self.actions[i]) - writer.endtag("rule") - writer.newline() - writer.endtag("rules") - writer.newline() - if len(self.passConstraints): - writecode("passConstraint", writer, self.passConstraints) - if len(self.stateTrans): - writer.begintag("fsm") - writer.newline() - writer.begintag("starts") - writer.write(" ".join(map(str, self.startStates))) - writer.endtag("starts") - writer.newline() - for i, s in enumerate(self.stateTrans): - writer.begintag("row", _i=i) - # no newlines here - writer.write(" ".join(map(str, s))) - writer.endtag("row") - writer.newline() - writer.endtag("fsm") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont, version=2.0): - if name == "info": - getSimple(self, attrs, *pass_attrs_info) - elif name == "fsminfo": - getSimple(self, attrs, *pass_attrs_fsm) - elif name == "colmap": - e = content_string(content) - for w in e.split(): - x = w.split("=") - if len(x) != 2 or x[0] == "" or x[1] == "": - continue - self.colMap[x[0]] = int(x[1]) - elif name == "staterulemap": - for e in content: - if not isinstance(e, tuple): - continue - tag, a, c = e - if tag == "state": - self.rules.append([int(x) for x in a["rules"].split(" ")]) - elif name == "rules": - for element in content: - if not isinstance(element, tuple): - continue - tag, a, c = element - if tag != "rule": - continue - self.rulePreContexts.append(int(a["precontext"])) - self.ruleSortKeys.append(int(a["sortkey"])) - con = b"" - act = b"" - for e in c: - if not isinstance(e, tuple): - continue - tag, a, subc = e - if tag == "constraint": - con = readcode(subc) - elif tag == "action": - act = readcode(subc) - self.actions.append(act) - self.ruleConstraints.append(con) - elif name == "passConstraint": - self.passConstraints = readcode(content) - elif name == "fsm": - for element in content: - if not isinstance(element, tuple): - continue - tag, a, c = element - if tag == "row": - s = array("H") - e = content_string(c) - s.extend(map(int, e.split())) - self.stateTrans.append(s) - elif tag == "starts": - s = [] - e = content_string(c) - s.extend(map(int, e.split())) - self.startStates = s diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I__5.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I__5.py deleted file mode 100644 index 5edc86a9cbc9a0b710cfc014a3910f671f791e54..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I__5.py +++ /dev/null @@ -1,46 +0,0 @@ -""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) -tool to store its hinting source data. - -TSI5 contains the VTT character groups. -""" -from fontTools.misc.textTools import safeEval -from . 
import DefaultTable -import sys -import array - - -class table_T_S_I__5(DefaultTable.DefaultTable): - def decompile(self, data, ttFont): - numGlyphs = ttFont["maxp"].numGlyphs - assert len(data) == 2 * numGlyphs - a = array.array("H") - a.frombytes(data) - if sys.byteorder != "big": - a.byteswap() - self.glyphGrouping = {} - for i in range(numGlyphs): - self.glyphGrouping[ttFont.getGlyphName(i)] = a[i] - - def compile(self, ttFont): - glyphNames = ttFont.getGlyphOrder() - a = array.array("H") - for i in range(len(glyphNames)): - a.append(self.glyphGrouping.get(glyphNames[i], 0)) - if sys.byteorder != "big": - a.byteswap() - return a.tobytes() - - def toXML(self, writer, ttFont): - names = sorted(self.glyphGrouping.keys()) - for glyphName in names: - writer.simpletag( - "glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName] - ) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "glyphGrouping"): - self.glyphGrouping = {} - if name != "glyphgroup": - return - self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"]) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_f_e_a_t.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_f_e_a_t.py deleted file mode 100644 index c9a48eff06cb14b1b2dc56c94ec7e02b80f11ca3..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_f_e_a_t.py +++ /dev/null @@ -1,12 +0,0 @@ -from .otBase import BaseTTXConverter - - -class table__f_e_a_t(BaseTTXConverter): - """The feature name table is an AAT (Apple Advanced Typography) table for - storing font features, settings, and their human-readable names. It should - not be confused with the ``Feat`` table or the OpenType Layout ``GSUB``/``GPOS`` - tables. See `Feature Name Table `_ - in the TrueType Reference Manual for more information on the structure and - purpose of this table.""" - - pass diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/backend_qt.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/backend_qt.py deleted file mode 100644 index fc6639914d35edb13d6d8762304d32155c9b994a..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/backend_qt.py +++ /dev/null @@ -1,1032 +0,0 @@ -import functools -import os -import sys -import traceback - -import matplotlib as mpl -from matplotlib import _api, backend_tools, cbook -from matplotlib._pylab_helpers import Gcf -from matplotlib.backend_bases import ( - _Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2, - TimerBase, cursors, ToolContainerBase, MouseButton, - CloseEvent, KeyEvent, LocationEvent, MouseEvent, ResizeEvent) -import matplotlib.backends.qt_editor.figureoptions as figureoptions -from . import qt_compat -from .qt_compat import ( - QtCore, QtGui, QtWidgets, __version__, QT_API, - _enum, _to_int, _isdeleted, _maybe_allow_interrupt -) - - -# SPECIAL_KEYS are Qt::Key that do *not* return their Unicode name -# instead they have manually specified names. 
-SPECIAL_KEYS = { - _to_int(getattr(_enum("QtCore.Qt.Key"), k)): v for k, v in [ - ("Key_Escape", "escape"), - ("Key_Tab", "tab"), - ("Key_Backspace", "backspace"), - ("Key_Return", "enter"), - ("Key_Enter", "enter"), - ("Key_Insert", "insert"), - ("Key_Delete", "delete"), - ("Key_Pause", "pause"), - ("Key_SysReq", "sysreq"), - ("Key_Clear", "clear"), - ("Key_Home", "home"), - ("Key_End", "end"), - ("Key_Left", "left"), - ("Key_Up", "up"), - ("Key_Right", "right"), - ("Key_Down", "down"), - ("Key_PageUp", "pageup"), - ("Key_PageDown", "pagedown"), - ("Key_Shift", "shift"), - # In OSX, the control and super (aka cmd/apple) keys are switched. - ("Key_Control", "control" if sys.platform != "darwin" else "cmd"), - ("Key_Meta", "meta" if sys.platform != "darwin" else "control"), - ("Key_Alt", "alt"), - ("Key_CapsLock", "caps_lock"), - ("Key_F1", "f1"), - ("Key_F2", "f2"), - ("Key_F3", "f3"), - ("Key_F4", "f4"), - ("Key_F5", "f5"), - ("Key_F6", "f6"), - ("Key_F7", "f7"), - ("Key_F8", "f8"), - ("Key_F9", "f9"), - ("Key_F10", "f10"), - ("Key_F10", "f11"), - ("Key_F12", "f12"), - ("Key_Super_L", "super"), - ("Key_Super_R", "super"), - ] -} -# Define which modifier keys are collected on keyboard events. -# Elements are (Qt::KeyboardModifiers, Qt::Key) tuples. -# Order determines the modifier order (ctrl+alt+...) reported by Matplotlib. -_MODIFIER_KEYS = [ - (_to_int(getattr(_enum("QtCore.Qt.KeyboardModifier"), mod)), - _to_int(getattr(_enum("QtCore.Qt.Key"), key))) - for mod, key in [ - ("ControlModifier", "Key_Control"), - ("AltModifier", "Key_Alt"), - ("ShiftModifier", "Key_Shift"), - ("MetaModifier", "Key_Meta"), - ] -] -cursord = { - k: getattr(_enum("QtCore.Qt.CursorShape"), v) for k, v in [ - (cursors.MOVE, "SizeAllCursor"), - (cursors.HAND, "PointingHandCursor"), - (cursors.POINTER, "ArrowCursor"), - (cursors.SELECT_REGION, "CrossCursor"), - (cursors.WAIT, "WaitCursor"), - (cursors.RESIZE_HORIZONTAL, "SizeHorCursor"), - (cursors.RESIZE_VERTICAL, "SizeVerCursor"), - ] -} - - -@_api.caching_module_getattr -class __getattr__: - qApp = _api.deprecated( - "3.6", alternative="QtWidgets.QApplication.instance()")( - property(lambda self: QtWidgets.QApplication.instance())) - - -# lru_cache keeps a reference to the QApplication instance, keeping it from -# being GC'd. -@functools.lru_cache(1) -def _create_qApp(): - app = QtWidgets.QApplication.instance() - - # Create a new QApplication and configure it if none exists yet, as only - # one QApplication can exist at a time. - if app is None: - # display_is_valid returns False only if on Linux and neither X11 - # nor Wayland display can be opened. - if not mpl._c_internal_utils.display_is_valid(): - raise RuntimeError('Invalid DISPLAY variable') - - # Check to make sure a QApplication from a different major version - # of Qt is not instantiated in the process - if QT_API in {'PyQt6', 'PySide6'}: - other_bindings = ('PyQt5', 'PySide2') - elif QT_API in {'PyQt5', 'PySide2'}: - other_bindings = ('PyQt6', 'PySide6') - else: - raise RuntimeError("Should never be here") - - for binding in other_bindings: - mod = sys.modules.get(f'{binding}.QtWidgets') - if mod is not None and mod.QApplication.instance() is not None: - other_core = sys.modules.get(f'{binding}.QtCore') - _api.warn_external( - f'Matplotlib is using {QT_API} which wraps ' - f'{QtCore.qVersion()} however an instantiated ' - f'QApplication from {binding} which wraps ' - f'{other_core.qVersion()} exists. Mixing Qt major ' - 'versions may not work as expected.' 
- ) - break - try: - QtWidgets.QApplication.setAttribute( - QtCore.Qt.AA_EnableHighDpiScaling) - except AttributeError: # Only for Qt>=5.6, <6. - pass - try: - QtWidgets.QApplication.setHighDpiScaleFactorRoundingPolicy( - QtCore.Qt.HighDpiScaleFactorRoundingPolicy.PassThrough) - except AttributeError: # Only for Qt>=5.14. - pass - app = QtWidgets.QApplication(["matplotlib"]) - if sys.platform == "darwin": - image = str(cbook._get_data_path('images/matplotlib.svg')) - icon = QtGui.QIcon(image) - app.setWindowIcon(icon) - app.lastWindowClosed.connect(app.quit) - cbook._setup_new_guiapp() - - try: - app.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps) # Only for Qt<6. - except AttributeError: - pass - - return app - - -class TimerQT(TimerBase): - """Subclass of `.TimerBase` using QTimer events.""" - - def __init__(self, *args, **kwargs): - # Create a new timer and connect the timeout() signal to the - # _on_timer method. - self._timer = QtCore.QTimer() - self._timer.timeout.connect(self._on_timer) - super().__init__(*args, **kwargs) - - def __del__(self): - # The check for deletedness is needed to avoid an error at animation - # shutdown with PySide2. - if not _isdeleted(self._timer): - self._timer_stop() - - def _timer_set_single_shot(self): - self._timer.setSingleShot(self._single) - - def _timer_set_interval(self): - self._timer.setInterval(self._interval) - - def _timer_start(self): - self._timer.start() - - def _timer_stop(self): - self._timer.stop() - - -class FigureCanvasQT(FigureCanvasBase, QtWidgets.QWidget): - required_interactive_framework = "qt" - _timer_cls = TimerQT - manager_class = _api.classproperty(lambda cls: FigureManagerQT) - - buttond = { - getattr(_enum("QtCore.Qt.MouseButton"), k): v for k, v in [ - ("LeftButton", MouseButton.LEFT), - ("RightButton", MouseButton.RIGHT), - ("MiddleButton", MouseButton.MIDDLE), - ("XButton1", MouseButton.BACK), - ("XButton2", MouseButton.FORWARD), - ] - } - - def __init__(self, figure=None): - _create_qApp() - super().__init__(figure=figure) - - self._draw_pending = False - self._is_drawing = False - self._draw_rect_callback = lambda painter: None - self._in_resize_event = False - - self.setAttribute( - _enum("QtCore.Qt.WidgetAttribute").WA_OpaquePaintEvent) - self.setMouseTracking(True) - self.resize(*self.get_width_height()) - - palette = QtGui.QPalette(QtGui.QColor("white")) - self.setPalette(palette) - - def _update_pixel_ratio(self): - if self._set_device_pixel_ratio( - self.devicePixelRatioF() or 1): # rarely, devicePixelRatioF=0 - # The easiest way to resize the canvas is to emit a resizeEvent - # since we implement all the logic for resizing the canvas for - # that event. - event = QtGui.QResizeEvent(self.size(), self.size()) - self.resizeEvent(event) - - def _update_screen(self, screen): - # Handler for changes to a window's attached screen. - self._update_pixel_ratio() - if screen is not None: - screen.physicalDotsPerInchChanged.connect(self._update_pixel_ratio) - screen.logicalDotsPerInchChanged.connect(self._update_pixel_ratio) - - def showEvent(self, event): - # Set up correct pixel ratio, and connect to any signal changes for it, - # once the window is shown (and thus has these attributes). 
- window = self.window().windowHandle() - window.screenChanged.connect(self._update_screen) - self._update_screen(window.screen()) - - def set_cursor(self, cursor): - # docstring inherited - self.setCursor(_api.check_getitem(cursord, cursor=cursor)) - - def mouseEventCoords(self, pos=None): - """ - Calculate mouse coordinates in physical pixels. - - Qt uses logical pixels, but the figure is scaled to physical - pixels for rendering. Transform to physical pixels so that - all of the down-stream transforms work as expected. - - Also, the origin is different and needs to be corrected. - """ - if pos is None: - pos = self.mapFromGlobal(QtGui.QCursor.pos()) - elif hasattr(pos, "position"): # qt6 QtGui.QEvent - pos = pos.position() - elif hasattr(pos, "pos"): # qt5 QtCore.QEvent - pos = pos.pos() - # (otherwise, it's already a QPoint) - x = pos.x() - # flip y so y=0 is bottom of canvas - y = self.figure.bbox.height / self.device_pixel_ratio - pos.y() - return x * self.device_pixel_ratio, y * self.device_pixel_ratio - - def enterEvent(self, event): - # Force querying of the modifiers, as the cached modifier state can - # have been invalidated while the window was out of focus. - mods = QtWidgets.QApplication.instance().queryKeyboardModifiers() - LocationEvent("figure_enter_event", self, - *self.mouseEventCoords(event), - modifiers=self._mpl_modifiers(mods), - guiEvent=event)._process() - - def leaveEvent(self, event): - QtWidgets.QApplication.restoreOverrideCursor() - LocationEvent("figure_leave_event", self, - *self.mouseEventCoords(), - modifiers=self._mpl_modifiers(), - guiEvent=event)._process() - - def mousePressEvent(self, event): - button = self.buttond.get(event.button()) - if button is not None: - MouseEvent("button_press_event", self, - *self.mouseEventCoords(event), button, - modifiers=self._mpl_modifiers(), - guiEvent=event)._process() - - def mouseDoubleClickEvent(self, event): - button = self.buttond.get(event.button()) - if button is not None: - MouseEvent("button_press_event", self, - *self.mouseEventCoords(event), button, dblclick=True, - modifiers=self._mpl_modifiers(), - guiEvent=event)._process() - - def mouseMoveEvent(self, event): - MouseEvent("motion_notify_event", self, - *self.mouseEventCoords(event), - modifiers=self._mpl_modifiers(), - guiEvent=event)._process() - - def mouseReleaseEvent(self, event): - button = self.buttond.get(event.button()) - if button is not None: - MouseEvent("button_release_event", self, - *self.mouseEventCoords(event), button, - modifiers=self._mpl_modifiers(), - guiEvent=event)._process() - - def wheelEvent(self, event): - # from QWheelEvent::pixelDelta doc: pixelDelta is sometimes not - # provided (`isNull()`) and is unreliable on X11 ("xcb"). 
- if (event.pixelDelta().isNull() - or QtWidgets.QApplication.instance().platformName() == "xcb"): - steps = event.angleDelta().y() / 120 - else: - steps = event.pixelDelta().y() - if steps: - MouseEvent("scroll_event", self, - *self.mouseEventCoords(event), step=steps, - modifiers=self._mpl_modifiers(), - guiEvent=event)._process() - - def keyPressEvent(self, event): - key = self._get_key(event) - if key is not None: - KeyEvent("key_press_event", self, - key, *self.mouseEventCoords(), - guiEvent=event)._process() - - def keyReleaseEvent(self, event): - key = self._get_key(event) - if key is not None: - KeyEvent("key_release_event", self, - key, *self.mouseEventCoords(), - guiEvent=event)._process() - - def resizeEvent(self, event): - if self._in_resize_event: # Prevent PyQt6 recursion - return - self._in_resize_event = True - try: - w = event.size().width() * self.device_pixel_ratio - h = event.size().height() * self.device_pixel_ratio - dpival = self.figure.dpi - winch = w / dpival - hinch = h / dpival - self.figure.set_size_inches(winch, hinch, forward=False) - # pass back into Qt to let it finish - QtWidgets.QWidget.resizeEvent(self, event) - # emit our resize events - ResizeEvent("resize_event", self)._process() - self.draw_idle() - finally: - self._in_resize_event = False - - def sizeHint(self): - w, h = self.get_width_height() - return QtCore.QSize(w, h) - - def minumumSizeHint(self): - return QtCore.QSize(10, 10) - - @staticmethod - def _mpl_modifiers(modifiers=None, *, exclude=None): - if modifiers is None: - modifiers = QtWidgets.QApplication.instance().keyboardModifiers() - modifiers = _to_int(modifiers) - # get names of the pressed modifier keys - # 'control' is named 'control' when a standalone key, but 'ctrl' when a - # modifier - # bit twiddling to pick out modifier keys from modifiers bitmask, - # if exclude is a MODIFIER, it should not be duplicated in mods - return [SPECIAL_KEYS[key].replace('control', 'ctrl') - for mask, key in _MODIFIER_KEYS - if exclude != key and modifiers & mask] - - def _get_key(self, event): - event_key = event.key() - mods = self._mpl_modifiers(exclude=event_key) - try: - # for certain keys (enter, left, backspace, etc) use a word for the - # key, rather than Unicode - key = SPECIAL_KEYS[event_key] - except KeyError: - # Unicode defines code points up to 0x10ffff (sys.maxunicode) - # QT will use Key_Codes larger than that for keyboard keys that are - # not Unicode characters (like multimedia keys) - # skip these - # if you really want them, you should add them to SPECIAL_KEYS - if event_key > sys.maxunicode: - return None - - key = chr(event_key) - # qt delivers capitalized letters. 
fix capitalization - # note that capslock is ignored - if 'shift' in mods: - mods.remove('shift') - else: - key = key.lower() - - return '+'.join(mods + [key]) - - def flush_events(self): - # docstring inherited - QtWidgets.QApplication.instance().processEvents() - - def start_event_loop(self, timeout=0): - # docstring inherited - if hasattr(self, "_event_loop") and self._event_loop.isRunning(): - raise RuntimeError("Event loop already running") - self._event_loop = event_loop = QtCore.QEventLoop() - if timeout > 0: - _ = QtCore.QTimer.singleShot(int(timeout * 1000), event_loop.quit) - - with _maybe_allow_interrupt(event_loop): - qt_compat._exec(event_loop) - - def stop_event_loop(self, event=None): - # docstring inherited - if hasattr(self, "_event_loop"): - self._event_loop.quit() - - def draw(self): - """Render the figure, and queue a request for a Qt draw.""" - # The renderer draw is done here; delaying causes problems with code - # that uses the result of the draw() to update plot elements. - if self._is_drawing: - return - with cbook._setattr_cm(self, _is_drawing=True): - super().draw() - self.update() - - def draw_idle(self): - """Queue redraw of the Agg buffer and request Qt paintEvent.""" - # The Agg draw needs to be handled by the same thread Matplotlib - # modifies the scene graph from. Post Agg draw request to the - # current event loop in order to ensure thread affinity and to - # accumulate multiple draw requests from event handling. - # TODO: queued signal connection might be safer than singleShot - if not (getattr(self, '_draw_pending', False) or - getattr(self, '_is_drawing', False)): - self._draw_pending = True - QtCore.QTimer.singleShot(0, self._draw_idle) - - def blit(self, bbox=None): - # docstring inherited - if bbox is None and self.figure: - bbox = self.figure.bbox # Blit the entire canvas if bbox is None. - # repaint uses logical pixels, not physical pixels like the renderer. - l, b, w, h = [int(pt / self.device_pixel_ratio) for pt in bbox.bounds] - t = b + h - self.repaint(l, self.rect().height() - t, w, h) - - def _draw_idle(self): - with self._idle_draw_cntx(): - if not self._draw_pending: - return - self._draw_pending = False - if self.height() < 0 or self.width() < 0: - return - try: - self.draw() - except Exception: - # Uncaught exceptions are fatal for PyQt5, so catch them. - traceback.print_exc() - - def drawRectangle(self, rect): - # Draw the zoom rectangle to the QPainter. _draw_rect_callback needs - # to be called at the end of paintEvent. - if rect is not None: - x0, y0, w, h = [int(pt / self.device_pixel_ratio) for pt in rect] - x1 = x0 + w - y1 = y0 + h - def _draw_rect_callback(painter): - pen = QtGui.QPen( - QtGui.QColor("black"), - 1 / self.device_pixel_ratio - ) - - pen.setDashPattern([3, 3]) - for color, offset in [ - (QtGui.QColor("black"), 0), - (QtGui.QColor("white"), 3), - ]: - pen.setDashOffset(offset) - pen.setColor(color) - painter.setPen(pen) - # Draw the lines from x0, y0 towards x1, y1 so that the - # dashes don't "jump" when moving the zoom box. 
- painter.drawLine(x0, y0, x0, y1) - painter.drawLine(x0, y0, x1, y0) - painter.drawLine(x0, y1, x1, y1) - painter.drawLine(x1, y0, x1, y1) - else: - def _draw_rect_callback(painter): - return - self._draw_rect_callback = _draw_rect_callback - self.update() - - -class MainWindow(QtWidgets.QMainWindow): - closing = QtCore.Signal() - - def closeEvent(self, event): - self.closing.emit() - super().closeEvent(event) - - -class FigureManagerQT(FigureManagerBase): - """ - Attributes - ---------- - canvas : `FigureCanvas` - The FigureCanvas instance - num : int or str - The Figure number - toolbar : qt.QToolBar - The qt.QToolBar - window : qt.QMainWindow - The qt.QMainWindow - """ - - def __init__(self, canvas, num): - self.window = MainWindow() - super().__init__(canvas, num) - self.window.closing.connect( - # The lambda prevents the event from being immediately gc'd. - lambda: CloseEvent("close_event", self.canvas)._process()) - self.window.closing.connect(self._widgetclosed) - - if sys.platform != "darwin": - image = str(cbook._get_data_path('images/matplotlib.svg')) - icon = QtGui.QIcon(image) - self.window.setWindowIcon(icon) - - self.window._destroying = False - - if self.toolbar: - self.window.addToolBar(self.toolbar) - tbs_height = self.toolbar.sizeHint().height() - else: - tbs_height = 0 - - # resize the main window so it will display the canvas with the - # requested size: - cs = canvas.sizeHint() - cs_height = cs.height() - height = cs_height + tbs_height - self.window.resize(cs.width(), height) - - self.window.setCentralWidget(self.canvas) - - if mpl.is_interactive(): - self.window.show() - self.canvas.draw_idle() - - # Give the keyboard focus to the figure instead of the manager: - # StrongFocus accepts both tab and click to focus and will enable the - # canvas to process event without clicking. - # https://doc.qt.io/qt-5/qt.html#FocusPolicy-enum - self.canvas.setFocusPolicy(_enum("QtCore.Qt.FocusPolicy").StrongFocus) - self.canvas.setFocus() - - self.window.raise_() - - def full_screen_toggle(self): - if self.window.isFullScreen(): - self.window.showNormal() - else: - self.window.showFullScreen() - - def _widgetclosed(self): - if self.window._destroying: - return - self.window._destroying = True - try: - Gcf.destroy(self) - except AttributeError: - pass - # It seems that when the python session is killed, - # Gcf can get destroyed before the Gcf.destroy - # line is run, leading to a useless AttributeError. - - def resize(self, width, height): - # The Qt methods return sizes in 'virtual' pixels so we do need to - # rescale from physical to logical pixels. 
- width = int(width / self.canvas.device_pixel_ratio) - height = int(height / self.canvas.device_pixel_ratio) - extra_width = self.window.width() - self.canvas.width() - extra_height = self.window.height() - self.canvas.height() - self.canvas.resize(width, height) - self.window.resize(width + extra_width, height + extra_height) - - @classmethod - def start_main_loop(cls): - qapp = QtWidgets.QApplication.instance() - if qapp: - with _maybe_allow_interrupt(qapp): - qt_compat._exec(qapp) - - def show(self): - self.window.show() - if mpl.rcParams['figure.raise_window']: - self.window.activateWindow() - self.window.raise_() - - def destroy(self, *args): - # check for qApp first, as PySide deletes it in its atexit handler - if QtWidgets.QApplication.instance() is None: - return - if self.window._destroying: - return - self.window._destroying = True - if self.toolbar: - self.toolbar.destroy() - self.window.close() - - def get_window_title(self): - return self.window.windowTitle() - - def set_window_title(self, title): - self.window.setWindowTitle(title) - - -class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar): - message = QtCore.Signal(str) - - toolitems = [*NavigationToolbar2.toolitems] - toolitems.insert( - # Add 'customize' action after 'subplots' - [name for name, *_ in toolitems].index("Subplots") + 1, - ("Customize", "Edit axis, curve and image parameters", - "qt4_editor_options", "edit_parameters")) - - def __init__(self, canvas, parent=None, coordinates=True): - """coordinates: should we show the coordinates on the right?""" - QtWidgets.QToolBar.__init__(self, parent) - self.setAllowedAreas(QtCore.Qt.ToolBarArea( - _to_int(_enum("QtCore.Qt.ToolBarArea").TopToolBarArea) | - _to_int(_enum("QtCore.Qt.ToolBarArea").BottomToolBarArea))) - - self.coordinates = coordinates - self._actions = {} # mapping of toolitem method names to QActions. - self._subplot_dialog = None - - for text, tooltip_text, image_file, callback in self.toolitems: - if text is None: - self.addSeparator() - else: - a = self.addAction(self._icon(image_file + '.png'), - text, getattr(self, callback)) - self._actions[callback] = a - if callback in ['zoom', 'pan']: - a.setCheckable(True) - if tooltip_text is not None: - a.setToolTip(tooltip_text) - - # Add the (x, y) location widget at the right side of the toolbar - # The stretch factor is 1 which means any resizing of the toolbar - # will resize this label instead of the buttons. - if self.coordinates: - self.locLabel = QtWidgets.QLabel("", self) - self.locLabel.setAlignment(QtCore.Qt.AlignmentFlag( - _to_int(_enum("QtCore.Qt.AlignmentFlag").AlignRight) | - _to_int(_enum("QtCore.Qt.AlignmentFlag").AlignVCenter))) - self.locLabel.setSizePolicy(QtWidgets.QSizePolicy( - _enum("QtWidgets.QSizePolicy.Policy").Expanding, - _enum("QtWidgets.QSizePolicy.Policy").Ignored, - )) - labelAction = self.addWidget(self.locLabel) - labelAction.setVisible(True) - - NavigationToolbar2.__init__(self, canvas) - - def _icon(self, name): - """ - Construct a `.QIcon` from an image file *name*, including the extension - and relative to Matplotlib's "images" data directory. 
- """ - # use a high-resolution icon with suffix '_large' if available - # note: user-provided icons may not have '_large' versions - path_regular = cbook._get_data_path('images', name) - path_large = path_regular.with_name( - path_regular.name.replace('.png', '_large.png')) - filename = str(path_large if path_large.exists() else path_regular) - - pm = QtGui.QPixmap(filename) - pm.setDevicePixelRatio( - self.devicePixelRatioF() or 1) # rarely, devicePixelRatioF=0 - if self.palette().color(self.backgroundRole()).value() < 128: - icon_color = self.palette().color(self.foregroundRole()) - mask = pm.createMaskFromColor( - QtGui.QColor('black'), - _enum("QtCore.Qt.MaskMode").MaskOutColor) - pm.fill(icon_color) - pm.setMask(mask) - return QtGui.QIcon(pm) - - def edit_parameters(self): - axes = self.canvas.figure.get_axes() - if not axes: - QtWidgets.QMessageBox.warning( - self.canvas.parent(), "Error", "There are no axes to edit.") - return - elif len(axes) == 1: - ax, = axes - else: - titles = [ - ax.get_label() or - ax.get_title() or - ax.get_title("left") or - ax.get_title("right") or - " - ".join(filter(None, [ax.get_xlabel(), ax.get_ylabel()])) or - f"" - for ax in axes] - duplicate_titles = [ - title for title in titles if titles.count(title) > 1] - for i, ax in enumerate(axes): - if titles[i] in duplicate_titles: - titles[i] += f" (id: {id(ax):#x})" # Deduplicate titles. - item, ok = QtWidgets.QInputDialog.getItem( - self.canvas.parent(), - 'Customize', 'Select axes:', titles, 0, False) - if not ok: - return - ax = axes[titles.index(item)] - figureoptions.figure_edit(ax, self) - - def _update_buttons_checked(self): - # sync button checkstates to match active mode - if 'pan' in self._actions: - self._actions['pan'].setChecked(self.mode.name == 'PAN') - if 'zoom' in self._actions: - self._actions['zoom'].setChecked(self.mode.name == 'ZOOM') - - def pan(self, *args): - super().pan(*args) - self._update_buttons_checked() - - def zoom(self, *args): - super().zoom(*args) - self._update_buttons_checked() - - def set_message(self, s): - self.message.emit(s) - if self.coordinates: - self.locLabel.setText(s) - - def draw_rubberband(self, event, x0, y0, x1, y1): - height = self.canvas.figure.bbox.height - y1 = height - y1 - y0 = height - y0 - rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)] - self.canvas.drawRectangle(rect) - - def remove_rubberband(self): - self.canvas.drawRectangle(None) - - def configure_subplots(self): - if self._subplot_dialog is None: - self._subplot_dialog = SubplotToolQt( - self.canvas.figure, self.canvas.parent()) - self.canvas.mpl_connect( - "close_event", lambda e: self._subplot_dialog.reject()) - self._subplot_dialog.update_from_current_subplotpars() - self._subplot_dialog.show() - return self._subplot_dialog - - def save_figure(self, *args): - filetypes = self.canvas.get_supported_filetypes_grouped() - sorted_filetypes = sorted(filetypes.items()) - default_filetype = self.canvas.get_default_filetype() - - startpath = os.path.expanduser(mpl.rcParams['savefig.directory']) - start = os.path.join(startpath, self.canvas.get_default_filename()) - filters = [] - selectedFilter = None - for name, exts in sorted_filetypes: - exts_list = " ".join(['*.%s' % ext for ext in exts]) - filter = '%s (%s)' % (name, exts_list) - if default_filetype in exts: - selectedFilter = filter - filters.append(filter) - filters = ';;'.join(filters) - - fname, filter = qt_compat._getSaveFileName( - self.canvas.parent(), "Choose a filename to save to", start, - filters, selectedFilter) - if 
fname: - # Save dir for next time, unless empty str (i.e., use cwd). - if startpath != "": - mpl.rcParams['savefig.directory'] = os.path.dirname(fname) - try: - self.canvas.figure.savefig(fname) - except Exception as e: - QtWidgets.QMessageBox.critical( - self, "Error saving file", str(e), - _enum("QtWidgets.QMessageBox.StandardButton").Ok, - _enum("QtWidgets.QMessageBox.StandardButton").NoButton) - - def set_history_buttons(self): - can_backward = self._nav_stack._pos > 0 - can_forward = self._nav_stack._pos < len(self._nav_stack._elements) - 1 - if 'back' in self._actions: - self._actions['back'].setEnabled(can_backward) - if 'forward' in self._actions: - self._actions['forward'].setEnabled(can_forward) - - -class SubplotToolQt(QtWidgets.QDialog): - def __init__(self, targetfig, parent): - super().__init__() - self.setWindowIcon(QtGui.QIcon( - str(cbook._get_data_path("images/matplotlib.png")))) - self.setObjectName("SubplotTool") - self._spinboxes = {} - main_layout = QtWidgets.QHBoxLayout() - self.setLayout(main_layout) - for group, spinboxes, buttons in [ - ("Borders", - ["top", "bottom", "left", "right"], - [("Export values", self._export_values)]), - ("Spacings", - ["hspace", "wspace"], - [("Tight layout", self._tight_layout), - ("Reset", self._reset), - ("Close", self.close)])]: - layout = QtWidgets.QVBoxLayout() - main_layout.addLayout(layout) - box = QtWidgets.QGroupBox(group) - layout.addWidget(box) - inner = QtWidgets.QFormLayout(box) - for name in spinboxes: - self._spinboxes[name] = spinbox = QtWidgets.QDoubleSpinBox() - spinbox.setRange(0, 1) - spinbox.setDecimals(3) - spinbox.setSingleStep(0.005) - spinbox.setKeyboardTracking(False) - spinbox.valueChanged.connect(self._on_value_changed) - inner.addRow(name, spinbox) - layout.addStretch(1) - for name, method in buttons: - button = QtWidgets.QPushButton(name) - # Don't trigger on , which is used to input values. - button.setAutoDefault(False) - button.clicked.connect(method) - layout.addWidget(button) - if name == "Close": - button.setFocus() - self._figure = targetfig - self._defaults = {} - self._export_values_dialog = None - self.update_from_current_subplotpars() - - def update_from_current_subplotpars(self): - self._defaults = {spinbox: getattr(self._figure.subplotpars, name) - for name, spinbox in self._spinboxes.items()} - self._reset() # Set spinbox current values without triggering signals. - - def _export_values(self): - # Explicitly round to 3 decimals (which is also the spinbox precision) - # to avoid numbers of the form 0.100...001. - self._export_values_dialog = QtWidgets.QDialog() - layout = QtWidgets.QVBoxLayout() - self._export_values_dialog.setLayout(layout) - text = QtWidgets.QPlainTextEdit() - text.setReadOnly(True) - layout.addWidget(text) - text.setPlainText( - ",\n".join(f"{attr}={spinbox.value():.3}" - for attr, spinbox in self._spinboxes.items())) - # Adjust the height of the text widget to fit the whole text, plus - # some padding. - size = text.maximumSize() - size.setHeight( - QtGui.QFontMetrics(text.document().defaultFont()) - .size(0, text.toPlainText()).height() + 20) - text.setMaximumSize(size) - self._export_values_dialog.show() - - def _on_value_changed(self): - spinboxes = self._spinboxes - # Set all mins and maxes, so that this can also be used in _reset(). 
- for lower, higher in [("bottom", "top"), ("left", "right")]: - spinboxes[higher].setMinimum(spinboxes[lower].value() + .001) - spinboxes[lower].setMaximum(spinboxes[higher].value() - .001) - self._figure.subplots_adjust( - **{attr: spinbox.value() for attr, spinbox in spinboxes.items()}) - self._figure.canvas.draw_idle() - - def _tight_layout(self): - self._figure.tight_layout() - for attr, spinbox in self._spinboxes.items(): - spinbox.blockSignals(True) - spinbox.setValue(getattr(self._figure.subplotpars, attr)) - spinbox.blockSignals(False) - self._figure.canvas.draw_idle() - - def _reset(self): - for spinbox, value in self._defaults.items(): - spinbox.setRange(0, 1) - spinbox.blockSignals(True) - spinbox.setValue(value) - spinbox.blockSignals(False) - self._on_value_changed() - - -class ToolbarQt(ToolContainerBase, QtWidgets.QToolBar): - def __init__(self, toolmanager, parent=None): - ToolContainerBase.__init__(self, toolmanager) - QtWidgets.QToolBar.__init__(self, parent) - self.setAllowedAreas(QtCore.Qt.ToolBarArea( - _to_int(_enum("QtCore.Qt.ToolBarArea").TopToolBarArea) | - _to_int(_enum("QtCore.Qt.ToolBarArea").BottomToolBarArea))) - message_label = QtWidgets.QLabel("") - message_label.setAlignment(QtCore.Qt.AlignmentFlag( - _to_int(_enum("QtCore.Qt.AlignmentFlag").AlignRight) | - _to_int(_enum("QtCore.Qt.AlignmentFlag").AlignVCenter))) - message_label.setSizePolicy(QtWidgets.QSizePolicy( - _enum("QtWidgets.QSizePolicy.Policy").Expanding, - _enum("QtWidgets.QSizePolicy.Policy").Ignored, - )) - self._message_action = self.addWidget(message_label) - self._toolitems = {} - self._groups = {} - - def add_toolitem( - self, name, group, position, image_file, description, toggle): - - button = QtWidgets.QToolButton(self) - if image_file: - button.setIcon(NavigationToolbar2QT._icon(self, image_file)) - button.setText(name) - if description: - button.setToolTip(description) - - def handler(): - self.trigger_tool(name) - if toggle: - button.setCheckable(True) - button.toggled.connect(handler) - else: - button.clicked.connect(handler) - - self._toolitems.setdefault(name, []) - self._add_to_group(group, name, button, position) - self._toolitems[name].append((button, handler)) - - def _add_to_group(self, group, name, button, position): - gr = self._groups.get(group, []) - if not gr: - sep = self.insertSeparator(self._message_action) - gr.append(sep) - before = gr[position] - widget = self.insertWidget(before, button) - gr.insert(position, widget) - self._groups[group] = gr - - def toggle_toolitem(self, name, toggled): - if name not in self._toolitems: - return - for button, handler in self._toolitems[name]: - button.toggled.disconnect(handler) - button.setChecked(toggled) - button.toggled.connect(handler) - - def remove_toolitem(self, name): - for button, handler in self._toolitems[name]: - button.setParent(None) - del self._toolitems[name] - - def set_message(self, s): - self.widgetForAction(self._message_action).setText(s) - - -@backend_tools._register_tool_class(FigureCanvasQT) -class ConfigureSubplotsQt(backend_tools.ConfigureSubplotsBase): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._subplot_dialog = None - - def trigger(self, *args): - NavigationToolbar2QT.configure_subplots(self) - - -@backend_tools._register_tool_class(FigureCanvasQT) -class SaveFigureQt(backend_tools.SaveFigureBase): - def trigger(self, *args): - NavigationToolbar2QT.save_figure( - self._make_classic_style_pseudo_toolbar()) - - -@backend_tools._register_tool_class(FigureCanvasQT) 
-class RubberbandQt(backend_tools.RubberbandBase): - def draw_rubberband(self, x0, y0, x1, y1): - NavigationToolbar2QT.draw_rubberband( - self._make_classic_style_pseudo_toolbar(), None, x0, y0, x1, y1) - - def remove_rubberband(self): - NavigationToolbar2QT.remove_rubberband( - self._make_classic_style_pseudo_toolbar()) - - -@backend_tools._register_tool_class(FigureCanvasQT) -class HelpQt(backend_tools.ToolHelpBase): - def trigger(self, *args): - QtWidgets.QMessageBox.information(None, "Help", self._get_help_html()) - - -@backend_tools._register_tool_class(FigureCanvasQT) -class ToolCopyToClipboardQT(backend_tools.ToolCopyToClipboardBase): - def trigger(self, *args, **kwargs): - pixmap = self.canvas.grab() - QtWidgets.QApplication.instance().clipboard().setPixmap(pixmap) - - -FigureManagerQT._toolbar2_class = NavigationToolbar2QT -FigureManagerQT._toolmanager_toolbar_class = ToolbarQt - - -@_Backend.export -class _BackendQT(_Backend): - backend_version = __version__ - FigureCanvas = FigureCanvasQT - FigureManager = FigureManagerQT - mainloop = FigureManagerQT.start_main_loop diff --git a/spaces/kyled/PhraseSentimentEmotionAnalysis/app.py b/spaces/kyled/PhraseSentimentEmotionAnalysis/app.py deleted file mode 100644 index 95a9054e4454634db85a9f0a38f28cad529afa62..0000000000000000000000000000000000000000 --- a/spaces/kyled/PhraseSentimentEmotionAnalysis/app.py +++ /dev/null @@ -1,35 +0,0 @@ -import streamlit as st -import transformers as t -import plotly.express as px -import pandas as pd - -st.title("Phrase Emotion Analysis") -with st.spinner(text="Loading model..."): - classifier = t.pipeline("zero-shot-classification", - model="facebook/bart-large-mnli", - multi_class=True) - sentiment_task = t.pipeline("sentiment-analysis", - model="cardiffnlp/twitter-xlm-roberta-base-sentiment", - tokenizer="cardiffnlp/twitter-xlm-roberta-base-sentiment") - - -x = st.text_input("Enter your title here:") -candidate_labels = ['anger', 'sadness', 'fear', 'joy', 'interest', - 'surprise', 'disgust', 'shame', 'compassion', 'other'] - -if x != "": - with st.spinner(text="Evaluating your input..."): - output = classifier(x, candidate_labels) - sentiment = sentiment_task(x) - st.write(str(sentiment)) - - ordered_results = [] - for lbl in candidate_labels: - ind = output['labels'].index(lbl) - ordered_results.append(output['scores'][ind]) - - df = pd.DataFrame(dict(r=ordered_results, theta=candidate_labels)) - fig = px.line_polar(df, r='r', theta='theta', line_close=True) - fig.update_traces(fill='toself') - - st.plotly_chart(fig) diff --git a/spaces/leurez/moss/README.md b/spaces/leurez/moss/README.md deleted file mode 100644 index 8b5280ac8377b7811a4884095e1f9ab9ee45b78b..0000000000000000000000000000000000000000 --- a/spaces/leurez/moss/README.md +++ /dev/null @@ -1,349 +0,0 @@ ---- -title: Moss -emoji: <9F><8F><83> -colorFrom: pink -colorTo: indigo -sdk: docker -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -# ChatGPT Web - -> 声明:此项目只发布于 GitHub,基于 MIT 协议,免费且作为开源学习使用。并且不会有任何形式的卖号、付费服务、讨论群、讨论组等行为。谨防受骗。 - -![cover](./docs/c1.png) -![cover2](./docs/c2.png) - -- [ChatGPT Web](#chatgpt-web) - - [介绍](#介绍) - - [待实现路线](#待实现路线) - - [前置要求](#前置要求) - - [Node](#node) - - [PNPM](#pnpm) - - [填写密钥](#填写密钥) - - [安装依赖](#安装依赖) - - [后端](#后端) - - [前端](#前端) - - [测试环境运行](#测试环境运行) - - [后端服务](#后端服务) - - [前端网页](#前端网页) - - [环境变量](#环境变量) - - [打包](#打包) - - [使用 Docker](#使用-docker) - - [Docker 参数示例](#docker-参数示例) - - [Docker build \& 
Run](#docker-build--run) - - [Docker compose](#docker-compose) - - [使用 Railway 部署](#使用-railway-部署) - - [Railway 环境变量](#railway-环境变量) - - [手动打包](#手动打包) - - [后端服务](#后端服务-1) - - [前端网页](#前端网页-1) - - [常见问题](#常见问题) - - [参与贡献](#参与贡献) - - [赞助](#赞助) - - [License](#license) -## 介绍 - -支持双模型,提供了两种非官方 `ChatGPT API` 方法 - -| 方式 | 免费? | 可靠性 | 质量 | -| --------------------------------------------- | ------ | ---------- | ---- | -| `ChatGPTAPI(gpt-3.5-turbo-0301)` | 否 | 可靠 | 相对较笨 | -| `ChatGPTUnofficialProxyAPI(网页 accessToken)` | 是 | 相对不可靠 | 聪明 | - -对比: -1. `ChatGPTAPI` 使用 `gpt-3.5-turbo` 通过 `OpenAI` 官方 `API` 调用 `ChatGPT` -2. `ChatGPTUnofficialProxyAPI` 使用非官方代理服务器访问 `ChatGPT` 的后端`API`,绕过`Cloudflare`(依赖于第三方服务器,并且有速率限制) - -警告: -1. 你应该首先使用 `API` 方式 -2. 使用 `API` 时,如果网络不通,那是国内被墙了,你需要自建代理,绝对不要使用别人的公开代理,那是危险的。 -3. 使用 `accessToken` 方式时反向代理将向第三方暴露您的访问令牌,这样做应该不会产生任何不良影响,但在使用这种方法之前请考虑风险。 -4. 使用 `accessToken` 时,不管你是国内还是国外的机器,都会使用代理。默认代理为 [acheong08](https://github.com/acheong08) 大佬的 `https://bypass.churchless.tech/api/conversation`,这不是后门也不是监听,除非你有能力自己翻过 `CF` 验证,用前请知悉。[社区代理](https://github.com/transitive-bullshit/chatgpt-api#reverse-proxy)(注意:只有这两个是推荐,其他第三方来源,请自行甄别) -5. 把项目发布到公共网络时,你应该设置 `AUTH_SECRET_KEY` 变量添加你的密码访问权限,你也应该修改 `index.html` 中的 `title`,防止被关键词搜索到。 - -切换方式: -1. 进入 `service/.env.example` 文件,复制内容到 `service/.env` 文件 -2. 使用 `OpenAI API Key` 请填写 `OPENAI_API_KEY` 字段 [(获取 apiKey)](https://platform.openai.com/overview) -3. 使用 `Web API` 请填写 `OPENAI_ACCESS_TOKEN` 字段 [(获取 accessToken)](https://chat.openai.com/api/auth/session) -4. 同时存在时以 `OpenAI API Key` 优先 - -环境变量: - -全部参数变量请查看或[这里](#环境变量) - -``` -/service/.env.example -``` - -## 待实现路线 -[✓] 双模型 - -[✓] 多会话储存和上下文逻辑 - -[✓] 对代码等消息类型的格式化美化处理 - -[✓] 访问权限控制 - -[✓] 数据导入、导出 - -[✓] 保存消息到本地图片 - -[✓] 界面多语言 - -[✓] 界面主题 - -[✗] More... - -## 前置要求 - -### Node - -`node` 需要 `^16 || ^18 || ^19` 版本(`node >= 14` 需要安装 [fetch polyfill](https://github.com/developit/unfetch#usage-as-a-polyfill)),使用 [nvm](https://github.com/nvm-sh/nvm) 可管理本地多个 `node` 版本 - -```shell -node -v -``` - -### PNPM -如果你没有安装过 `pnpm` -```shell -npm install pnpm -g -``` - -### 填写密钥 -获取 `Openai Api Key` 或 `accessToken` 并填写本地环境变量 [跳转](#介绍) - -``` -# service/.env 文件 - -# OpenAI API Key - https://platform.openai.com/overview -OPENAI_API_KEY= - -# change this to an `accessToken` extracted from the ChatGPT site's `https://chat.openai.com/api/auth/session` response -OPENAI_ACCESS_TOKEN= -``` - -## 安装依赖 - -> 为了简便 `后端开发人员` 的了解负担,所以并没有采用前端 `workspace` 模式,而是分文件夹存放。如果只需要前端页面做二次开发,删除 `service` 文件夹即可。 - -### 后端 - -进入文件夹 `/service` 运行以下命令 - -```shell -pnpm install -``` - -### 前端 -根目录下运行以下命令 -```shell -pnpm bootstrap -``` - -## 测试环境运行 -### 后端服务 - -进入文件夹 `/service` 运行以下命令 - -```shell -pnpm start -``` - -### 前端网页 -根目录下运行以下命令 -```shell -pnpm dev -``` - -## 环境变量 - -`API` 可用: - -- `OPENAI_API_KEY` 和 `OPENAI_ACCESS_TOKEN` 二选一 -- `OPENAI_API_MODEL` 设置模型,可选,默认:`gpt-3.5-turbo` -- `OPENAI_API_BASE_URL` 设置接口地址,可选,默认:`https://api.openai.com` -- `OPENAI_API_DISABLE_DEBUG` 设置接口关闭 debug 日志,可选,默认:empty 不关闭 - -`ACCESS_TOKEN` 可用: - -- `OPENAI_ACCESS_TOKEN` 和 `OPENAI_API_KEY` 二选一,同时存在时,`OPENAI_API_KEY` 优先 -- `API_REVERSE_PROXY` 设置反向代理,可选,默认:`https://bypass.churchless.tech/api/conversation`,[社区](https://github.com/transitive-bullshit/chatgpt-api#reverse-proxy)(注意:只有这两个是推荐,其他第三方来源,请自行甄别) - -通用: - -- `AUTH_SECRET_KEY` 访问权限密钥,可选 -- `MAX_REQUEST_PER_HOUR` 每小时最大请求次数,可选,默认无限 -- `TIMEOUT_MS` 超时,单位毫秒,可选 -- `SOCKS_PROXY_HOST` 和 `SOCKS_PROXY_PORT` 一起时生效,可选 -- `SOCKS_PROXY_PORT` 和 `SOCKS_PROXY_HOST` 一起时生效,可选 -- `HTTPS_PROXY` 支持 `http`,`https`, `socks5`,可选 -- 
`ALL_PROXY` 支持 `http`,`https`, `socks5`,可选 - -## 打包 - -### 使用 Docker - -#### Docker 参数示例 - -![docker](./docs/docker.png) - -#### Docker build & Run - -```bash -docker build -t chatgpt-web . - -# 前台运行 -docker run --name chatgpt-web --rm -it -p 127.0.0.1:7860:7860 --env OPENAI_API_KEY=your_api_key chatgpt-web - -# 后台运行 -docker run --name chatgpt-web -d -p 127.0.0.1:7860:7860 --env OPENAI_API_KEY=your_api_key chatgpt-web - -# 运行地址 -http://localhost:7860/ -``` - -#### Docker compose - -[Hub 地址](https://hub.docker.com/repository/docker/chenzhaoyu94/chatgpt-web/general) - -```yml -version: '3' - -services: - app: - image: chenzhaoyu94/chatgpt-web # 总是使用 latest ,更新时重新 pull 该 tag 镜像即可 - ports: - - 127.0.0.1:7860:7860 - environment: - # 二选一 - OPENAI_API_KEY: sk-xxx - # 二选一 - OPENAI_ACCESS_TOKEN: xxx - # API接口地址,可选,设置 OPENAI_API_KEY 时可用 - OPENAI_API_BASE_URL: xxx - # API模型,可选,设置 OPENAI_API_KEY 时可用,https://platform.openai.com/docs/models - # gpt-4, gpt-4-0314, gpt-4-32k, gpt-4-32k-0314, gpt-3.5-turbo, gpt-3.5-turbo-0301, text-davinci-003, text-davinci-002, code-davinci-002 - OPENAI_API_MODEL: xxx - # 反向代理,可选 - API_REVERSE_PROXY: xxx - # 访问权限密钥,可选 - AUTH_SECRET_KEY: xxx - # 每小时最大请求次数,可选,默认无限 - MAX_REQUEST_PER_HOUR: 0 - # 超时,单位毫秒,可选 - TIMEOUT_MS: 60000 - # Socks代理,可选,和 SOCKS_PROXY_PORT 一起时生效 - SOCKS_PROXY_HOST: xxx - # Socks代理端口,可选,和 SOCKS_PROXY_HOST 一起时生效 - SOCKS_PROXY_PORT: xxx - # HTTPS 代理,可选,支持 http,https,socks5 - HTTPS_PROXY: http://xxx:7890 -``` -- `OPENAI_API_BASE_URL` 可选,设置 `OPENAI_API_KEY` 时可用 -- `OPENAI_API_MODEL` 可选,设置 `OPENAI_API_KEY` 时可用 -### 使用 Railway 部署 - -[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/new/template/yytmgc) - -#### Railway 环境变量 - -| 环境变量名称 | 必填 | 备注 | -| --------------------- | ---------------------- | -------------------------------------------------------------------------------------------------- | -| `PORT` | 必填 | 默认 `7860` -| `AUTH_SECRET_KEY` | 可选 | 访问权限密钥 | -| `MAX_REQUEST_PER_HOUR` | 可选 | 每小时最大请求次数,可选,默认无限 | -| `TIMEOUT_MS` | 可选 | 超时时间,单位毫秒 | -| `OPENAI_API_KEY` | `OpenAI API` 二选一 | 使用 `OpenAI API` 所需的 `apiKey` [(获取 apiKey)](https://platform.openai.com/overview) | -| `OPENAI_ACCESS_TOKEN` | `Web API` 二选一 | 使用 `Web API` 所需的 `accessToken` [(获取 accessToken)](https://chat.openai.com/api/auth/session) | -| `OPENAI_API_BASE_URL` | 可选,`OpenAI API` 时可用 | `API`接口地址 | -| `OPENAI_API_MODEL` | 可选,`OpenAI API` 时可用 | `API`模型 | -| `API_REVERSE_PROXY` | 可选,`Web API` 时可用 | `Web API` 反向代理地址 [详情](https://github.com/transitive-bullshit/chatgpt-api#reverse-proxy) | -| `SOCKS_PROXY_HOST` | 可选,和 `SOCKS_PROXY_PORT` 一起时生效 | Socks代理 | -| `SOCKS_PROXY_PORT` | 可选,和 `SOCKS_PROXY_HOST` 一起时生效 | Socks代理端口 | -| `SOCKS_PROXY_USERNAME` | 可选,和 `SOCKS_PROXY_HOST` 一起时生效 | Socks代理用户名 | -| `SOCKS_PROXY_PASSWORD` | 可选,和 `SOCKS_PROXY_HOST` 一起时生效 | Socks代理密码 | -| `HTTPS_PROXY` | 可选 | HTTPS 代理,支持 http,https, socks5 | -| `ALL_PROXY` | 可选 | 所有代理 代理,支持 http,https, socks5 | - -> 注意: `Railway` 修改环境变量会重新 `Deploy` - -### 手动打包 -#### 后端服务 -> 如果你不需要本项目的 `node` 接口,可以省略如下操作 - -复制 `service` 文件夹到你有 `node` 服务环境的服务器上。 - -```shell -# 安装 -pnpm install - -# 打包 -pnpm build - -# 运行 -pnpm prod -``` - -PS: 不进行打包,直接在服务器上运行 `pnpm start` 也可 - -#### 前端网页 - -1、修改根目录下 `.env` 文件中的 `VITE_GLOB_API_URL` 为你的实际后端接口地址 - -2、根目录下运行以下命令,然后将 `dist` 文件夹内的文件复制到你网站服务的根目录下 - -[参考信息](https://cn.vitejs.dev/guide/static-deploy.html#building-the-app) - -```shell -pnpm build -``` - -## 常见问题 -Q: 为什么 `Git` 提交总是报错? - -A: 因为有提交信息验证,请遵循 [Commit 指南](./CONTRIBUTING.md) - -Q: 如果只使用前端页面,在哪里改请求接口? 
- -A: 根目录下 `.env` 文件中的 `VITE_GLOB_API_URL` 字段。 - -Q: 文件保存时全部爆红? - -A: `vscode` 请安装项目推荐插件,或手动安装 `Eslint` 插件。 - -Q: 前端没有打字机效果? - -A: 一种可能原因是经过 Nginx 反向代理,开启了 buffer,则 Nginx 会尝试从后端缓冲一定大小的数据再发送给浏览器。请尝试在反代参数后添加 `proxy_buffering off;`,然后重载 Nginx。其他 web server 配置同理。 - -## 参与贡献 - -贡献之前请先阅读 [贡献指南](./CONTRIBUTING.md) - -感谢所有做过贡献的人! - - - - - -## 赞助 - -如果你觉得这个项目对你有帮助,并且情况允许的话,可以给我一点点支持,总之非常感谢支持~ - -
- 微信 / WeChat Pay QR code | 支付宝 / Alipay QR code
          - -## License -MIT © [MossTech](./license) diff --git a/spaces/libhost/img/imglib.py b/spaces/libhost/img/imglib.py deleted file mode 100644 index 4e634b6db0087169061654fc3ea962ff4f60aa08..0000000000000000000000000000000000000000 --- a/spaces/libhost/img/imglib.py +++ /dev/null @@ -1,191 +0,0 @@ -from html import escape -import re -import streamlit as st -import pandas as pd, numpy as np -from transformers import CLIPProcessor, CLIPModel -from st_clickable_images import clickable_images - - -num_results=75 - -@st.cache( - show_spinner=False, - hash_funcs={ - CLIPModel: lambda _: None, - CLIPProcessor: lambda _: None, - dict: lambda _: None, - }, -) -def load(): - model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14") - processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14") - df = {0: pd.read_csv("data.csv"), 1: pd.read_csv("data2.csv")} - embeddings = {0: np.load("embeddings.npy"), 1: np.load("embeddings2.npy")} - for k in [0, 1]: - embeddings[k] = embeddings[k] / np.linalg.norm( - embeddings[k], axis=1, keepdims=True - ) - return model, processor, df, embeddings - - -model, processor, df, embeddings = load() -source = {0: "\nSource: Unsplash", 1: "\nSource: The Movie Database (TMDB)"} - - -def compute_text_embeddings(list_of_strings): - inputs = processor(text=list_of_strings, return_tensors="pt", padding=True) - result = model.get_text_features(**inputs).detach().numpy() - return result / np.linalg.norm(result, axis=1, keepdims=True) - - -def image_search(query, corpus, n_results=num_results): - positive_embeddings = None - def concatenate_embeddings(e1, e2): - if e1 is None: - return e2 - else: - return np.concatenate((e1, e2), axis=0) - - splitted_query = query.split("EXCLUDING ") - dot_product = 0 - k = 0 if corpus == "Unsplash" else 1 - if len(splitted_query[0]) > 0: - positive_queries = splitted_query[0].split(";") - for positive_query in positive_queries: - match = re.match(r"\[(Movies|Unsplash):(\d{1,5})\](.*)", positive_query) - if match: - corpus2, idx, remainder = match.groups() - idx, remainder = int(idx), remainder.strip() - k2 = 0 if corpus2 == "Unsplash" else 1 - positive_embeddings = concatenate_embeddings( - positive_embeddings, embeddings[k2][idx : idx + 1, :] - ) - if len(remainder) > 0: - positive_embeddings = concatenate_embeddings( - positive_embeddings, compute_text_embeddings([remainder]) - ) - else: - positive_embeddings = concatenate_embeddings( - positive_embeddings, compute_text_embeddings([positive_query]) - ) - dot_product = embeddings[k] @ positive_embeddings.T - dot_product = dot_product - np.median(dot_product, axis=0) - dot_product = dot_product / np.max(dot_product, axis=0, keepdims=True) - dot_product = np.min(dot_product, axis=1) - - if len(splitted_query) > 1: - negative_queries = (" ".join(splitted_query[1:])).split(";") - negative_embeddings = compute_text_embeddings(negative_queries) - dot_product2 = embeddings[k] @ negative_embeddings.T - dot_product2 = dot_product2 - np.median(dot_product2, axis=0) - dot_product2 = dot_product2 / np.max(dot_product2, axis=0, keepdims=True) - dot_product -= np.max(np.maximum(dot_product2, 0), axis=1) - - results = np.argsort(dot_product)[-1 : -n_results - 1 : -1] - return [ - ( - df[k].iloc[i]["path"], - df[k].iloc[i]["tooltip"] + source[k], - i, - ) - for i in results - ] - - -description = """ - -# ImgLib -**Enter your query and hit enter** -""" - -howto = """ -- Click image to find similar images -- Use "**;**" to combine multiple queries) -- Use "**EXCLUDING**", to 
exclude a query -""" - - -def main(): - st.markdown( - """ -

          🖼 ImgLib

          - """, - unsafe_allow_html=True, - ) - st.sidebar.markdown(description) - with st.sidebar.expander("Advanced use"): - st.markdown(howto) - - - st.sidebar.markdown(f"Try these test prompts: Lord of the Rings, Interstellar, Back to the Future, Avengers, The Matrix, WALL·E, Castle , Dune, Blade Runner, Guardians of the Galaxy, Aliens, Her, Legend of the Ten Rings, Harry Potter, Logan, Dragon, Scissorhands, Captain, Deadpool, ThorArrivval, Wick, Peaks, Labyrinth, Terabithia, RoboCop, Wonder Woman, Meteor, NYC, Stork, Pink, Yellow, Orange, Blue, tulip, dog, Dragon, sunrise, kitten, Swimming, jellyfish, Beach, puppy, Coral") - st.sidebar.markdown(f"Unsplash has categories that match: backgrounds, photos, nature, iphone, etc") - st.sidebar.markdown(f"Unsplash images contain animals, apps, events, feelings, food, travel, nature, people, religion, sports, things, stock") - st.sidebar.markdown(f"Unsplash things include flag, tree, clock, money, tattoo, arrow, book, car, fireworks, ghost, health, kiss, dance, balloon, crown, eye, house, music, airplane, lighthouse, typewriter, toys") - st.sidebar.markdown(f"unsplash feelings include funny, heart, love, cool, congratulations, love, scary, cute, friendship, inspirational, hug, sad, cursed, beautiful, crazy, respect, transformation, peaceful, happy") - st.sidebar.markdown(f"unsplash people contain baby, life, women, family, girls, pregnancy, society, old people, musician, attractive, bohemian") - st.sidebar.markdown(f"imagenet queries include: photo of, photo of many, sculpture of, rendering of, graffiti of, tattoo of, embroidered, drawing of, plastic, black and white, painting, video game, doodle, origami, sketch, etc") - st.sidebar.markdown(f"by Evgeniy Hristoforu") - - - _, c, _ = st.columns((1, 3, 1)) - if "query" in st.session_state: - query = c.text_input("", value=st.session_state["query"]) - else: - - query = c.text_input("", value="lighthouse") - corpus = st.radio("", ["Unsplash"]) - #corpus = st.radio("", ["Unsplash", "Movies"]) - if len(query) > 0: - results = image_search(query, corpus) - clicked = clickable_images( - [result[0] for result in results], - titles=[result[1] for result in results], - div_style={ - "display": "flex", - "justify-content": "center", - "flex-wrap": "wrap", - }, - img_style={"margin": "2px", "height": "200px"}, - ) - if clicked >= 0: - change_query = False - if "last_clicked" not in st.session_state: - change_query = True - else: - if clicked != st.session_state["last_clicked"]: - change_query = True - if change_query: - st.session_state["query"] = f"[{corpus}:{results[clicked][2]}]" - st.experimental_rerun() - - -if __name__ == "__main__": - main() diff --git a/spaces/limcheekin/Yarn-Mistral-7B-128k-GGUF/README.md b/spaces/limcheekin/Yarn-Mistral-7B-128k-GGUF/README.md deleted file mode 100644 index 8ca6d7ef1ccc7769b0150cb841f0912241174583..0000000000000000000000000000000000000000 --- a/spaces/limcheekin/Yarn-Mistral-7B-128k-GGUF/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Yarn-Mistral-7B-128k-GGUF (Q6_K) -colorFrom: purple -colorTo: blue -sdk: docker -models: - - NousResearch/Yarn-Mistral-7b-128k - - TheBloke/Yarn-Mistral-7B-128k-GGUF -tags: - - inference api - - openai-api compatible - - llama-cpp-python - - Yarn-Mistral-7B-128k-GGUF - - gguf -pinned: false ---- - -# Yarn-Mistral-7B-128k-GGUF (Q6_K) - -Please refer to the [index.html](index.html) for more information. 
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Download Game Sword Art Online For Pc.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Download Game Sword Art Online For Pc.md deleted file mode 100644 index 8e00ad31a96ba92cd18e444b955dd4c453f87fb2..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Download Game Sword Art Online For Pc.md +++ /dev/null @@ -1,14 +0,0 @@ -

          download game sword art online for pc


          DOWNLOADhttps://bytlly.com/2uGywq



          - -An original story where you are the protagonist in the world of Gun Gale Online. Create your own avatar and explore devastated lands while fighting the enemy hordes. - -A fun, frenetic gameplay experience where you pilot your mobile suit while battling both ground and air opponents. Don’t let the naysayers fool you into thinking that Gun Gale Online is nothing more than a hyperactive game of catch with a more immersive setting. - -Gun Gale Online is a fun, frenetic gameplay experience where you pilot your mobile suit while battling both ground and air opponents. Don’t let the naysayers fool you into thinking that Gun Gale Online is nothing more than a hyperactive game of catch with a more immersive setting. - -Gun Gale Online is an original story where you are the protagonist in the world of Gun Gale Online. Create your own avatar and explore devastated lands while fighting the enemy hordes. - -An original story where you are the protagonist in the world of Gun Gale 4fefd39f24
          -
          -
          -

          diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Ekamath Eka Rataka Film 11 UPDATED.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Ekamath Eka Rataka Film 11 UPDATED.md deleted file mode 100644 index 11fdc3e44cd187995d20303877ca387128f8407b..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Ekamath Eka Rataka Film 11 UPDATED.md +++ /dev/null @@ -1,46 +0,0 @@ -

          Ekamath Eka Rataka Film 11


          DOWNLOAD ✔✔✔ https://bytlly.com/2uGyHM



          - -exe - -This is a powerful new program that allows the user to record video and - -audio. Each format is linked to a specific group of programs, and new - -groups can be added. Easily... - -Three Inches From Heaven - -​ A Very Short History of the Movies is a documentary film by Laurent Bouzereau - -about the history of movies in France. - -Laurent Bouzereau's Three Inches From Heaven: A Very Short History of the Movies (French: - -Trois pouces d'en haut : histoire très courte de la cinématographie française) is a 2003 - -French film directed by Laurent Bouzereau. - -Cockroaches or "the enemy" in Guarani: A Human Perspective - -Cockroaches and their presence in human homes and other - -buildings is now widely accepted as an indicator of - -unsanitary conditions and human activity. C... - -unsanitary conditions and human activity. Cockroaches - -are an important group of ectoparasites - parasites of - -other animals - that feed mainly on food. Many species - -of cockroaches are also important pests in human - -homes and in commercial buildings. - -Roaches or "the enemy" in Guarani: A Human Perspective - -Cockroaches or " 4fefd39f24
          -
          -
          -

          diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/For Supermax 9700 New Software 15.md b/spaces/lincquiQcaudo/Top-20-Diffusion/For Supermax 9700 New Software 15.md deleted file mode 100644 index 960fdc61d7bac45233742f2e0ac959f787da88b2..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/For Supermax 9700 New Software 15.md +++ /dev/null @@ -1,36 +0,0 @@ -

          for supermax 9700 new software 15


          Downloadhttps://bytlly.com/2uGwQS



          -
          -++++ Total Downloads: 0 (Total: 0) ++++ - -Thank You! - -We appreciate your feedback. We will take this under consideration. - --Team SuperMax - -A man walks in front of an Apple logo in the Manhattan borough of New York City, U.S. December 11, 2016. - -The forecast for 2017 looks grim for Apple. - -The company's first quarter numbers are in and Apple missed on both earnings and revenue, while missing on guidance for the current quarter. According to Apple's first-quarter earnings release: - -While Apple experienced growth in Services revenue in the first quarter of 2017, revenue from iPhone and iPad declined sequentially and as a percentage of total revenue. - -Meanwhile, Apple missed on guidance for the current quarter and its revenue guidance for the current quarter has also been lowered. - -If Apple's services business continues to grow, and if the iPhone and iPad's decline isn't too steep, it should easily make up for this shortfall. If not, however, Apple could see its profits fall in 2017.Pages - -8.26.2016 - -Show & Tell Wednesday #17 - -I've been gone for about a month and a half, you guys... I've not been doing much crafting since Christmas, but I've been thinking about it a lot and am really motivated and inspired. I finally got back into my craft room yesterday and have been working on a bunch of new projects and projects I've had in my mind for a while. I've been sewing again, mostly quilts. I'm starting to realize how much I missed it. I plan on getting back into the fabric world soon too, I'm really loving it and finding new things I want to make. - -I'm not ready to show you all my projects just yet, but I thought I'd share a few highlights. - -I was on the hunt for some larger sized pieces of fabric to make this quilt and stumbled across these Liberty prints. This is called City Life and I love it! I wanted to make a quick gift out of it for my niece to use and wear in her office, it will be a stylish throw blanket. I also found some smaller sized prints, check them out below. I'm not sure what I'm going to do with these yet, but I love the colors. - -I also got a few odds and ends from eBay, it's a pretty simple project, but I wanted to use all the fabric I 4fefd39f24
          -
          -
          -

          diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Microsoft Flight Simulator X Deluxe - Cracked By Razor1911 - Sim 64 Bit.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Microsoft Flight Simulator X Deluxe - Cracked By Razor1911 - Sim 64 Bit.md deleted file mode 100644 index c2a7be6a5a88a99be26c9c0ef65ce155ad3a46b3..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Microsoft Flight Simulator X Deluxe - Cracked By Razor1911 - Sim 64 Bit.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Microsoft Flight Simulator X deluxe - Cracked by Razor1911 - Sim 64 bit


          Download Filehttps://bytlly.com/2uGxct



          - -THE SIMS DELUXE BR SERIAL: 5500-5782961-40671202138 ... Microsoft Flight Simulator 2002 - Airport 2002 Volume 1serial use: A221-A24AABAA-FDBA5DD9 ... Warcraft 3 final o serial está no cd na pasta do crack chamada razor1911 ... Windows XP Professional (64 bit) QWM7G-B293Q-3B7FC-W6RPX-4FBP3 1fdad05405
          -
          -
          -

          diff --git a/spaces/linfanluntan/Grounded-SAM/GroundingDINO/groundingdino/datasets/transforms.py b/spaces/linfanluntan/Grounded-SAM/GroundingDINO/groundingdino/datasets/transforms.py deleted file mode 100644 index 91cf9269e4b31008a3ddca34a19b038a9b399991..0000000000000000000000000000000000000000 --- a/spaces/linfanluntan/Grounded-SAM/GroundingDINO/groundingdino/datasets/transforms.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Transforms and data augmentation for both image + bbox. -""" -import os -import random - -import PIL -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as F - -from groundingdino.util.box_ops import box_xyxy_to_cxcywh -from groundingdino.util.misc import interpolate - - -def crop(image, target, region): - cropped_image = F.crop(image, *region) - - target = target.copy() - i, j, h, w = region - - # should we do something wrt the original size? - target["size"] = torch.tensor([h, w]) - - fields = ["labels", "area", "iscrowd", "positive_map"] - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? - target["masks"] = target["masks"][:, i : i + h, j : j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target["boxes"].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target["masks"].flatten(1).any(1) - - for field in fields: - if field in target: - target[field] = target[field][keep] - - if os.environ.get("IPDB_SHILONG_DEBUG", None) == "INFO": - # for debug and visualization only. 
- if "strings_positive" in target: - target["strings_positive"] = [ - _i for _i, _j in zip(target["strings_positive"], keep) if _j - ] - - return cropped_image, target - - -def hflip(image, target): - flipped_image = F.hflip(image) - - w, h = image.size - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor( - [w, 0, w, 0] - ) - target["boxes"] = boxes - - if "masks" in target: - target["masks"] = target["masks"].flip(-1) - - return flipped_image, target - - -def resize(image, target, size, max_size=None): - # size can be min_size (scalar) or (w, h) tuple - - def get_size_with_aspect_ratio(image_size, size, max_size=None): - w, h = image_size - if max_size is not None: - min_original_size = float(min((w, h))) - max_original_size = float(max((w, h))) - if max_original_size / min_original_size * size > max_size: - size = int(round(max_size * min_original_size / max_original_size)) - - if (w <= h and w == size) or (h <= w and h == size): - return (h, w) - - if w < h: - ow = size - oh = int(size * h / w) - else: - oh = size - ow = int(size * w / h) - - return (oh, ow) - - def get_size(image_size, size, max_size=None): - if isinstance(size, (list, tuple)): - return size[::-1] - else: - return get_size_with_aspect_ratio(image_size, size, max_size) - - size = get_size(image.size, size, max_size) - rescaled_image = F.resize(image, size) - - if target is None: - return rescaled_image, None - - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) - ratio_width, ratio_height = ratios - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor( - [ratio_width, ratio_height, ratio_width, ratio_height] - ) - target["boxes"] = scaled_boxes - - if "area" in target: - area = target["area"] - scaled_area = area * (ratio_width * ratio_height) - target["area"] = scaled_area - - h, w = size - target["size"] = torch.tensor([h, w]) - - if "masks" in target: - target["masks"] = ( - interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5 - ) - - return rescaled_image, target - - -def pad(image, target, padding): - # assumes that we only pad on the bottom right corners - padded_image = F.pad(image, (0, 0, padding[0], padding[1])) - if target is None: - return padded_image, None - target = target.copy() - # should we do something wrt the original size? 
- target["size"] = torch.tensor(padded_image.size[::-1]) - if "masks" in target: - target["masks"] = torch.nn.functional.pad(target["masks"], (0, padding[0], 0, padding[1])) - return padded_image, target - - -class ResizeDebug(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - return resize(img, target, self.size) - - -class RandomCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - region = T.RandomCrop.get_params(img, self.size) - return crop(img, target, region) - - -class RandomSizeCrop(object): - def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False): - # respect_boxes: True to keep all boxes - # False to tolerence box filter - self.min_size = min_size - self.max_size = max_size - self.respect_boxes = respect_boxes - - def __call__(self, img: PIL.Image.Image, target: dict): - init_boxes = len(target["boxes"]) - max_patience = 10 - for i in range(max_patience): - w = random.randint(self.min_size, min(img.width, self.max_size)) - h = random.randint(self.min_size, min(img.height, self.max_size)) - region = T.RandomCrop.get_params(img, [h, w]) - result_img, result_target = crop(img, target, region) - if ( - not self.respect_boxes - or len(result_target["boxes"]) == init_boxes - or i == max_patience - 1 - ): - return result_img, result_target - return result_img, result_target - - -class CenterCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - image_width, image_height = img.size - crop_height, crop_width = self.size - crop_top = int(round((image_height - crop_height) / 2.0)) - crop_left = int(round((image_width - crop_width) / 2.0)) - return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) - - -class RandomHorizontalFlip(object): - def __init__(self, p=0.5): - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return hflip(img, target) - return img, target - - -class RandomResize(object): - def __init__(self, sizes, max_size=None): - assert isinstance(sizes, (list, tuple)) - self.sizes = sizes - self.max_size = max_size - - def __call__(self, img, target=None): - size = random.choice(self.sizes) - return resize(img, target, size, self.max_size) - - -class RandomPad(object): - def __init__(self, max_pad): - self.max_pad = max_pad - - def __call__(self, img, target): - pad_x = random.randint(0, self.max_pad) - pad_y = random.randint(0, self.max_pad) - return pad(img, target, (pad_x, pad_y)) - - -class RandomSelect(object): - """ - Randomly selects between transforms1 and transforms2, - with probability p for transforms1 and (1 - p) for transforms2 - """ - - def __init__(self, transforms1, transforms2, p=0.5): - self.transforms1 = transforms1 - self.transforms2 = transforms2 - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return self.transforms1(img, target) - return self.transforms2(img, target) - - -class ToTensor(object): - def __call__(self, img, target): - return F.to_tensor(img), target - - -class RandomErasing(object): - def __init__(self, *args, **kwargs): - self.eraser = T.RandomErasing(*args, **kwargs) - - def __call__(self, img, target): - return self.eraser(img), target - - -class Normalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, image, target=None): - image = F.normalize(image, mean=self.mean, std=self.std) - if target is None: - return image, None - target = target.copy() - h, 
w = image.shape[-2:] - if "boxes" in target: - boxes = target["boxes"] - boxes = box_xyxy_to_cxcywh(boxes) - boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) - target["boxes"] = boxes - return image, target - - -class Compose(object): - def __init__(self, transforms): - self.transforms = transforms - - def __call__(self, image, target): - for t in self.transforms: - image, target = t(image, target) - return image, target - - def __repr__(self): - format_string = self.__class__.__name__ + "(" - for t in self.transforms: - format_string += "\n" - format_string += " {0}".format(t) - format_string += "\n)" - return format_string diff --git a/spaces/litagin/rvc_okiba_TTS/lib/infer_pack/modules/F0Predictor/__init__.py b/spaces/litagin/rvc_okiba_TTS/lib/infer_pack/modules/F0Predictor/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ljsabc/Fujisaki/README.md b/spaces/ljsabc/Fujisaki/README.md deleted file mode 100644 index 8e7f1ee70107f2dc913742135e1358dc49b435b3..0000000000000000000000000000000000000000 --- a/spaces/ljsabc/Fujisaki/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Fujisaki -emoji: 💻 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/lkeab/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py b/spaces/lkeab/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py deleted file mode 100644 index 97586b8f5330a9d995a0bffd1f5e7bd5b5656462..0000000000000000000000000000000000000000 --- a/spaces/lkeab/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mask_rcnn_R_50_FPN_100ep_LSJ import ( - dataloader, - lr_multiplier, - model, - optimizer, - train, -) - -train.max_iter *= 4 # 100ep -> 400ep - -lr_multiplier.scheduler.milestones = [ - milestone * 4 for milestone in lr_multiplier.scheduler.milestones -] -lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/spaces/lllqqq/so-vits-svc-models-pcr/vencoder/HubertSoft.py b/spaces/lllqqq/so-vits-svc-models-pcr/vencoder/HubertSoft.py deleted file mode 100644 index e540775d9b6336953ab8642fa424a5e7e3e38c3f..0000000000000000000000000000000000000000 --- a/spaces/lllqqq/so-vits-svc-models-pcr/vencoder/HubertSoft.py +++ /dev/null @@ -1,23 +0,0 @@ -from vencoder.encoder import SpeechEncoder -import torch -from vencoder.hubert import hubert_model -class HubertSoft(SpeechEncoder): - def __init__(self,vec_path = "pretrain/hubert-soft-0d54a1f4.pt",device=None): - print("load model(s) from {}".format(vec_path)) - hubert_soft = hubert_model.hubert_soft(vec_path) - if device is None: - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - else: - self.dev = torch.device(device) - self.hidden_dim = 256 - self.model = hubert_soft.to(self.dev) - - def encoder(self, wav): - feats = wav - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats[None,None,:] - with torch.inference_mode(): - units = self.model.units(feats) - return units.transpose(1,2) diff --git a/spaces/lucinnerieux23/kotkindjn/Dockerfile b/spaces/lucinnerieux23/kotkindjn/Dockerfile deleted file mode 100644 index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000 --- 
a/spaces/lucinnerieux23/kotkindjn/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ - -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/upscaler_models/codeformer_upscaler.py b/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/upscaler_models/codeformer_upscaler.py deleted file mode 100644 index 8dd3ae5fdb3c58bb30e8fa12f0cf5b6c3cb2b133..0000000000000000000000000000000000000000 --- a/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/upscaler_models/codeformer_upscaler.py +++ /dev/null @@ -1,81 +0,0 @@ -import gradio as gr -from codeformer.app import inference_app - - -class CodeformerUpscalerGenerator: - def generate_image( - self, - image_path: str, - background_enhance: bool, - face_upsample: bool, - upscale: int, - codeformer_fidelity: int, - ): - - pipe = inference_app( - image=image_path, - background_enhance=background_enhance, - face_upsample=face_upsample, - upscale=upscale, - codeformer_fidelity=codeformer_fidelity, - ) - - return [pipe] - - def app(): - with gr.Blocks(): - with gr.Row(): - with gr.Column(): - codeformer_upscale_image_file = gr.Image( - type="filepath", label="Image" - ).style(height=260) - - with gr.Row(): - with gr.Column(): - codeformer_face_upsample = gr.Checkbox( - label="Face Upsample", - value=True, - ) - codeformer_upscale = gr.Slider( - label="Upscale", - minimum=1, - maximum=4, - step=1, - value=2, - ) - with gr.Row(): - with gr.Column(): - codeformer_background_enhance = gr.Checkbox( - label="Background Enhance", - value=True, - ) - codeformer_upscale_fidelity = gr.Slider( - label="Codeformer Fidelity", - minimum=0.1, - maximum=1.0, - step=0.1, - value=0.5, - ) - - codeformer_upscale_predict_button = gr.Button( - value="Generator" - ) - - with gr.Column(): - output_image = gr.Gallery( - label="Generated images", - show_label=False, - elem_id="gallery", - ).style(grid=(1, 2)) - - codeformer_upscale_predict_button.click( - fn=CodeformerUpscalerGenerator().generate_image, - inputs=[ - codeformer_upscale_image_file, - codeformer_background_enhance, - codeformer_face_upsample, - codeformer_upscale, - codeformer_upscale_fidelity, - ], - outputs=[output_image], - ) diff --git a/spaces/m3hrdadfi/typo-detector/README.md b/spaces/m3hrdadfi/typo-detector/README.md deleted file mode 100644 index 3b264f7baef97c139b0930690379657c8e0eac43..0000000000000000000000000000000000000000 --- a/spaces/m3hrdadfi/typo-detector/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Typo Detector -emoji: ⚡ -colorFrom: blue -colorTo: blue -sdk: streamlit -app_file: app.py -pinned: true ---- - -# Typo Detector using Transformers \ No newline at end of file diff --git a/spaces/ma-xu/LIVE/pybind11/tests/test_docstring_options.cpp b/spaces/ma-xu/LIVE/pybind11/tests/test_docstring_options.cpp deleted file mode 100644 index 8c8f79fd5f6308caab1ee2d22525af2a408eca07..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pybind11/tests/test_docstring_options.cpp +++ /dev/null @@ -1,61 +0,0 @@ -/* - tests/test_docstring_options.cpp -- generation of docstrings and signatures - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. 
Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. -*/ - -#include "pybind11_tests.h" - -TEST_SUBMODULE(docstring_options, m) { - // test_docstring_options - { - py::options options; - options.disable_function_signatures(); - - m.def("test_function1", [](int, int) {}, py::arg("a"), py::arg("b")); - m.def("test_function2", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); - - m.def("test_overloaded1", [](int) {}, py::arg("i"), "Overload docstring"); - m.def("test_overloaded1", [](double) {}, py::arg("d")); - - m.def("test_overloaded2", [](int) {}, py::arg("i"), "overload docstring 1"); - m.def("test_overloaded2", [](double) {}, py::arg("d"), "overload docstring 2"); - - m.def("test_overloaded3", [](int) {}, py::arg("i")); - m.def("test_overloaded3", [](double) {}, py::arg("d"), "Overload docstr"); - - options.enable_function_signatures(); - - m.def("test_function3", [](int, int) {}, py::arg("a"), py::arg("b")); - m.def("test_function4", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); - - options.disable_function_signatures().disable_user_defined_docstrings(); - - m.def("test_function5", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); - - { - py::options nested_options; - nested_options.enable_user_defined_docstrings(); - m.def("test_function6", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); - } - } - - m.def("test_function7", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); - - { - py::options options; - options.disable_user_defined_docstrings(); - - struct DocstringTestFoo { - int value; - void setValue(int v) { value = v; } - int getValue() const { return value; } - }; - py::class_(m, "DocstringTestFoo", "This is a class docstring") - .def_property("value_prop", &DocstringTestFoo::getValue, &DocstringTestFoo::setValue, "This is a property docstring") - ; - } -} diff --git a/spaces/ma-xu/LIVE/thrust/thrust/detail/pointer.h b/spaces/ma-xu/LIVE/thrust/thrust/detail/pointer.h deleted file mode 100644 index e9204978f5d5990476698917842a1d77b779b5ba..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/detail/pointer.h +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Copyright 2008-2018 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include - - -namespace thrust -{ - -// declare pointer with default values of template parameters -template class pointer; - -} // end thrust - - -// specialize thrust::iterator_traits to avoid problems with the name of -// pointer's constructor shadowing its nested pointer type -// do this before pointer is defined so the specialization is correctly -// used inside the definition -namespace thrust -{ - -template - struct iterator_traits > -{ - private: - typedef thrust::pointer ptr; - - public: - typedef typename ptr::iterator_category iterator_category; - typedef typename ptr::value_type value_type; - typedef typename ptr::difference_type difference_type; - // XXX implement this type (the result of operator->) later - typedef void pointer; - typedef typename ptr::reference reference; -}; // end iterator_traits - -} // end thrust - - -namespace thrust -{ - -namespace detail -{ - -// this metafunction computes the type of iterator_adaptor thrust::pointer should inherit from -template - struct pointer_base -{ - // void pointers should have no element type - // note that we remove_cv from the Element type to get the value_type - typedef typename thrust::detail::eval_if< - thrust::detail::is_void::type>::value, - thrust::detail::identity_, - thrust::detail::remove_cv - >::type value_type; - - // if no Derived type is given, just use pointer - typedef typename thrust::detail::eval_if< - thrust::detail::is_same::value, - thrust::detail::identity_ >, - thrust::detail::identity_ - >::type derived_type; - - // void pointers should have no reference type - // if no Reference type is given, just use reference - typedef typename thrust::detail::eval_if< - thrust::detail::is_void::type>::value, - thrust::detail::identity_, - thrust::detail::eval_if< - thrust::detail::is_same::value, - thrust::detail::identity_ >, - thrust::detail::identity_ - > - >::type reference_arg; - - typedef thrust::iterator_adaptor< - derived_type, // pass along the type of our Derived class to iterator_adaptor - Element *, // we adapt a raw pointer - value_type, // the value type - Tag, // system tag - thrust::random_access_traversal_tag, // pointers have random access traversal - reference_arg, // pass along our Reference type - std::ptrdiff_t - > type; -}; // end pointer_base - - -} // end detail - - -// the base type for all of thrust's tagged pointers. -// for reasonable pointer-like semantics, derived types should reimplement the following: -// 1. no-argument constructor -// 2. constructor from OtherElement * -// 3. constructor from OtherPointer related by convertibility -// 4. constructor from OtherPointer to void -// 5. assignment from OtherPointer related by convertibility -// These should just call the corresponding members of pointer. 
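// As a minimal illustrative sketch of the five points above, a derived tagged
// pointer could look like the following; the tag `my_system_tag` and the class
// name `my_ptr` are invented for the example, and the void-pointer constructor
// (point 4) is omitted for brevity:
//
//   template<typename T>
//     class my_ptr
//       : public thrust::pointer<T, my_system_tag, thrust::use_default, my_ptr<T> >
//   {
//     private:
//       typedef thrust::pointer<T, my_system_tag, thrust::use_default, my_ptr<T> > super_t;
//
//     public:
//       // 1. no-argument constructor
//       __host__ __device__ my_ptr() : super_t() {}
//
//       // 2. constructor from a raw pointer to a convertible element type
//       template<typename OtherT>
//       __host__ __device__ explicit my_ptr(OtherT *ptr) : super_t(ptr) {}
//
//       // 3. constructor from another pointer related by convertibility
//       template<typename OtherPointer>
//       __host__ __device__ my_ptr(const OtherPointer &other,
//         typename thrust::detail::enable_if_pointer_is_convertible<OtherPointer, my_ptr>::type * = 0)
//           : super_t(other) {}
//
//       // 5. assignment from another pointer related by convertibility
//       template<typename OtherPointer>
//       __host__ __device__
//       typename thrust::detail::enable_if_pointer_is_convertible<OtherPointer, my_ptr, my_ptr&>::type
//       operator=(const OtherPointer &other) { return super_t::operator=(other); }
//   };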
-template - class pointer - : public thrust::detail::pointer_base::type -{ - private: - typedef typename thrust::detail::pointer_base::type super_t; - - typedef typename thrust::detail::pointer_base::derived_type derived_type; - - // friend iterator_core_access to give it access to dereference - friend class thrust::iterator_core_access; - - __host__ __device__ - typename super_t::reference dereference() const; - - // don't provide access to this part of super_t's interface - using super_t::base; - using typename super_t::base_type; - - public: - typedef typename super_t::base_type raw_pointer; - - // constructors - - __host__ __device__ - pointer(); - - #if THRUST_CPP_DIALECT >= 2011 - // NOTE: This is needed so that Thrust smart pointers can be used in - // `std::unique_ptr`. - __host__ __device__ - pointer(decltype(nullptr)); - #endif - - // OtherValue shall be convertible to Value - // XXX consider making the pointer implementation a template parameter which defaults to Element * - template - __host__ __device__ - explicit pointer(OtherElement *ptr); - - // OtherPointer's element_type shall be convertible to Element - // OtherPointer's system shall be convertible to Tag - template - __host__ __device__ - pointer(const OtherPointer &other, - typename thrust::detail::enable_if_pointer_is_convertible< - OtherPointer, - pointer - >::type * = 0); - - // OtherPointer's element_type shall be void - // OtherPointer's system shall be convertible to Tag - template - __host__ __device__ - explicit - pointer(const OtherPointer &other, - typename thrust::detail::enable_if_void_pointer_is_system_convertible< - OtherPointer, - pointer - >::type * = 0); - - // assignment - - #if THRUST_CPP_DIALECT >= 2011 - // NOTE: This is needed so that Thrust smart pointers can be used in - // `std::unique_ptr`. - __host__ __device__ - derived_type& operator=(decltype(nullptr)); - #endif - - // OtherPointer's element_type shall be convertible to Element - // OtherPointer's system shall be convertible to Tag - template - __host__ __device__ - typename thrust::detail::enable_if_pointer_is_convertible< - OtherPointer, - pointer, - derived_type & - >::type - operator=(const OtherPointer &other); - - // observers - - __host__ __device__ - Element *get() const; - - #if THRUST_CPP_DIALECT >= 2011 - // NOTE: This is needed so that Thrust smart pointers can be used in - // `std::unique_ptr`. - __host__ __device__ - explicit operator bool() const; - #endif - - __host__ __device__ - static derived_type pointer_to(typename thrust::detail::pointer_traits_detail::pointer_to_param::type r) - { - return thrust::detail::pointer_traits::pointer_to(r); - } -}; // end pointer - -// Output stream operator -template -__host__ -std::basic_ostream & -operator<<(std::basic_ostream &os, - const pointer &p); - -#if THRUST_CPP_DIALECT >= 2011 -// NOTE: This is needed so that Thrust smart pointers can be used in -// `std::unique_ptr`. 
-template -__host__ __device__ -bool operator==(decltype(nullptr), pointer p); - -template -__host__ __device__ -bool operator==(pointer p, decltype(nullptr)); - -template -__host__ __device__ -bool operator!=(decltype(nullptr), pointer p); - -template -__host__ __device__ -bool operator!=(pointer p, decltype(nullptr)); -#endif - -} // end thrust - -#include - diff --git a/spaces/malloc/OpenNMT-EN-DE-Translation/README.md b/spaces/malloc/OpenNMT-EN-DE-Translation/README.md deleted file mode 100644 index c94b9b15e25c7a7b0610a17b1e5ad734b1c4c4a6..0000000000000000000000000000000000000000 --- a/spaces/malloc/OpenNMT-EN-DE-Translation/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: OpenNMT EN DE Translation -emoji: 🐢 -colorFrom: red -colorTo: gray -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/manhkhanhUIT/BOPBTL/Global/detection_models/__init__.py b/spaces/manhkhanhUIT/BOPBTL/Global/detection_models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/marker22/Bark-Voice-Cloning/bark/__init__.py b/spaces/marker22/Bark-Voice-Cloning/bark/__init__.py deleted file mode 100644 index e0b17c8b44869c554931c723446c65d3903821a9..0000000000000000000000000000000000000000 --- a/spaces/marker22/Bark-Voice-Cloning/bark/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .api import generate_audio, text_to_semantic, semantic_to_waveform, save_as_prompt -from .generation import SAMPLE_RATE, preload_models diff --git a/spaces/mbarnig/lb-de-en-fr-pt-COQUI-STT/README.md b/spaces/mbarnig/lb-de-en-fr-pt-COQUI-STT/README.md deleted file mode 100644 index 416533ca3c64da7bfee7ba64b095ad15981fecab..0000000000000000000000000000000000000000 --- a/spaces/mbarnig/lb-de-en-fr-pt-COQUI-STT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: lb-de-fr-en-pt-COQUI-STT -emoji: 🎤 🇱🇺 📋 -colorFrom: grey -colorTo: grey -sdk: gradio -sdk_version: 3.1.1 -app_file: app.py -pinned: false -license: cc-by-nc-sa-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mehdidc/text_to_image_ddgan/score_sde/models/discriminator.py b/spaces/mehdidc/text_to_image_ddgan/score_sde/models/discriminator.py deleted file mode 100644 index 9841e78929954628d8643e3c33bbe4ab3fd31710..0000000000000000000000000000000000000000 --- a/spaces/mehdidc/text_to_image_ddgan/score_sde/models/discriminator.py +++ /dev/null @@ -1,433 +0,0 @@ -# --------------------------------------------------------------- -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
-# -# This work is licensed under the NVIDIA Source Code License -# for Denoising Diffusion GAN. To view a copy of this license, see the LICENSE file. -# --------------------------------------------------------------- -import torch -import torch.nn as nn -import numpy as np - -from . import up_or_down_sampling -from . import dense_layer -from . import layers - -dense = dense_layer.dense -conv2d = dense_layer.conv2d -get_sinusoidal_positional_embedding = layers.get_timestep_embedding - -class TimestepEmbedding(nn.Module): - def __init__(self, embedding_dim, hidden_dim, output_dim, act=nn.LeakyReLU(0.2)): - super().__init__() - - self.embedding_dim = embedding_dim - self.output_dim = output_dim - self.hidden_dim = hidden_dim - - self.main = nn.Sequential( - dense(embedding_dim, hidden_dim), - act, - dense(hidden_dim, output_dim), - ) - - def forward(self, temp): - temb = get_sinusoidal_positional_embedding(temp, self.embedding_dim) - temb = self.main(temb) - return temb -#%% -class DownConvBlock(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size=3, - padding=1, - t_emb_dim = 128, - downsample=False, - act = nn.LeakyReLU(0.2), - fir_kernel=(1, 3, 3, 1) - ): - super().__init__() - - - self.fir_kernel = fir_kernel - self.downsample = downsample - - self.conv1 = nn.Sequential( - conv2d(in_channel, out_channel, kernel_size, padding=padding), - ) - - - self.conv2 = nn.Sequential( - conv2d(out_channel, out_channel, kernel_size, padding=padding,init_scale=0.) - ) - self.dense_t1= dense(t_emb_dim, out_channel) - - - self.act = act - - - self.skip = nn.Sequential( - conv2d(in_channel, out_channel, 1, padding=0, bias=False), - ) - - - - def forward(self, input, t_emb): - - out = self.act(input) - out = self.conv1(out) - out += self.dense_t1(t_emb)[..., None, None] - - out = self.act(out) - - if self.downsample: - out = up_or_down_sampling.downsample_2d(out, self.fir_kernel, factor=2) - input = up_or_down_sampling.downsample_2d(input, self.fir_kernel, factor=2) - out = self.conv2(out) - - - skip = self.skip(input) - out = (out + skip) / np.sqrt(2) - - - return out - -class Discriminator_small(nn.Module): - """A time-dependent discriminator for small images (CIFAR10, StackMNIST).""" - - def __init__(self, nc = 3, ngf = 64, t_emb_dim = 128, act=nn.LeakyReLU(0.2), cond_size=768): - super().__init__() - # Gaussian random feature embedding layer for time - self.act = act - self.cond_proj = nn.Linear(cond_size, ngf*8) - # self.cond_proj.weight.data = default_initializer()(self.cond_proj.weight.shape) - - self.t_embed = TimestepEmbedding( - embedding_dim=t_emb_dim, - hidden_dim=t_emb_dim, - output_dim=t_emb_dim, - act=act, - ) - - - - # Encoding layers where the resolution decreases - self.start_conv = conv2d(nc,ngf*2,1, padding=0) - self.conv1 = DownConvBlock(ngf*2, ngf*2, t_emb_dim = t_emb_dim,act=act) - - self.conv2 = DownConvBlock(ngf*2, ngf*4, t_emb_dim = t_emb_dim, downsample=True,act=act) - - - self.conv3 = DownConvBlock(ngf*4, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - - - self.conv4 = DownConvBlock(ngf*8, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - - - self.final_conv = conv2d(ngf*8 + 1, ngf*8, 3,padding=1, init_scale=0.) 
- self.end_linear = dense(ngf*8, 1) - - self.stddev_group = 4 - self.stddev_feat = 1 - - - def forward(self, x, t, x_t, cond=None): - t_embed = self.t_embed(t) - # if cond is not None: - # t_embed = t_embed + self.cond_proj(cond) - t_embed = self.act(t_embed) - input_x = torch.cat((x, x_t), dim = 1) - - h0 = self.start_conv(input_x) - h1 = self.conv1(h0,t_embed) - - h2 = self.conv2(h1,t_embed) - - h3 = self.conv3(h2,t_embed) - - - out = self.conv4(h3,t_embed) - - batch, channel, height, width = out.shape - group = min(batch, self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - out = self.act(out) - - out = out.view(out.shape[0], out.shape[1], -1).sum(2) - out = self.end_linear(out) + (self.cond_proj(cond) * out).sum(dim=1, keepdim=True) - - return out - -class SmallCondAttnDiscriminator(nn.Module): - """A time-dependent discriminator for small images (CIFAR10, StackMNIST).""" - - def __init__(self, nc = 3, ngf = 64, t_emb_dim = 128, act=nn.LeakyReLU(0.2), cond_size=768): - super().__init__() - # Gaussian random feature embedding layer for time - self.act = act - self.cond_attn = layers.CondAttnBlock(ngf*8, cond_size, dim_head=64, heads=8, norm_context=False, cosine_sim_attn=False) - - self.t_embed = TimestepEmbedding( - embedding_dim=t_emb_dim, - hidden_dim=t_emb_dim, - output_dim=t_emb_dim, - act=act, - ) - - - - # Encoding layers where the resolution decreases - self.start_conv = conv2d(nc,ngf*2,1, padding=0) - self.conv1 = DownConvBlock(ngf*2, ngf*2, t_emb_dim = t_emb_dim,act=act) - - self.conv2 = DownConvBlock(ngf*2, ngf*4, t_emb_dim = t_emb_dim, downsample=True,act=act) - - - self.conv3 = DownConvBlock(ngf*4, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - - - self.conv4 = DownConvBlock(ngf*8, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - - - self.final_conv = conv2d(ngf*8 + 1, ngf*8, 3,padding=1, init_scale=0.) 
- self.end_linear = dense(ngf*8, 1) - self.end_linear_cond = dense(ngf*8, 1) - #self.gn_cond = nn.GroupNorm(num_groups=32, num_channels=ngf*8, eps=1e-6) - - self.stddev_group = 4 - self.stddev_feat = 1 - - - def forward(self, x, t, x_t, cond=None): - t_embed = self.t_embed(t) - # if cond is not None: - # t_embed = t_embed + self.cond_proj(cond) - t_embed = self.act(t_embed) - input_x = torch.cat((x, x_t), dim = 1) - - h0 = self.start_conv(input_x) - h1 = self.conv1(h0,t_embed) - - h2 = self.conv2(h1,t_embed) - - h3 = self.conv3(h2,t_embed) - - - out = self.conv4(h3,t_embed) - - batch, channel, height, width = out.shape - group = min(batch, self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - out = self.act(out) - - cond_pooled, cond, cond_mask = cond - - out_cond = (self.cond_attn(out, cond, cond_mask)) - - out = out.view(out.shape[0], out.shape[1], -1).mean(2) - out_cond = out_cond.view(out_cond.shape[0], out_cond.shape[1], -1).mean(2) - out = self.end_linear(out) + self.end_linear_cond(out_cond) - return out - - - - -class Discriminator_large(nn.Module): - """A time-dependent discriminator for large images (CelebA, LSUN).""" - - def __init__(self, nc = 1, ngf = 32, t_emb_dim = 128, act=nn.LeakyReLU(0.2), cond_size=768, attn_pool=False, attn_pool_kw=None): - super().__init__() - # Gaussian random feature embedding layer for time - self.cond_proj = nn.Linear(cond_size, ngf*8) - self.act = act - if attn_pool: - if attn_pool_kw is None: - attn_pool_kw = dict( - depth=1, - dim_head = 64, - heads = 8, - num_latents = 64, - num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence - max_seq_len = 512, - ff_mult = 4, - cosine_sim_attn = False, - ) - self.attn_pool = layers.PerceiverResampler( - dim=cond_size, - **attn_pool_kw, - ) - max_text_len = 512 - self.null_text_embed = torch.nn.Parameter(torch.randn(1, max_text_len, cond_size)) - else: - self.attn_pool = None - self.t_embed = TimestepEmbedding( - embedding_dim=t_emb_dim, - hidden_dim=t_emb_dim, - output_dim=t_emb_dim, - act=act, - ) - - self.start_conv = conv2d(nc,ngf*2,1, padding=0) - self.conv1 = DownConvBlock(ngf*2, ngf*4, t_emb_dim = t_emb_dim, downsample = True, act=act) - - self.conv2 = DownConvBlock(ngf*4, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - - self.conv3 = DownConvBlock(ngf*8, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - - - self.conv4 = DownConvBlock(ngf*8, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - self.conv5 = DownConvBlock(ngf*8, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - self.conv6 = DownConvBlock(ngf*8, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - - - self.final_conv = conv2d(ngf*8 + 1, ngf*8, 3,padding=1) - self.end_linear = dense(ngf*8, 1) - - self.stddev_group = 4 - self.stddev_feat = 1 - - - def forward(self, x, t, x_t, cond=None): - t_embed = self.t_embed(t) - t_embed = self.act(t_embed) - - input_x = torch.cat((x, x_t), dim = 1) - - h = self.start_conv(input_x) - h = self.conv1(h,t_embed) - - h = self.conv2(h,t_embed) - - h = self.conv3(h,t_embed) - h = self.conv4(h,t_embed) - h = self.conv5(h,t_embed) - - - out = self.conv6(h,t_embed) - - batch, channel, height, width = out.shape - group = min(batch, 
self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - out = self.act(out) - - out = out.view(out.shape[0], out.shape[1], -1).sum(2) - - if self.attn_pool is not None: - (cond_pooled, cond, cond_mask) = cond - if len(cond_mask.shape) == 2: - cond_mask = cond_mask.view(cond_mask.shape[0], cond_mask.shape[1], 1) - cond = torch.where( - cond_mask, - cond, - self.null_text_embed[:, :cond.shape[1]] - ) - cond = self.attn_pool(cond) - cond = cond.mean(dim=1) - cond = self.cond_proj(cond) - - out = self.end_linear(out) + (cond * out).sum(dim=1, keepdim=True) - return out - - -class CondAttnDiscriminator(nn.Module): - """A time-dependent discriminator for large images (CelebA, LSUN).""" - - def __init__(self, nc = 1, ngf = 32, t_emb_dim = 128, act=nn.LeakyReLU(0.2), cond_size=768): - super().__init__() - # Gaussian random feature embedding layer for time - self.act = act - self.cond_attn = layers.CondAttnBlock(ngf*8, cond_size, dim_head=64, heads=8, norm_context=False, cosine_sim_attn=False) - - self.t_embed = TimestepEmbedding( - embedding_dim=t_emb_dim, - hidden_dim=t_emb_dim, - output_dim=t_emb_dim, - act=act, - ) - - self.start_conv = conv2d(nc,ngf*2,1, padding=0) - self.conv1 = DownConvBlock(ngf*2, ngf*4, t_emb_dim = t_emb_dim, downsample = True, act=act) - - self.conv2 = DownConvBlock(ngf*4, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - - self.conv3 = DownConvBlock(ngf*8, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - - - self.conv4 = DownConvBlock(ngf*8, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - self.conv5 = DownConvBlock(ngf*8, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - self.conv6 = DownConvBlock(ngf*8, ngf*8, t_emb_dim = t_emb_dim, downsample=True,act=act) - - - self.final_conv = conv2d(ngf*8 + 1, ngf*8, 3,padding=1) - self.end_linear = dense(ngf*8, 1) - self.end_linear_cond = dense(ngf*8, 1) - - self.stddev_group = 4 - self.stddev_feat = 1 - - - def forward(self, x, t, x_t, cond=None): - cond_pooled, cond, cond_mask = cond - - t_embed = self.t_embed(t) - t_embed = self.act(t_embed) - - input_x = torch.cat((x, x_t), dim = 1) - - h = self.start_conv(input_x) - h = self.conv1(h,t_embed) - - h = self.conv2(h,t_embed) - - h = self.conv3(h,t_embed) - h = self.conv4(h,t_embed) - h = self.conv5(h,t_embed) - - - out = self.conv6(h,t_embed) - - batch, channel, height, width = out.shape - group = min(batch, self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - out = self.act(out) - - out_cond = self.cond_attn(out, cond, cond_mask) - - - out = out.view(out.shape[0], out.shape[1], -1).mean(2) - out_cond = out_cond.view(out_cond.shape[0], out_cond.shape[1], -1).mean(2) - out = self.end_linear(out) + self.end_linear_cond(out_cond) - return out diff --git a/spaces/meraGPT/meraKB/explorer.py b/spaces/meraGPT/meraKB/explorer.py deleted file mode 100644 index 23a6c68337dfc49811eb96c1cab09689a3642583..0000000000000000000000000000000000000000 --- 
a/spaces/meraGPT/meraKB/explorer.py +++ /dev/null @@ -1,14 +0,0 @@ -import streamlit as st - - -def view_document(supabase): - # Get the document from the database - response = supabase.table("documents").select("content").filter('metadata->>user', 'eq', st.session_state["username"]).execute() - # st.write("**This feature is in active development**") - # Display a list of elements from the documents - # If the user clicks on an element, display the content of the document - i = 0 - for document in response.data: - i += 1 - if st.button(document['content'][:50].replace("\n", " "), key = str(i)): - st.write(document['content']) diff --git a/spaces/merve/anonymization/public/dataset-worldviews/style.css b/spaces/merve/anonymization/public/dataset-worldviews/style.css deleted file mode 100644 index b8cdd4b074388e961c5dd22322a9e056903f2b2c..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/public/dataset-worldviews/style.css +++ /dev/null @@ -1,260 +0,0 @@ -:root { - --shaded-shape-color: #9e9e9e; - --not-shaded-shape-color: white; - --classifier-bg-color: #e6e6e6; -} - -.right { - float: right; -} -.left { - float: left; -} - -.gt-shaded { - fill: var(--shaded-shape-color); - stroke: black; - stroke-width: 1; -} - -.gt-unshaded { - fill: var(--not-shaded-shape-color); - stroke: black; - stroke-width: 1; -} - -.shape-label-group { - opacity: 0; -} -.shape-label-group.visible { - opacity: 100; -} - -.incorrect.is-classified { - stroke-width: 2; - transition: stroke-width 0.5s; - transition-timing-function: cubic-bezier(0, 7, 0, 7); - stroke: #d15830; -} - -.correct.is-classified { - stroke-width: 1; - stroke: green; -} - -.shape-label-rect { - opacity: 50; - fill: white; - stroke: none; -} - -.shape-label-text { - color: black; -} - -.source { - text-decoration: none; - font-size: 10px; -} - -.newspaper-image { - width: 450px; -} - -.interface-image { - width: 450px; -} -.summary-text { - opacity: 0; - padding-top: 0px; - padding-bottom: 20px; - text-indent: 50px; -} - -.summary-text.is-classified { - transition: opacity 1000ms; - transition-delay: 2500ms; - opacity: 100; -} - -.classifier { - /* fill:#c2c2c2; - stroke-width: 0;*/ - opacity: 0; -} - -.classifier.is-classified { - transition: opacity 1000ms; - transition-delay: 1500ms; - opacity: 100; - fill: #c2c2c2; - stroke-width: 2; -} - -.classifier-text { - text-anchor: middle; - /*alignment-baseline: central;*/ - font-size: 30px; -} - -.classifier-caption { - width: 800px; - text-align: center; - position: relative; - left: 50%; - margin-left: -400px; - font-size: 12px; - /*right: 50%;*/ -} - -.classifier-bg-shaded { - fill: var(--classifier-bg-color); - stroke-width: 0; -} - -.classifier-bg-unshaded { - fill: var(--classifier-bg-color); -} - -.item-text.invisible { - fill-opacity: 10; -} -.item-text { - fill-opacity: 100; -} - -.explainer-label-text { - padding-left: 2px; - padding-right: 2px; - padding-top: 1px; - padding-bottom: 1px; -} - -mark { - padding-left: 2px; - padding-right: 2px; - padding-top: 1px; - padding-bottom: 1px; - outline: 1px solid #000000; -} - -img.interface { - padding-top: 20px; - padding-right: 20px; - padding-bottom: 20px; - padding-left: 20px; -} - -.classifier-button { - padding: 10px 20px; - text-align: center; - font-family: "Google Sans", sans-serif; - margin-left: 20px; - margin-right: 20px; -} - -.classifer-bg-text { - font-family: "Consolas", "monaco", "monospace"; -} - -.emphasis { - font-weight: 500; -} - -.dropdown { - padding: 8px 7px; - min-width: 200px; - background-color: 
#f9f9f9; - box-shadow: 0px 8px 16px 0px rgba(0, 0, 0, 0.2); - font-family: "Google Sans", sans-serif; - font-size: 14px; -} - -.fake-dropdown { - padding-top: 10px; - padding-bottom: 10px; - padding-left: 10px; - padding-right: 10px; -} - -.monospace { - font-family: "Consolas", "monaco", "monospace"; - font-size: 14px; - font-weight: 500; -} - -.monospace.shaded { - background-color: var(--shaded-shape-color); - outline: 1px solid #000000; - padding: 1px; - font-size: 14px; -} - -.monospace.not-shaded { - background-color: var(--not-shaded-shape-color); - outline: 1px solid #000000; - padding: 1px; - font-size: 14px; -} - -.classifier-info-blurb { - font-style: italic; - font-size: 11; -} - -.photo-button { - cursor: pointer; -} - -.photo-button rect { - fill: #ffffff; -} - -.photo-button.is-active-button rect { - stroke: #000; -} - -.explainer-button { - cursor: pointer; -} - -.explainer-button rect { - fill: #f9f9f9; - stroke: #000000; -} - -.explainer-button.explainer-active-button rect { - fill: #fefefe; - stroke-width: 3; -} - -.tooltip { - width: 180px; - text-align: center; -} - -.tooltip .correct-row span { - outline: 1px solid red; - padding: 2px; -} - -.tooltip .correct-row.is-correct-tooltip span { - outline: 1px solid green; -} - -#row.row-highlighted { - opacity: 0.2; -} - -.shape-row-unhighlighted { - opacity: 0.2; -} - -.results-table { - text-align: center; -} - -.results-table tr.active { - background-color: var(--classifier-bg-color); - outline: 1px solid; -} diff --git a/spaces/merve/anonymization/source/anonymization/make-slides.js b/spaces/merve/anonymization/source/anonymization/make-slides.js deleted file mode 100644 index 3feff55ba9248cee61cd7ec881fade8ef661e67c..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/source/anonymization/make-slides.js +++ /dev/null @@ -1,98 +0,0 @@ -window.makeSlides = function(){ - var slides = [ - { - xKey: 'grid', - circleDelayFn: d => axii.ageScale(d.age), - showFlipRect: 0, - populationTarget: 144, - headsProbTarget: .5, - }, - { - xKey: 'age', - showAgeAxis: 1, - }, - { - xKey: 'ageState', - showStateAxis: 1, - }, - { - showUniqueBox: 1 - }, - { - xKey: 'ageStateSeason', - showUniqueBox: 1, - showUniqueSeasonBox: 1, - showSeasonAxis: 1, - }, - { - xKey: 'heads', - showUniqueBox: 0, - showUniqueSeasonBox: 0, - showSeasonAxis: 0, - showAgeAxis: 0, - showStateAxis: 0, - showHeadAxis: 1, - }, - { - showFlipCircle: 1, - showHeadCaptionAxis: 1, - }, - - // Flip coin - { - xKey: 'plagerizedShifted', - showHeadAxis: 0, - showHeadCaptionAxis: 0, - showHistogramAxis: 1, - }, - - // Exactly how far off can these estimates be after adding noise? Flip more coins to see the distribution. - { - enterHistogram: 1, - showHistogram: 1, - // showPlagerizedAxis: 0, - showEstimate: 1, - }, - - // Reducing the random noise increases our point estimate, but risks leaking information about students. - { - animateHeadsProbSlider: 1, - animatePopulationSlider: 1, - enterHistogram: 0, - name: 'noise', - headsProbTarget: .35, - }, - - // If we collect information from lots of people, we can have high accuracy and protect everyone's privacy. 
- { - showEstimate: 0, - showAllStudents: 1, - name: 'population', - animateHeadsProbSlider: -1, - animatePopulationSlider: 1, - populationTarget: 400, - }, - - ] - - var keys = [] - slides.forEach((d, i) => { - keys = keys.concat(d3.keys(d)) - d.index = i - }) - _.uniq(keys).forEach(str => { - var prev = null - slides.forEach(d => { - if (typeof(d[str]) === 'undefined'){ - d[str] = prev - } - prev = d[str] - }) - }) - - return slides -} - - - -if (window.init) window.init() diff --git a/spaces/merve/hidden-bias/public/dataset-worldviews/index.html b/spaces/merve/hidden-bias/public/dataset-worldviews/index.html deleted file mode 100644 index 7cc91d84d612bf8097d9568c37b1382c1dbf686f..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/public/dataset-worldviews/index.html +++ /dev/null @@ -1,288 +0,0 @@ - - - - - - - - - - - - - - - - - - Datasets Have Worldviews - - - - - - - - - - - - - - - -
          - -
          - -

          Datasets Have Worldviews

          -
          Every dataset communicates a different perspective. When you shift your perspective, your conclusions can shift, too.
          -

          Suppose you have a dataset of shapes. They can either be shaded or unshaded. They look something like this:

          - -
          - -

          You built a supervised machine learning classifier that will automatically classify each shape as shaded or unshaded. You call it the “Is-Shaded Classifier”.
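(The interactive demo itself isn't reproduced in this page. As a rough, hypothetical stand-in, an "is-shaded" model can be as small as a binary classifier over flattened pixel values; the data and model below are invented purely for illustration and are not the article's actual classifier.)

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)

# Hypothetical stand-in data: 8x8 grayscale "shape" images flattened to 64 pixels;
# shaded shapes are darker on average, unshaded shapes are lighter.
shaded   = rng.uniform(0.0, 0.5, size=(50, 64))
unshaded = rng.uniform(0.5, 1.0, size=(50, 64))
X = np.vstack([shaded, unshaded])
y = np.array([1] * 50 + [0] * 50)   # 1 = shaded, 0 = unshaded

clf = LogisticRegression(max_iter=1000).fit(X, y)
print("training accuracy:", clf.score(X, y))
```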

          - -

          Click “Run Classifier” to see how your model performs.

          -

          -
          -
          -
          - -

          It’s not perfect— some of the shapes are definitely misclassified. You want to improve your model!

          - -

          To do so, you want to know more about the kinds of mistakes your model is making.

          - -

          Thinking About Bias

          - -

          In training, you only gave your model the raw image of each shape and one ground truth label: shaded or unshaded. But maybe something about your model—the distribution of the training data you used, the architecture you chose, or how you set your hyperparameters—resulted in your model performing better on some shapes than others.

          - -

          In fact, you’ve seen a lot of papers and articles citing issues of biased model performance between circles, triangles, and rectangles in shape data. One paper finds that shape detection algorithms tend to do worse on triangles; another article says color accuracy is an issue with circles. So you wonder: are there biases in your model’s misclassifications?

          - -
          Three abstract drawings of papers or articles with headlines 'Shape detection: biased against triangles?', 'Geometry experts call for more accurate rectangle data, cite fairness concerns', and 'Increasing color accuracy in circles'
          - -

          You want to make sure that your model is performing equally well across circles, triangles, and rectangles, so you decide to do a fairness analysis.

          - -

          There’s just one issue: you don’t have labels for which of your shapes are circles, triangles, or rectangles.

          - -

          So, you decide to send your data to data labelers.

          - -
          Different shapes with an arrow pointing to a group of abstract people.
          - -

          You receive feedback from your data labeling team that they’re not sure what to do with the shapes that aren’t exactly circles, triangles, or rectangles.

          - -
          An image of a computer interface and the instructions 'Please select the name of the shape below'. There is a lumpy, blob-like shape with three checkboxes that say 'circle', 'triangle', and 'rectangle'. There is a text box with a question mark next to the interface.
          - -

          For the shapes that are unclear, you can have them use their best guess or simply label them as “other”. Then, you can finally do some fairness analysis!

          - -

          Below is the interface they see:

          - -
          - -

          These shapes should be labeled…

          -
          - -
          - -
          - -

          If you go back and change the labelers’ instructions, which shapes do you perform worst on? Where do you find bias?
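One way to answer that question, roughly speaking, is to slice the classifier's errors by whatever categories the labelers produced. A minimal sketch, using made-up shapes and predictions rather than the demo's actual data:

```python
from collections import defaultdict

# Hypothetical records: each shape has a ground-truth "is_shaded" label, the
# model's prediction, and the category the labelers assigned it.
results = [
    {"category": "circle",    "is_shaded": True,  "predicted_shaded": True},
    {"category": "circle",    "is_shaded": False, "predicted_shaded": True},
    {"category": "triangle",  "is_shaded": True,  "predicted_shaded": False},
    {"category": "rectangle", "is_shaded": False, "predicted_shaded": False},
    {"category": "other",     "is_shaded": True,  "predicted_shaded": True},
]

def accuracy_by_group(records):
    """Fraction of shapes classified correctly, per labeler-assigned category."""
    correct, total = defaultdict(int), defaultdict(int)
    for r in records:
        total[r["category"]] += 1
        correct[r["category"]] += r["is_shaded"] == r["predicted_shaded"]
    return {cat: correct[cat] / total[cat] for cat in total}

print(accuracy_by_group(results))
# {'circle': 0.5, 'triangle': 0.0, 'rectangle': 1.0, 'other': 1.0}
```

Change how the ambiguous shapes are categorized and the per-group numbers shift, even though the model's predictions stay the same.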

          - -

          You notice that your results hinge on how you choose to classify the shapes in your data.

          - -

          Because ultimately, this isn’t a world of only circles, triangles, and rectangles!

          - -

          Thinking About Classification

          - -

          What could we find out about our classifier’s performance if we used different categories altogether?

          - -

          All shapes are basically…

          -

          Everything else should be labeled…

          - -

          -

          -

          -

          - -

          With each of the different categories, which shapes do you perform worst on? Where do you find bias?

          - -

          Each way of categorizing your shapes takes a different stance about what’s important. Each one makes some features more important than others, makes some distinctions visible and other distinctions invisible, and makes some things easy to classify while others become outliers.

          - -

          And each one tells you something different about what kind of bias your classifier has!

          - -

          Grouping and Regrouping

          - -

          Here’s another way to look at the same results. We can draw all the shapes that were correctly classified above the dashed line, and all the incorrectly classified shapes below it.

          - -
          - -

          We’re still looking at the same model making the same classification on the same shapes, so the same shapes stay above and below the line. But each way of grouping the results distributes the errors differently— each way tells you something different.
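A small, hypothetical sketch of that idea: the predictions below never change, yet two grouping schemes spread the errors very differently (all shapes, groupings, and verdicts are invented for illustration).

```python
# The model's verdicts are fixed; only the grouping changes.
predictions = [
    # (shape, classified correctly?)
    ("small pointy circle", True),
    ("big circle",          False),
    ("lumpy triangle",      False),
    ("tall rectangle",      True),
    ("blob",                True),
]

schemes = {
    "circles / triangles / rectangles / other": {
        "small pointy circle": "circle",   "big circle": "circle",
        "lumpy triangle": "triangle",      "tall rectangle": "rectangle",
        "blob": "other",
    },
    "round things / pointy things": {
        "small pointy circle": "pointy",   "big circle": "round",
        "lumpy triangle": "pointy",        "tall rectangle": "pointy",
        "blob": "round",
    },
}

for name, group_of in schemes.items():
    tally = {}
    for shape, correct in predictions:
        wrong, seen = tally.get(group_of[shape], (0, 0))
        tally[group_of[shape]] = (wrong + (not correct), seen + 1)
    print(name, {g: f"{w} of {n} wrong" for g, (w, n) in tally.items()})
```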

          - -

          Labels Tell Stories

          - -

          The decisions you make about classification, however small…

          - -

          All shapes are basically…

          - -

          …begin to shape others’ decisions…

          - -
          - -

          …they shape the analysis you can do…

          - -
          - -

          …and they shape the kinds of conversations that happen.

          - -

          - -

          It’s natural to want to find a way out of this problem by gathering more features or collecting more data. If we just have enough detail on enough data, surely we can avoid making these kinds of decisions, right?

          - -

          Unfortunately, that isn’t the case. Describing the world around us in any way—whether we’re telling a friend a story or telling a computer about shapes—requires us to choose what information is important to convey and what tools we want to use to convey it.

          - -

          Whether we think about it or not, we’re always making choices about classification. -

          - -

          All people are basically… men or women

          -

          All food is basically… sweet or savory

          -

          All content is basically… kid-friendly or adult

          -

          All speech is basically… hate speech or acceptable speech

          - -

          All results are basically… significant or insignificant

          - -

          And as we saw with shapes, all of these choices make some features more important than others, make some distinctions visible and other distinctions invisible, and make some things easy to classify while others become outliers.

          - -

          In Practice

          - -

          Let’s take a closer look at how this plays out in real machine learning applications. One straightforward example is in supervised object detection tasks.

          - - -

          For example, let’s imagine we want to train an object detection model on a dataset including this image:

          - -

          Image of the Seattle skyline
          Source: Wikimedia Commons

          - -

          We could give it the following ground truth bounding boxes:

          - -

          Image of the Seattle skyline with boxes around several items in the picture with labels like 'building' and 'tree'.

          - -

          This looks objective, right? After all, a building is a building, a bush is a bush, and a mountain is a mountain!

          -

          But even labeling the same regions in the same image, you can communicate a very different perspective:

          - -

          Image of the Seattle skyline with boxes around several items in the picture, with labels like 'plant, non medicinal' and 'structure, nonreligious'.
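In a real object detection dataset, that perspective lives in the annotation files themselves. A hypothetical, COCO-style sketch of the two labelings above, with invented coordinates and file names:

```python
# Hypothetical COCO-style annotations: identical regions, two label vocabularies.
image = {"id": 1, "file_name": "seattle_skyline.jpg"}

regions = [
    {"id": 1, "image_id": 1, "bbox": [40, 60, 300, 420]},   # [x, y, width, height]
    {"id": 2, "image_id": 1, "bbox": [520, 380, 180, 90]},
]

labels_a = {1: "building", 2: "tree"}                        # "a building is a building"
labels_b = {1: "structure, nonreligious",                    # same boxes, different worldview
            2: "plant, non medicinal"}

for r in regions:
    print(r["bbox"], "->", labels_a[r["id"]], "|", labels_b[r["id"]])
```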

          - -

          Or consider the image below, with several sets of “ground truth” labels. Looking at each of these labels, consider:

          - -

          What features matter? What gets labeled? Whose worldview comes through? What might you learn from this set of labels that you wouldn’t learn from another?

          - -
          Source: Wikimedia Commons
          - -

          There is no “view from nowhere”, no universal way to organize every object, or word, or image. Datasets are always products of a particular time, place, and set of conditions; they are socially situated artifacts. They have histories; they have politics. And ignoring this fact has very real consequences.

          - -

          So what do we do with this information?

          - -

          A great place to start is to reflect on your own context and get curious about your data.

          - -

          If it’s hard to see a dataset’s values—if it feels “objective”, “universal”, or “neutral”—it may simply be reflecting a worldview you’re accustomed to. So, understanding the limitations of your own worldview can tell you about the limitations of “objective” data. What assumptions do you make about the world? What feels like common sense? What feels foreign?

          - -

          And do some sleuthing about your data! Who collected this data? Why was it collected? Who paid for it? Where did the “ground truth” come from?

          - -

          You might even find yourself questioning what kinds of assumptions underpin machine learning dataset development or even thinking more deeply about classification as a whole.

          - -

          If you find yourself with lots of questions, you’re already off to a good start.

          - -

          -

          - -

          Credits

          - -

          Dylan Baker // January 2022

          -

          Thanks to Adam Pearce, Alex Hanna, Emily Denton, Fernanda Viégas, Kevin Robinson, Nithum Thain, Razvan Amironesei, and Vinodkumar Prabhakaran for their help with this piece.

          -

          - - - - - -

          More Explorables

          -

          -

          - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/merve/measuring-fairness/source/third_party/weepeople.css b/spaces/merve/measuring-fairness/source/third_party/weepeople.css deleted file mode 100644 index 33ed7472967ade6cddc630b1a2ad62597c1cd2b2..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/source/third_party/weepeople.css +++ /dev/null @@ -1,14 +0,0 @@ -/* https://github.com/propublica/weepeople This work is licensed under the Creative Commons Attribution-NonCommercial-NoDerivs 3.0 United States License */ - -@font-face { - font-family: 'WeePeople'; - src: url(data:application/font-woff2;charset=utf-8;base64,d09GMgABAAAAAGlAAA8AAAAA4KwAAGjcAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP0ZGVE0cGh4GYACCeggEEQgKg644grdwATYCJAOCHAuBEAAEIAWFbAeCNj93ZWJmBhvNoxNuTDxsHIAID7ZzNqKCjRMoBrCLIFmsRdl/fWAbSx+vtlRiwYRgHiehmaIe1S1xW9y/toIZegmaX6AImBEUXWQKwMwpfrH/PueHJEX5EKmupu3squ9sUbFcpFWzu6S1LNtybEuWWxI7kW25ptlOnE7iyInTiEkllSMVAoGeAKFdCCHHhVYOjiu00J6rcK38HccdV/yTTfuqSrvTB1VdAnssWbb1CUAz3t0Dyu/iWyXdqZwWNEky0XxglOQDnn9/d+7zbVIRiiw0sWtakTKtSQwBAFUO2WPBJtCFrMo3ZxcL9pb50Lqy+P3b0q87HaXdrwWGD4YFhtRfWoj2bBJiVfo6vVX3wcxIlgcENsufOTRkwfr9r/X/VtnTdtfeFz6BSlhJABIuY7rtjK1Tp+HOfRQgWD4+z8iY3/L1i96nd1qnV9pwAKwM/qES1c44t26FBeUFMfvgmPHiluV1C8GNRjOOvGV/dWiJPWBEEz7QE9D/7y3PAuWbBxSdVHgx7EXHiWGzDWwByNQXrdEvssgDxf5PU7NlOqfTc+V0SudS6Tv+/4e2Zj6o5WAgPwFD7TMA+gBAeQUMtE8k6Bx3ma5MKXDoS9xLx15yjqvogoVu9itPSDncEhCA1hRfYewiG8iQ6zQ2oQOn6BJzkerQHmDF1v/9EBf5Jr6dVWJ4CO2LAAAQAODDP+ErAcD1M9Gv1+nDV22fYwaAHQAIBLWByNFLACCtC94KOKTXyQ8AcAc8F50magIAADjYHnpTdhnoBi8Bz/gfOvG/CcDdDt0nwKueAwB4hCjWo/l+aQqGIRpLDAJAIqLnIB7DtrvXY/RUeZYG/oNo9vddTILRBQf8yewvZ1+dfX729p/V/Uz96a8+nZseP94FaUKzEFE519GbnMXjHxCO8oLBaDJbrDaRSbKi2h1OV547vwD+BxUWebyazx8IhopLSsvKKyqrwpGoXh2riQPg+FpwXJpjAAI4OwtsgNV+wy0AgIcBmF8FQHcFAD1mAEAlf8K4fPhV91EUlZn10LkbrSZEhPQoOXPv4xB63Rj2WSpQG2ch/kZmZyKls59fhrN3zz44u2R2bPYZXZj90+yDltlt4uz2Wd/sIf/sB7Ovzz7xRsA7u3s2Ypn1m2aruNljsw0VRt9saPZtP5TsszuD3v+5b5gdEspnuw3FketyiWt20+zEe4ezhnBg1vcvV2v2w78c6d/N8rMVsyZjAW/mDQt7zmQxGhlvJJjQf8+r4Ynf36X3E9MO27Yxi8G8YwN8B9AG+eA1sGBzWqEDLTn/gu0HTFUSYG9pWlz0o5LGgcD1MAu4H41ZNwxH9adWifuifrGzcnmR3DCjvhpOxAyl6sUrwGX9xFdJgkpLqOfgCwOMbXMqtwKgDcvTArs0sTgM5kfX/ikzUIM0Y/AwRClybsGauAQwlIcVg8vEHIeibbmp1VLwfYmHwUi66jf5F7Q6MDvnRmaQIqWmxb4gjoCDXg4Xscet8d+zmJUi+UmWASiGhgHfPVxiI2W064fvPxbEiaZgiyGKRkNxwShgEqzltG1oKww9+TG9/SupJF6Wk9W7AxCVSJppfkjb1V/FcZxh6lLkuCmGr59KRomaDjT+BWLRAa2ODAIQEaDF2ebeKa6hDqGYthAFR8fSUz/EIqrjZz1sJrgJSU0Bov1EFrkbm8ujpDHFQFAf1tPDoEtKxZku+VavyGw4S7of3hRH1iBKQLCEeEVFQbFIIulmTzqr1LTXAyzqmSAHhNFq2/eTMOPIkKKroZj60Rji0SRSVh4lSiEeEtpk6msOX2Kh+kVmuYhGabMQZI5Z50G61orMumtNSdeOfuKihL4GauGdMpHxqPJvdBLDfSXvVThEScOKrQSx7ZAuzu06ypI6YwsGuMWZetbMAIESpjVESf89484AFKZM3pBUrCCS0px8l89ZvIsVD7BUjStclmGh+3RdWLJc54me0jd8jhp/qJEs2BzYkIdiLOOzD07qFaWoEvJD4y63nIlAU0FxptgzbAQhj0IbQRJVh7VW0Mw9LjQNssPE4um+dXmG2ESDvYl5DmirktI6LTXScu5ApZVaG4RM2zhcbAcMXeni3czDvu8uP6zfK5+wMCt6HboKqoNPSA1DOcLQqTx2cTSYSNH0TJcbW5TSzT2aNDgS687l1/7L1RU56eyYvdoPGMSU2e6iCmcyyMkePdhOubuh5bIuyxW4d2fQrT7lu+qICD3UkrLqh+T2OV8sq9G2RMxaL0lAVT9ULXVMTYqXWgxPe6fdJS6bGe0vNnNrTBkuW/QVfHAsd+ye4kD0tgquWA/MRH8qfTKHta7vH0gDuYEzEDUVrcVBJkBKuDhbW7xDn6gm7rXDFVZunJTeG7pfHBNf6VsJ0JgqCAGipMf5arrE1ohVpaRZ3c4hd7ycOGf4jBJqgilL7peqcIRZFU6dixBfe0Jt01eRcw1lCzteUJvKYULPZRqFrQMzOjNqCWAxuZIgMEyeDXC9wclP/04P4tvvXjZt70fPurwnuIKDQuZZTMxhdaRJnRkfyUMYs/cZGiW8NArykRsBnmF7qLsheRIC9e/IF4expS5ObtiTtsQ9Fi7xi6PrkevaWDfomi1D9SOF7hLLO5fCPGbi6FJDMSPN4ABg0WQTuzztWwDdNGaFVOymYbmhNlPxfo8NE7weVr+Dw9qnter+oN52jZw8O5hoC+sxR6ZcOshv2rUiFhBFbTFQXUum7oJ7g2D
ZbFrQZoMs98MEvIFBs2O8zqjCDkIEHlLvNFrysO9KybOhgkXtWFZSWwblLOVQWI0sDkJNzA0z5mKfRRcACdCBCFlFpX5eOVk712/oXWHaujNvfwiT7y5OHkKdS15VNaf99e2DBg1Rsb7YiiYSYb/sfrSQDFNcde9kDnNv5AW0jY0lAYybmpdQyC066aJW52ZYpSbYBpzCrk6ApCQ/jt96L3KDk9CpcUTqvHvSqYOZFUuXFE7qhnqga5IaKllIzZwy1gezjU8b+Rbs/xUv39VCydeMYLQreSW+OcFwCCbkmakiA69h6HfXVHt30Ze0vS8jz8kjtk86o6oMd6ijSZmVG804mQcad3tDOTyV60tTeWTV6ATuxbaHMPUGlw3FzWmlGCZqeFTjUoBQUFuCZu5Er3leTYfssWsneODc6G5g27S7cWJf1c04iQsceUSfEbPIikyZjsxe1vBGznPoyTB8UKTY/xzzut0odeaZVffkY0T76kxhBuLeFGjehbbBC6ZMXiMYHAisBT2HnUWP9qx8pQgVzemET44LE9JSu2GiC/JyX8pLlsLSgRKFdNLulLCxcS4BBEVm4iwpZsfJ27pgRqs264/LnTBAFIFy4IN+oV/nu3QAuZSR20FqnrK2j6zHI2laDn3J7grAO4UsDM9UErHgIUXp0SacidYGYL4P+IXkGPKUnpuH1EuMbXttZ0D6zPh0Q3Om5S2uWkWm76pnNLqipib0bktbPmHAZ0tAjtS03M8IOgapyixmR4gD/ILUzM/focu/MAJE8f92GqUSTwLCM1ylspIpL0FnNZwejpwfgcrrAkgNaFMkJoy44kmNSWrZ61a/KtX2U6kw3GCrvaPYyYcp28oL1Rsiw1TzaIkixDTlc0TMCKeawjbX4DzAHMzwLIrzPY+nZd2Y1qxFCx8rYQgxEDsraQkUoTfBNbvTYvHlsPtLgNdyvroo8zOVisTkkbsmpRCAfxqGHktty1mss4wNPL2dsTJvbB2iJofjQY8MjQSZMTS0hdMCdwnrprHUUmyIhM6TcgkWpWpUX2J0t/b0gw6AHOKX+wQUfTEICuTor56hgKj8ZbIbbqt64jh2YMrjmu/Q3KZ70pocBHshETpmVCIVsiEZl0+cyErqKKiXrWeFiKcsXMnJqwUB/LFYgsdVfKmuekvJZUFSUljqaqQlb7PiNqdNsl7ixL0as1vOrnPm4/dD6lla8xWtRntoaKtM6QUjuq7ILaZ6kmRVTqaN0/IyDZPSpmfAn2epcwBoncHmFbl4aGNQZlT348GGRBwxCIDOS0hOjTUXwEa6DGNMyspZwDZTDaf6dmV+qD9LghYB7xQRoVFP28kDozxeyGQenaToG5KR/SUpGBt0Vp1BjGY5FIkikX6iw25hiSrtDZza1Fg1FbpW7EAw201CwJlMlfoRpM7RbY7D4QMc4qsHlZCNGPIjrkxcp27UF28n2zkAcF48khrJaqbdUE1vgv7xe7tpW2DGrPDIAo42BjFnPr02kzOnlxLn+XybSZEKOMUarfAXUTt6cSU3OxMxM2lwep4Y0iQseagskZzVFzcXZBoe4hc1zoO2sW9BOpVnUhg5C5ONQUPwRGk7kkvH50bDwC/rwpherb9eP54D+Hc2KugkTvLFF6mMuPkNZUbPjW6L+0N5W6yuDp1RWfJRy8gWVFp30IYqxEvym/yN0s5t2sQFW8QmDmLnzbS1dVKrDh6I7ixc+8P2TyI8WRbvp4RfVFRxLEx8VnGxUu70Xe5mqUON7LQvDYdyTcqUMjgIU084pHfzaIxxpqnI3laSCg+QPrHWKnDeY9Bpt9mDEsScDEreBKLLkSMWmktbJwVR8g+VAhfLTQ/aSdg4MohuEC+/CTR+VVwPAbE23obPRTjpJWhCG72lFpu9mMhrdRdznM7yLQCeIqS43l4XuOWeANGr+cE1I+QjyQND9Jkn/fT9q2u83C21oYox4pg2uWg7c4I4hYXtQuimHEx4jRYZHuJfGNdb5RiQrhRC3ea8tkppkVo61ufxd0KHIXeJwqq7ukhAdRiLILJz8W3HJrpJPxctRJF4OS2+EumE2TrkG7xJMH4un+16FomxNWswFwQdCFxOZVY6bovrDeRrxkvhkC5A3it3evgzqAO5hM8khVkt1W30vNAwinaSzJ72fjJnSp/EQWn2WQNZTxsQkyLha8EehRSTe3KVqy8TrcdmAIkirXki2DKc4NlqhLMOngAoB9PlmbiLmaR4KG/ExUXgTh1EixOoZu41tXBW08ZrW/VjSOpI3b11eXQc4rTo9InKzXXv7uLVho7xjaiE9vG7r/SZFRlCfTnxC1MvqO0FNx2qJG2h71XF2FLKwOZ2TS5a3LtqVwaAxoSz3jCmZOUxaLDtSGUTZAUxE1Xi+jAq/h2cfp4wpb7cRtkULe7HedwG4sfv1a6LW85mgvo0otg2j67jlW8KgSDNbKGQlFFd8dUOTo5F04O2AgwZZG/8LFbFy8XN+Y1H9R4rme8VzJ2zjdVTK4kcMM7EQrUaBi55Mc27zYprbhPDTQWbEDcbqSovwVRxDlFmQdA3eq7m2M5+Q2+SS0Knqvj6dE+sKBgWqfk/GIO+y8KUnFCpHSQ2GdyLF/KYDpP5sssZfRllso2e6lWRzKdadzt0ud3q0J1bx6718y/oTAB9FrtKUex27c5ackie6CzuRfRh6BCbVw1t4ziNAZOJeSUWMWuYR2EK+0ATVYXL+FZX8nMZtplHH87vvbMQv8zewODgjW6M/4XwiMCsguRWgU2R5oFTomK0df1Z8x7eysiXW+TLlnGsozqA1Q5YoDiiU90sKpYuHx48bvkup7VGpSAmIR76er3GE/KBEcfiLHVUbZTd5/cJ2hxtWcYzlLKYAVursG7xvuis0SsfJEeRa4drg2NXbHkYasfVX+zlTi+L0SamgPqh7k6LdTVprDZ7xsla2Aii0m0ro+aUFSmxs+dw8jyX2ec7c0y8g262XCIpRlzgKo+Ntp8LOgde++X/nNZVQZ4xiGtAbKO8K9Ad1OHZ3gOoc5vVqM8CCsgmBTnYcyYeqbb3W4aV29eKkN1c++ygDnmt57RaJC5dgZEsYxixeutq55iLkdnAfo0Cn2ATa0j3Y1Cgmd0oxkYBIlqrmdG2RtiTmlmYRUnAQXUZBqLFzpyAbdM+xVoQFz0Pope4kKOfABixLZuM3kgST2O33dmI3FIqYSPfQ/eNo3Ima7bngvXiMwaZeXxN2sZvHm3N60psj+MfkDMTxgfO4Xsrwz50VJ33b3vRcHnRMaAUsBGTYoCRCKgXFO6Jj/VwRZdEu0r44ioZmkAngHuk0wAtUUhvN4VtG8ERG1FsmxaBSLYbu17dJ0rTVNqmv6h8xGO+i8NekCMpe+8dR7oaogQPjr88nmHiwwaonTl30Ijcctptj8NT2ZsNmyaXjT5D2ZLx78PGeDHs2ybn3QBYYWgT6vpmoPJ+xZ6hoHWX99pcnJvFvik2xKObOsasTzLkJE4XWziSgzgiiuEVwDU4B94D/E/ZxOErWpuVrxugYC72sMs5f2rd5x1lmN4AlbNw3e
rvyV2rlnqA+hqjftk5b+8blsswsTTNp937tA2VFGzyHFhLyDN10ToLtqMW+AB5iMJb9AyiQKzIJapJxcd0sKKKFNnDNfG2JkoRyg1bDa6rEx6aC9+rjAFXpnpqTm/n46i4RymA3LtBH6khj4gDritp2zb4A7C7l/KGUuSR4sbsZDs3aQ02gdFLUK+xae4KGVzLxbtCiil07XTY0WQtHt7Xajh8aeelu4tuXHoiaUzcHzXkYe/H5xlKMWPTiivSeYvJ/R2J0kdLJ/vjE7Eii8fu/27ksosn5J5lww+rdj3tWNTFHf/R0U+UfSLslm974Rr99OWT/7x8f+fhBjWa2nwuQdKT4oMf/SwHk3v/2ntXbNBq0vYBVpNmCOEkIPFJ/7qZOiu03VFWrKcWzeHrnNWJZy/RlpSuR5ERopz01s6I0bewhPyesNlmRIRoVDSZI0Az/ZdKhAbTBA0roYH0dQn2wvazZoamW5Lwx0yND4ZIsVhMV0yXrZl3XNTNsx5gZ4Ri/sh5Mu4KHCj6Z++OtQy/Nb1BpTe1W57MzbftT13WFD0TaZpNW3EeVLybHvwplkdiyT9lHCJTyjMmRTGbThxcG8OgyhC2ykCzx7dJsmnwu8BcGG7OEvV1GYXRQzqZlDEln5CVIFi05sySYih288KIci6vodSx6F1KgWQ1kzK0MTbbTX30lkB4Ze5/fney0KxR8fgbv3cC5K62wvK5QPPhs1ASRacDVMRvWNzQWzMN02C3Mq+U/gVrohu+yG66T9EPqDCakNEus4ii578NRXJp9OVkjSjBQ6fIMrF4lUFK+vi0xfUwXvf5rhgGpV7rOMbL8KGaLozbRL3bRkul4FpO5X3Geaddvc1L8m+/XXzZ/UTbz+7Z4zutWPFIoX6Ac0Yz3VTQeSmpveyV9rM2x+U/mx3mXX0RZD6cDdJ2iPlBzpyyBXYDD8wmBLWofOxV+qiWztZgX2m5lAfogs3oo1yncqYZ8WRNboIkHG8xa6SiwwfHvhvzefsvURa32xCoHdXJo9/1U5LhHAKDtCRxvCgsTW+ANoUG4Yr331lccY1MlbwUKzdMX4jTJwkpssNxcXKTg+qpbe5pZxJP+Tv0tjsQ0/zarJ1uriV4CcfzdnD9VtQH2bUeVS/Ytu784fG1dpImre0rl4e0kg9FrHYF9tHdlyYqzTmLiRoyA5BWDQKJXSXzNF8cP5ufQUDsrggrALzU3E9ZTC0SlS96iB58AIYL5q6DNhtqfj1VyAOQTXq1/RJomgnxMSJGT/jKdNQfQZ9mwj5AxflmXTgeZ+hhNNqpC4aVO9QjpDKsR4tEm9EBFyMLncgfJV+0Z1lYLrjS9/YDb6n2+WMMNSMzo2Bmh74t+NnDj21XLDJrGcoXaaR88GzN698R3JbhRxWW8ZGgSHlc9JGagjfU0oe7dq9dtediJ6SwBSGzFTRwA5o2n40HvugYC6rI7sPtrFCUxWQUCN4srIUV+1PgK1pJwRrt0JsTOEhtN/Cg+8gTD9SS3+okUWTnttsDYs3cqGEE+UPUmobF2drLI63wTGAU7cCA8SD049FaS2nCitFcROG4UW79m2VbK3/4pnoAFrLetCDuzRohpjNO+6OHszsRaISJE4jgH+Mwwf+RG4bqSp3CtXCFBlNiVXHcOnsSs4Q4aFXIShQ9qcFZPPRJund+8f5Tkb+bRbQtUcAjUsa+QnOTeOD5MDzuvqKteGkUIuikxi0oAua6oZm1gaDBQvjsOzg29DFq9BlYUh65WAOxc/Rn85NYasHSs3fopy7642bAi7o50h7xFBGd/A1n2HVNTFEAuQkJxfX11SMRC8aQz66GFT+t4sznbLqhzdLBtVXeYGNl6NGpKvkb2ieWRMGNu8js/zTZbCT381Nf/8P4uo8WdsL0AlAYN5dWuWPhq+i5kiKJXLGLH2oN1ScwjHQ4vwxfQysYG5FdD4A8RxrySBmZ4HmsoBCKKW6RfVwpzP0oXsHjZq6f2pNCit4c0zk0KRWJTRueRnbNvFbTzi3F4gVr2fXt9rFCgV8ieiA6dy7BJvqpD2ysmMxPRc8wmbqtvtPDFWfvKqV0moNtLd29Kwt5JJE8F+mKKXJ5qZpo5c8A8D+mf0K6H6/+hksGjYHMmNjT9A3QQewaHuPlEZzaYLYZ9g5pxCB6xpx0ga9hfkjv1cZODurNLKWVToeU99jDzAddHVZ4fyxSBgRRsYVLKN93r3LTxKSoGJyOF6sgDXFZXGFib8w4y5FciUTC4THAxn6SHEc/eEw8lcNCSzokHfRQ6tQ2km7ozmhoPAHyDYPfWTdyfYbY4ia7YtoQN8K0gpfKtbm+a2vRLxWKruCilN952Gd1pFpPiIW53gCIWCvWhyoNvRQ3IO9xq1pbolYV7A//+ONdtRIExkezjMWXmW7jaOypjT2WTU79ccBk/oV7tiLbNHjEtmXM/w/4ckjQJGjwiLgxNEx8lZcP3KRuRMpN1vXW2xvf1bpH3gnfZiLlYdKRX0bIhqaXJB/THzkKac3B/2dthjojWhqBri5W20FpKgQNpPQGM4Midd04yEB2rmU7gwRCgtEkpxKN3mlH+4Y8at9r0FD+2sEsHF+NccjsPTC2AkKfNfZusIdYqSORzCVhtjF94iqPS/6LRBcLeIbWtT5FROIZfibA1dLAMJZqM03UxHPo2kF6VL4ndERXnWNAyTmq568sueq68g7ixWQ+16xR21hbZmODGdQq50hjwW+KcpiEMpfJVR0L/0mY3tg2uGBxY7x8HhQdK92JerVYegRTFBYw6ECijyNoobGj78Jk+kbm1qDfEiUojMmksJyILQsZemg1SclQR/reoB+i89EP8XZUr+YE8o6lBEo78jCx0SZFK+todi8/+72J5Os1rqe9h9S2sBfstU+acy012oFQwmWF5ce4tdkh5brLs51zHigH3EpN3ZRJmYQZOhRO/WY2CAFTjQ8mQtjaoVV+Xwx1ZHwa8GxgV5WKjbBdrIQH4DdUqepw8GAt8LBVRraKMvGHwyOm37HhvkaxDC0/zuQKOoUJAMw0fvPCGIdC/BYCSR0InGkPrULaYxzTsU2z5aDA3EBz2DqOouIvqqHNs89fMQhwWO4d85mbK84yfonIXHhJIAnrkBoHo1xdIFXArFvoTfVuNFm13EOg09VO+WyrbO6bSuOGJMwWvcufi54tg4DkNvmiT13UxdL+Zk1bdLBXVk/951uZwREnayxeM/sfqXAp6xp7G1HJhWquo5QwZFkGuu2+XuBS/IchBChU69JGv9Hxs0ssY8dlZLHCS9xVNPezr9hB9PhJhIICzyuUrUp4nEN0JsZvI+WrXZFbegcAtTlyMHZOGsZpANJN9+AnQKfnRJ1rIeoTADTRghNLhQ7Mk0gBUZc1LEHege3/Ntus7jJyrme3wEMkl3E0ErpF5e7RYkZp5y100ZDcHz6S2XjpaKCdaxOvw9vqVEItJv07atoARfA4tS1AGq80h06jvvIfX3xwV3LAjM4eTXc08mU5cUxYdmNPN/dWqoavuuTj6JuUFQbtyKyPVH0tT1p5f2Bh5AT4PIuMc
xtM6lXKrPSwNL2f/TVBs1zHEfsNxeu5qE2x0YfImp0rZuj4HJ1bhEi5HXYgMujqKLxcKUZra4TIQRTnyzD/v7qarM67YbgU6s4EZZuMY0vrXtKc3ZKO3ovhhrCdgzmAvmIdXevNoEEqoIzLWB3tZPuAXbWanxgqIulHOe1zElB7ETArEeyPWOutlWYP/TJOos02HdumqNbdBoBncIsOTLtoGmCsbbHnxhRtx7Tnc6vVBJP1zZy/c5Z4NlTlmsZ2mxfmBjlXc3WFiQOikmtRIKEppBD3wHyCNKyuJ12Jav+HONvwiT/8sdYNZp2Tl3TV7tU0LoHVkoeGlQZfgkbu9+xrObpgQjXQmLsN75rClecT6Ay7KAP9wfxiIA9i1vfu61R1JX1Ju97+FW0UkODHnpOVpcJjYBzrnyl8hg7Qqy0gCPbLBGZD/sQYYW1+2XYid+r+IO8CNvu9kJWvA6WNxMudicWkg/MYANfYkCK2dpxZlQXczsLb6m2vgDGYMeoXB0XmKq2HcohKS8pGFLq2TRzo5gF8OBcNZMTQn7VflbvFv1x5cD/GJWshLNV3SdnDR+puYCNmqKXAOZAnDsf48NQXzReAHI467+uyD63NDuDozzOO9aXBlYlZLY/POSf14gZ7IXXx8iJ28Eq0KBQvP/F0CpBNI7vN84nshYYB8kKcvGaWu6dIyuVAafbg27f3RLcgSChdkrfE12gfh530Td2WsX7Ffx3o6wzBPb6lOTOCTYbV2OIbdYv/uh8JOfM4/w9K+BUiZReib5SMJmkZgo+wmWA6Iobgj2Jdn68adDi75uYabFbxyqJqR6qUgjA7xidwWBCwBVaDMR/I9D99/0GP/Nhq9dVOPGSASo8NuT43olwTL399d19il+VKmRyHtwLBDJKwtJlwb41//Joq6/gXBnqfifPp2T/0Up9Pe5czvnCJg5OAQ7kpL5ty/TXa558Wm/2VjN+9Ym2Q7hovqs/1cfE12db5DNLaZsal2dz7T6zG4VhsnCyS1alZM8/w3gnngnm5slauKaju5zlRbWn3Z03AtrGDqfCXnxm7y3VHkyYs229ltzYEg1z4ffcQdVUsBE3ZCfpWM22CceQ0+skGUVEb1njk6iapCdrWIY249+wsN/kr3HUigu43O8PcnDXv2cS9YjN/eD63sNF4b+dh2zfTAZNE6KRzGm8ZqOxwRrhir2F25xdMf4fRO5eyvt5IMxTsM+YOfoKXE+chaF+28S4MxiwfXYtEp8Hch2+uF/JYPsuH1NQBdi8kQENuKKVkF8ygzTJljvL2PQnNtnk7iUQeZcxdAEyt0j4pt6ZYgcp7lfc2LmAWnjB5GKP+OLKuG5ZDvJ7Vb1icPxhj67WjbUPB2ZeU1owiskmcSAFJ9cG1yfV/laEx+6QMUNspD8aExap1RObC8UBDiaJQQCQKLENf9xGQR76d4fCfPMUiPbNTp2PItoNgvwlClgcNJmhoGYWCB68orrZ4/q2V1PZ8O89cLZeNgeoZyK3IcPccZZVjQvpTo5j2mNWqk0UDZfcVXWqOMCYh03KMJKjbkwByomJPtVJ1wkhk7wIHpGFOadbg8r83uZu3yh+r/tYpdxar7vdi8JJhn+uVsjDc8FfoHzBMFeJ2vuJgSS5zd1rq5pbFWGcPSP3OsqmbewLLDYblI0ulYR6W2VT0Nsnl7UCFOIEqQLlkuLQ2nN7feXR5YupRd275arUGK1D2cdxa35ljtbdsBPjk/xJExdZwq8c7+Hh5pvyY7YdJDt3PnpZPDfsjZZd5rkh9MddYNBuGmEDCv/2dum3THWirDE5jKgYgx3tk8AgInSybAFhoU3b3c6KeqrZ8+wHDpJj22zZAcA2u2s99zUpRbMfvuJnF20zT6ouY2d3h9ZyyNZ9zDYiJl+jQkU19DWqnFRX5pmoLc4/CE67jPDzuc0BNKDN1Z1aDbmV7qp/2Juqdd0lHW19KPEM7mEa9DtGUwrhjI7VAbP8KTQSxotnbQy6mpay00VrXRfug7+SuuTAw8ZGDROXPNpxjBbC4iWFu0ng9X1UdrtH6n5CHCpdLpmeIluYqOwlrPu6bGeEIYZvEFMFluHaQN89R0sw8Z6tD9FaHXGpz/sitQdLnTSHPxB9vIdcKpLKamnhJqxKXD4ON37ODA2035jWcv7xpltTssAehPNPYJDa7LFVDt0p7BA4tRGbYl0ItSDx3oqAW0BM6oSQswqI6yBBCl8vojOJXDmJuKiZO0RRe5+SS7YuAzZp5kDOd99dvn27dsjiNsPsYik7zxBc0BJTVa35pv1IyHDQwqymRwpHXZAmHTWPfsHz9Mfe1jOExABH2DBfJr7QUJqqoV7xMP828nRnf1IPZrdHXOtjBqFxhluE7Fy7k0ytEdTd90nc59ltPrBnct7GorXisv0ZbkMxcELWqANQ3cnEfmWMWz0rHJB88TOfr8Gc7NQ8BHc3fB6w6ckdvgOZwzVcZpgyfpd/dfNw1sxn5ajj4EG5p5cpOd561YrtGMEJ2drXN8bEAFiMhnHfR0H/5obG7ZWjQRXLf5ua8tWUQvScS9Tg43W7G8SMDEoyYU9Bo051VCUla1UrqgnYvBGDpBGpXlKKfA0X0d+fNUwPbKQCIrez4RxQpphurhWbMxVHhghM9lYqABzMGmTBSoRkT0MwgM4MOfcCQZQNxSpEcDWuXJALk66xPVlyFs78qyQPdJF/h0+rwrWxPahn/Mx76bKDQXkcKMAvPYddcRFR3OfwxP73LIe63qSKuBo98iBQl4hc+YRr07SUdSUb0DoYWWDK33o7fBsldlc6e9g2rrLSXaYlwR87hB947NN/Z953c/5z+yq/9QExy1f8yP5XaR2KTWgVMPmX6Rhd6d4Dp2YrKp/hwU5wS/dfCQghCG+um7b3bhtrVOpD7hj6Jv1eirb+hU22vRpapd5oBjtGliNFN33QmLtBBjQOUItcffs+w9FRarPo7fMxnO0D9XygsLoH38H5S7n6NWUp5WJW+bnJTmSsut5Bk/LT9zxEnUtgt6b+QKzTWD63yre4r1tPgmh58Qt5yOFK2gDtDUnCQa+qSwY6cisT3xLA6dx4PDtC6o9qbJC4/urm3xLp2dtm6N/sWgh6BsnVzkiAbDHEr+ikvNBtQfORiQRocDDDb0QTBee+1AliN5DZmzWv2Q8TDcJfZZW3aL508aR2bXNCc4rqno76yawHq5njSAo4L+oM1uEngWaGRVzapn/YxatX4jd4zxnXYrpt3hqtjoPEVMsUN3CRi9w42Gk1o7uOogfO1+L3NnKt0OSsZ29aisH4QJr3oADOd54YudQmmB+8z3rmGNQy3401OG8V7YfirMX0ytyHNm3n7n8RLbYWZQys2Sw3Mupq+NDV7RLrUTxH5a6lhFIBSnt22EwIDsXeZtkHW0a4L8kwDl7CdfF1++MF7VM4k/or9h5vj8orc7yxDHoSdxCK4FVj4eSUMJkFOilgalegkAlHv2Kp3wprXl7OWbgeIJg8eqEFXSaHrGiLjypRTxOu2PNoppGH5gw1JQ2Yis20YUZuwaOuwYPGkzTTf
D3GZeX9hbluZVBz4iWzFX5QVbu9OvLf5VBkq2UsY9h1tlpElf5WGPdXb2V9zPaiG7BGjkq/CiBC9+0ZKeQ9/Lu5jJgVuvL/N245jUKSl3Lq50GmnRzR3+b97EgLvZTrv0P2gV+DmOq4ctlm2wivtbivMIN2Yd56ac8x1uzgleJn4GywUi6QsFlZXcs/BiC4U6OmCTgpqVI3XJ59qzcP2CgRxm6NoSB+P+cUgewBYumAk0oRvn8rP4QAX0fBfGwYm+FDuDjX+YrTYHzMlTSs2Y57q10ZJ7a8BtMD0dLQ5REEdZfduy8mXHHoTBum/594Bu9JknpREI3hcQv6//qvG2+/RewMMGvbqWfP87pv/3nxHtSP+vfnwaw3k2X5svjg4fAt7FYUHEuvn/jH/oG31RbK9e8NlczaL4pO77TAEiIxmZ3/3Omyf2/jhry0+2He1f+U992yrXfqPpn4Uhyu10vB9vjq4tJG3P9OgN3MqXl/vJgHRlo94/CPv+O6Ub/dtNjov6TzYelWZb4dV7B/HcO0AzWKyeZZh6OH8P3mmoX0eLboXwsmGX8yUKrv1+Ly56OfJt75x65/2ok7MzLv9Pa2xOaI5q+uL9JWf3HtpEMJfPv1k4No+/6/Z7Z2L42d/9oK6EdfX3En0u/9weTMhIv5SlJ99CnHK8MdwYXZ+cm5v77priqJgG82F3L/++++PCO6FJR0U/78l/Zxf14cGAGIElJUCmx1XFjT9PqstSCsP79VPVpuy9XQeNS9akPfiI6cLaEuLJLgQljwMl5b0XMvncJvadh1J/1sgV7wD+n01Cg1qL7L//jz6x7daelt56Ekh7KYlT9aelM1F3Jwo9diFIJ7bjru0+FQB146FEqTbhOZFG3qX8i+X/gkJjvb1nk7eXkkTuMAahyuLuZj21oWdQ0rNRPSDiPyP9IXUIa0W+qpE3AYJTLcgsJnpWI4Pi7b8BK/X5FkTgaMJIvWYBBW1Qr1DbnLqAlqpzJRAbi6PhTz/gj1SGBfmARKz3fPbE+DxO6xIdvMCNEgLoQuSs7rhio6KTyUvgLUYPSH9CTGjwnjxcpdonixhY/UlE1TGGSQQgKqVTRBhpg7IFuwjHkQQzTfeuDsN+wRqkIfPKqkwE4l9fmBlEPkReVdM3RH59TLPP8lkNlcNP6L8VZaAf1of3fnI59WnRlCyG+9y0Tw4kL3ylviBVRb0qRTKPgUc0kClFd+HT+FAMdCQijRzQyu2sZskZc4cJdUio5AkoW33KbIX06BU784/M2JKGHkxaLLdOr8BBD9cE5w+Z/OgcShSuc5lm6Zx1SJGzBkzcNPkS2P0ruM38TP+88WHF7mcdxU6Coy+ZNaGV/b1MYUYOEcuc/d0vQnlmABHkRiRwUVBlWQCZSfX0AyJSCwWTP+tQ6uFp/BBwtCXkAloDhTizCiTbs4U5zogwN00HsIGYOhf8vqFLYuMBnm3Jo28IeBRhz2c566pdhQgvNGBbfnyM9P2pXWVX9rvjV/4Ixjrh4GQIcEdJDL5DmLuwwKvQHIjJxXwjSGg6pkETq0v44DEpIyVYdGDs26j3FN7rayCJNeMuhVWwJP8j+YF1ar9f/0VPOhjvAOqWTOaC2UD0qFMcKvafxAOZfguYM8WJMynpK2sKQgrBpyNi+OnFLPRvCoWPottCcKNc+RGTFxWDKObdbOoyCL/Puy8/ba6Be9sz2de/lQyzouU7gJSErawoYw3wF8/AbmcsImTEegFnZI9cBRxS6krgzwzAKwyQVCk2bgJdg5yAPFMxDwUyRO8/+l1HkHo9WrRK09DfYqcwh0pf9CNQg4gjEAH58GIeifXZkCQW4UaSBYoGElh4Mkt8uNU8pejdfP8TapsLNbQEhSnGXzQXF9DmsOYLpR69EiWm3R9edRcVGWYJqFFmJ7m6WqsONEKWNy+qx4Ga6sWRlYx4RxcCDmihQGpBNpfkpmxCZdysJwUbqo+6TL3RtMalF0L3Pc2NdbT3djswvEaWOJvk9mqI6HPJQ2ogXiqF+fVfFBDan6AAm0s5IUPsHELcnIWLonR6z7swCT2iI8o4Vj1f+aN1BDIONeNu7o9cwocuGMHkmUscEVwZKnL2frXU3TT65cr2uwd9mk0VuftbYAOVU6EIGBRKOtQQkqezByc+iPZ17w7mqp1kVJW7h5uthkaO5AdTqskwcX12YDJEU+qHMLHKX8nA/v9muCGhtF1qc2P9TpZOgr+8yhp/jO+gnqMwChHlgHxpLhHY69Sw3p4UpCIquaDoEoZocDD/UhLvLvf//SRfzWVeevwwj0Q4iRE95FXBjuSxyOzoy/VzT1NcZ4tDj+zxq7ORw14fjFuvPpQfmGMPSYLHt6Yvx/14N7wrL0GdjdtdtLheK9cjQiWmB8wvqR4Pn5zQ5HjzpXdhdLzizprlhq3IQlLvN1WSAxuFKcW8zZ0zJy3PF4eqtq9vaDqoJaHbYLBWCFjuHIVN/ezO6nvoI5LqBHv7XlfddJDPw2UGO0jyJVH0BNo2oEVgr+NaxcpwMyKTM2Tdue3BENnqBtyVkTLuwx2AZ3LM5ZhVGg1s68n8GJ5+DhLCiVBEggI4rT4+dkvMgIGfmAQEZ3c2OwRI0Bc5APEIPZwAMrhbA0TcLUofme2/9dCdOtdl5pvk30tl+N1X//mvohAcRaCNoTZFSAQQuEc3m6gU3tg+kkkyi894D01dVohzpbVeFzgruhI4Epks2D2RlhGZMevxJBsMcsBCGSAFERyOgVaH1A5JmbhNCj8csmrG2vTE05mPk4+uL/lcx2PP1G3efmqxOOru/1jx42CGA8k0wYUFrz/CR0LkYDjcn71UCFk7iLZPY/72UV9BBMma+2swEEGzyJPsKtteXX8ZJ8dF2N7gVPwCEansz/uPFmdHoCSjX/pzo+Pn/92+81/By/M6ykWiW2m84a4rJU47aWKhll0bonIFHvTGDWbBqbzo3pvqS2jmkkWqO4KU4JDNinqsiASwOBml0iKqm1ZwZGCenvM98KsHTWhtgp6+JWNV3rrEaUCvUneEyPJZPay53B7eUQbjYibDAge3I2GVWSlVVHr1TjieaRgEZk4bMrP+zNxxYXXbRWyOs8uCVJ3acW+56G+SG4KzaC4t3hAMUKkr0tFRgiXWan/MVg8Mxp+K3LqxZGJfQf+1QyhVMvFMO5AWk8MbyIEwuZR8JcyYuy+jwixxf8+kQ1ogXe/azzOl21CW8PPCmejEmZJQHWiM4yc1JsRPRyBlHETJMwIVJBWNgFiv/zD0t6bq+O7mky3I2Oyu4hOCLx9C9Rj9jqkl2NVq3GWOTDEToNSF87//JEh5R8Sk4ncLoW4Ywi+NGUlBwndl67DlDNcvQGZTZPKd7bVQ1OeoYA3xjmXSvEIzlWTqxxpczn6AGAihhx1n7y6GOsv2LuPVTeiDk43GVqlLic9jJDNvSR336rmL+83PnPde9Yb16Qf3oGA1s7x1bzqVQXDELa7ZJFKNdA1reBVMJ6ljU161TcqHzRZllI2D0K1EKM/fMKjXZgc73U37/awXchM+1ctnwo4FJ3nLgR
miuuegcP/uo0ZdW9zs0RGDrQonZCsRb45mt8XTD0OZfcknRa4nEfkjBFx4e7s1Yi7W0Z1xIlNDjHODKLaiuLqsBnldBFWKH6TFHzwzWnY3CenUn1nA5cbwsDfOj/+yfiVyfvjFxdEyNXtwTOYcF0+uQCxFPOOdMESCU+Ep3rYXiGCdBpJBsDi4zjR9Y0d56FAGiIISA4JME2dOkzIM500ju4G8Yz4XW9vkbpjwNVf9a1W+Uh2UfXr/7R/qWup17hp4h3puBhykhVQiMm2RJzTxeP52N3u7Jh7JN3garrMiiTDUFYjDuhZtXNH/Lm6Iz+Orikug8lURIRFpLRUEvIxDuXTLkwsqLOk1hylLYSM2QLHUIQmEAPSUnJluceRIXm4FK2Z56Ft1vICb9pTCAEt3zK65YFXHp3OV+M1gtBnrHHAsJtsJDgy4NAbCro4lxvmyTiB4GV/NGSEoZpFXyDS7AEpI4xTuNIF1QHjXELyqnD3N9fawrFw5KgYjtqcGjMUHEAZzyEMtABHVuUgoqgI2RyGJW4Q9l2IcEGIE45+E10JtSPxxrke8Y5mj4YRhH2QZkUpzvRyvoYa7wqoc+Z1EziAJAP0VDhCIi17p6Z+4p3Mt/8h2F//7VA1czmO9FlTEbcuSP45hReTbbpa3HksWvZ3xwZUW25k0FBkD8WLW4qLHvbPfUeSJR9yZDl8Ipjrd+vQUd06ObasBYggMJ7q2vbR0fMH33QV5KYC6BmC9ayEL8kaqoGEo3QBXDBRMQeqozEC5CSjLhEZs8QttSLXjQPldHInyhbhKGCMsyOu2YM9T/x2QllcvsIog7gfEgog1vxCz1gETvWRVYTkMIByspRfAcmuc9BnELJhgFQoQzaQNkqwze2qHniXSw6IuiAgEQJR4HuADgLLoWDXzRSlBE1jEZCGMkYRBlUEy2GOmfGr3jqvR9WYmaM4J2ZE0uvxi1rzbhSWFKj3skmvpIoZC1rjwPNqipdFoB+ZQYU0EIG13ECGNJAYPMvM19848Vg4JOKXeGRtMwYH4L0hW4o0VZJwG0c1bFVrXaBPC0qR+gQynJXIs8OIdmhVaazQOMR/jbQEum8Ub9apeWAc+D683zZv8MaPnD7BaldwRvQIeBtyewc22zAkIPvWK/fM6yBLK60Sm4eFAjtB5DQjcW0NYj1iRkUTcDVWRDApsy/o19d8v5jY//rVUXxDTI1PWOleVB0EA4TBBKJCRG+0CA8MxYlngjNt94yvekf5sPmBztAa7IGw6gJgsWPdKpgvoXKvPSg0ktRTHgARpoO+jFJcfFspMy7mhC5L2hFjlUXxrGMtMUysh/+zmEiztLb8fGp+wbKNSqoqVO6c8vpcaO6xO3/c9puji1jEG/P6j6Apmd9oudiJEQSKNKXaRJ7/L1HjpkHniCxxz3v91jWoC7YgppI67kKkpRYpvPmSBOMQ3sCX/uPXXmulYOdQKYU7JMKZTB2Y1e7uKVw61LPhWGAhAhgOqwhIkKFMeCFrK4ap+b7xe4tNkht6RVlCWnCEF+JlJxDpgDXrP9INKoaBvJdNSA5YJSijCD6ZwzJETADUc5UyMRmejh9E0tKG/HqXWxRHMzlRdeCANwNFZOeMI7JHun2TyG+kxQvvaj0nFtP1FnySI0QSZaOZelfLxkLBB5EhLse5KETcufUH0l8AvToFP1zbaRYHvg+L3PL0/iMtoqbKM7fNQV4IAZQduDRI05STRRY42jGBahHugAEsEju0E5WnpcTbE67PpophZzGQlpixjyuSp22kHBm3a6n6oUA+anKUCVsrptdO3lxPLZCfh44XlMDsyKnLsoRAj5hsZ7ER/0p7DST9XPmT50T6bVQe4OLbZN6jP6FwgqKmcQKXP56+By5UfOXEYkamhJ6chphVS8FTIhZlzHkF7ujGcp+/btA72xzfvaNaNI44Vwq3jYyoyioUDQCYQKuQwyUerfR6IJyL7T0jAX/ItQJ7SLqzwiy7LDgcnpaCnfZbnzDMTfBBAakIxIA2tGAuytwTiEbek5W3euMIeBgdHNA4ESQj1YRxKme6/KFVdXD93IDMLTVyF69eB4KOVspohde7+WS38GIebbLX+0CEqtXEheUFWsBzYxrsn8IvGLrSGmlfGI8dXN1lyVdlRIkxdDCYRhEBeN9JRl4Zc/y5spIrc8JcfX3bBr2hGsR+hnJ5jqmLlzIdVlML9mqARrF3BlmbOx9axjY5DIi2wtWcsAMifI/Dfu0KTFChId+1UJ2rZWusOIaxizCrQYjb8n3WuMbl4Tw2wt+kvFodeOyc7RcuwlXLyz37wIcga0Ruk9zePbpqxG5bQczTvRTWl1FOWyQYXzaZegvktgPgQLqd+HVpOojnfC0xWR8cEVFQ8nEcqfIhK+GbluUCJ/b6sdA3mfkvUbKfm6smdsvOSdJEhBwETsPmypZzqTfubL4Uvq4DXsxw1xtD3XKdmNp9zrAwhyIu4YGQRBD0nOJEQYAwcr0aIIVgEJar1UhYbPaB8M8Ty7GRnZRW431aNrPZLkgyhkHEHFweca+dDfE1I7BFJpbplIqQ6lTmziUmZCeG0L5Dt5R/VGsoOwM5L0WyL3wKh4KKB6L+R6K32UkhscYSFjNDKnIVcJdXc52DjjJZ390yUjwCFwIncH/Znady3GsfHHQN9VchAFUTgwRCBk3JCMqrwQ6zNCUwnw5xPxIPiuOjdb9d7hJnFf6IoSD7qtAog1aJaAMk3sMGIPPGtpHAKdCj0ZsgKCptWO0JBeGyybsjMaiKHL+U6xPkSKCNtBTuuDuSs1CnGoLpFU4rDdWhJdTPtmgBgs0GGKeNIzU/kwYunPUWdTTwKJ4RIyJCFReLN8wZSwt1TkcCcYjAGEQmB9dvoO8UBhzcdk4cqZXiCuyTkcNtdberOI/60pXhaw5kMRkmdU4RCbyDm4FOseNouWnJU5qhhEud3c8K7NpcsST1995beBzlja4Qmh9siJWjGnvID+OKKXDT+iEEpC/UypYl9iZjDpMBo7bhWBDeQPlqnC8dt5y4QnBArorL6wBpc+64EisvPiNWHAZv7X7vYVPuTFgj0GiPfi/t3erVqc2xFviXf2YbLTm/R7R63l7U+/DhwV32ZdBhyaQOjtRF6+91EX10E6y7Ix5POAVnTbcDp8ZgZA6cRm6z3TGylbh5JAsoD7J23/Fh3FZjnijw/y372Ik/q3e81WIQfoO4uaq1fs6B5Q8+IQ9ydvPJWFvhj+/7E/us0PS5z+ych3D2NhY58Kx34K0A6VixxZAtKGmJTf4jGhHNZP1ayViMaDspIHls5Mnq+xZM9g+OoqweSQAWCYugQ76H6dE0Fw4noYnhp0iOsSSxTn+BvR3eJDQ6+CcjLOnxakg5lZbCE2v0O1VP3zmtdS9Qehd+cWNi/+7zzWY5F+Z0QuI4IiJvjrkI8lzxqoSD+EBnMr1nobelkP0NJPFehSMfcZMQKnI7l4Fite8YthAeQozkMOGeWnj86ytcz6jJU71W1vFcNCw92saMju489+6ykKk3lNpkmDV0hBJxYqYOz4m4I6gxAj
yiqE0oq08PVM79a+R3RZkBExdCsDKSG7k0PI28L3VBSBl21hhlRS3WMNeE9YSk8tAR7HXdMACxgUcrerN9pNrHF7FhsyV2M4OdnjiCDhSXpSMS3LZGVrYT4Y7JKyt+1Nk37cJd0pmNT7T7LQPHba5Ewt5SoknUgu13S9tjxZicNKwJLDGjjUhgNOCBN6a10clhdyNsxXX2v4jahuF6XIIZbEaUu8gVPAu3C67Hb/d+Fu169O8rA/z9k6/wgVdhqKb6/ffzLS3/YN+bGCoUqCyfNKlShIgLt4nQC9FCDi4baD49QMP/Vjv30kwyMyVn15tcOfwUZ38EbliTQSACXnWpQtf5V2+oyR/fuxsUgnimN4/7G3n16RWXhuNNlGIYgWxT5A2XJ9ZbrTZlnvFv3NjLOVLGWyFWgS+2I8IfT37s+WZ/sJboDUhBZg3dQZAuZ2XoAvDkB96vM5M3PfTTJ//YooyY6DO/4GDH1UXXuE6LmowbPZ8Ctpq9+uX5HRDL5iVygdmIVAEsqaYKPg5FU3U+ghBzDzDRaXe0eEc5DImLSv5Gak8DYsDfa1v/TeOcQlWp5gqooYmg9WE5+rk846h29McKG/L4AUyTnCXAJUo5TsRkG0a9K8k0sx3cbbo0pLhqNmJfWa1nAke8/UpXG2cnMMUXnOC5dhhyTx+93vjnP8j+5UsBdOajo3z+td3Emgh1I4wgXCDMiRTd4ajw6yqFTPD86s1S8aUvvmE15m+qIAfTMkYQUofBgTuShGZoVRWSGUTEwUHScceRJfn2Qd8FJT4C1z+INs4p7Y0mpOe6E+Ol8B7RIdVAS5NrGppgmUoMKrmRiEykjFJMR6TdkrSOyXolbhGbocg5kVnF62VZ8YoQXfnNTtTyqujxiGwAbpw8F3L/vDVJuZe3O+C/qltLWpza6euRN/7ad70dkSHNK/DTcaHbqoo2h5pFJbh0jUxqCoxA+3AZ7fWMe2ivZww9sEPTIGA7/G0Sg8AiUozgICKE1ZC4MBkX0LCwmelUVDCe3BKDuDksesBMdqJq2o0Crj88D9f1o5IS7hEUkgmSgTSGcZ6ABSPHdTL2s7jfl5WpPIIq1iiD7bqXAAiDb91k/FKSICSU8hzFEKIHw5gDAEh25A/5klwzIilqQAhwEZMUUXlRCkL1LMu0azgamFmAWwPGcvdyf2zeyWDW7PjbmorEElxl1gtWHXS525o/+cg199rA2OC13wTyWOQmcXTFn59PPNflyTsLxvsQyaIcSPkDfKQ24eB6yT9q/AZSAosn2Bvwrga+yFz8LJGdnODPR6oCp9eaYOpURB1xwMO9vSg+bBg6NX/BYMxqFKwWgSIGjUIathuZijUEZayTidzIDOW6I9shKm1EI9QCfwfhNE4JwMOJOD0RGAEDgKkD9U8uJTvJiIC7ZtD+77HiaIIRE8k6YnpuoXmkA0PPXEKI/StEGDISGcqIEBA0sqx3IlAtRIDn9b17EYkAxBDQSY4wpqUhFgnsxByRqcbFvAN+CKGCiI44lgWiB1JC9VjcXZd0kGeIM8bgnzq+urznoLeDHIwg6IKyBSdFrGbQOMQ5iDl5OuVvSSMjPuC95EAHA2k/8oYBmk4HponsMk+Pf6cd929dt6xZHPD+et1Ii2xHmIe1Q0qB0wlWnvaAHjNWu5yUzBVoH5WNXcFXkoUdrPOVBXhf3V1q3tofHT7w+z881dmHlWh40vNgGqc4t3Gze0vYeZt3nrHbg0EQk5UwMH9a9dcrQozppY8ETSV5c5x8E1aVuDYfG1CWhda1ebhx+/Jjrsius8ufoepfn/P+7tf2uUs/UOqDTAvKRtKK8KvBRczab3TbqY4FFRcRZ35q/FvFlbr4AEFUkr8FA66zSOLoGwVHK+ufXKjC+XMheDHk7P9/xdBj21SO+z3MEV/afJveRCnSMaKQQZBGHuDXN/vSKALSjCCU1TEDEIYhR1GEIBYGEHKRCAF1+hOb2hduCbFtfc0erMEAmv+rpw/8hBTW2QKGLnHcngmW8jmDd/nHH7hxu4HW3gPsRaVisYGjRoL6iimWq/LuqLGsZXMNQ2iJcZXI7qbdDifoMRuD1sAHnFjvF39ud3z5vVaZvkTrEniyKwYT3Q0SZ+GPWtKuc9vrhQH51dbfbhStrU9qZtS6+NHgmpLuxOGvF+bkVE0onoVcSMKD+HbUewkPlBbtnGzljk3aw42tRrNkUKLxAZ31KK5XwOtXn25oDI6CVfv+JXWMyTOiIT7NRdID0jYFdyJl8qr+gQDrI75gAMmHHqpqa+wZn1gwel5Nw6taVF9nMklcMXbU1Z4LezoWysozIBTri3KQQQQQMEAGGSIAAZ4QRJAMCIAMIl2XoShiiADSGQIa9KIIo7+kCiOsmBz3LoFHbQW0CYUxyYLSE0TgQp9p8vjTqde/Pb6qBy3tX+r0+YPK/yHYuMAfJH1wMEgfwGTsXhDmOZy/RSJi/sJB14DnKRKKaLY5nBAzl8whNoNbwmGOykIexvNuRP5baht2nWnNHj4ZusnBqIobAiNy9YK6wwsqI2dawNwuIFslXV18UKCr8wylOGOkLn+2CLfO+9sbT7gd1/dixC4jjCEeGeV6xr2QcXwA2wYKd8k9pJDwldbSNrK4Rn7svgqkIZRFcejA9ZFM+w1Mi7CEWk3UAAx10h0S5+RsbhhrVfqsZBv2+aQf1Nrv3J+KZCBCHElRAGIQqlmYxldRiTHZIpusuIvZOZJlRoAQJ0Yyu0GA6gHvPtC48MML+zq++7ax9GVy32/2JDprr3rx29/8pUqW2sEmcUxLx9qV4RECMckDsIvK9U1JeYvgziheqWHAV+Jz1fz7w+tTIrd7LB21QwFyxH7919mH3hugWpdSv+bYeY76Zn66fSuYqloZV/ntcmvluifC8KL8t2u/GTjI/bbcdNUbAYgIiJouCZeZgQIvg2AdQTrhs9nsdY/KJs6n3xAuXDWquszFJaNZQnYXdr//vNbX6KuuxcCMVr1wY8u8Jr41EQNaS4u2xiZGPBcIeZxzQoI5zXOiIUGLTH/b9I1+rahQ//UezRMNrHeq36d/czzdvvm9ldX6xzc1HQA3E9Omx+KPb4NIFgwLvgUpqkbqLfkP7P7njmm0/TllQWPr0ezV+rblP/p0vVYk2JbJ09is4NhpEGv5ZJk5G3HJuJDQe8GNqaxRvI6pgCIRJvAwDRnWIKBrfDdxOCPxbqFWvx3oeYQKG5rCtC+Sy2BRZvEdvmqsix7VhVW+xeE2bkKLo91dhbg5sNhm2XsO/cOO59zIlX1ld20hebd/p6Bwd5Nh7Fvotde74YHabcb6gNi+H3XSoD1wf+3crRHmHzn/UYj5X5SFS3PDYngg4xxa38clNpeMLjkefresJBzMOtF1O7mNq7CsvpW4Pvk33PCWgxVYFT5rMlYXQ9iK8FJ6t6ERKYFkxdj48txQtabV/aYTfPJ5tst8kRdUvMDh0w6Jb73wi6/ePP3fnQtMVl7hhuul2mG4pjmnx2I13rYkDY7I8cQPs/NL3lD5BLX85mykh4OgevwfFf+9BHHN8eMPs
r3rfbilzB4R6ATfdSF9ZUyEQDwuemQK4kuECp5vtXYLVnOhudVi+w4ek1vfG2xbVK5ueapZ2Dj5V24q2zSneFF1WRN22SoM5uVCvU5gs6nwu7CIRQXMLBWZpeaFpIx01dplhSFOK3mAtLbPWRR45SYkcEKmtxWveDv7QZvcQ9aUq3JHxDrS0X40X5gWanEzpVnK51NHdaw0DoFBQkYrZtMKJ/Cb87i5r9YunVepjIQMMiEUbYFaCTJAcGFCaFpCuS5cUAKt3lKLwdp2Q+ZUyfY75u+sbTm4fbMw6ap6e0pRNvXYjOlYApr5cv0qWjasEhIZSSDsouq46ClQjW3sBtiRFDeNJl/f/53nO+xUZwRoH+7wjsWG/UC3iXEoBxHu8EomxnHcKQtzGe2e745Nnn3cmGl03AG5OymHwpA12eLyLWYIuylIFZR2vga82e2iVoE4omHVPsMGgmuFy47atm3S3XYlLc0vnmyqfLKjUqqZOXJMwsXYNa2945/+4cqn7VK2Tl5zCF6S48gOcZwhH5Sno/wGVuDaoe3Xp2ERQh+UjsT5RKYSN3JYdshKKAQdnOhAEHY97+fGYO3Joc3hCohCHH/mta3fK4VsDX32ypmVSi0DogdqKVlHfhR40Gswcd7kUsjNYnW/x49MpMy4zOobKPYWJMT4sAxKB2/j8rj8jYt/8XXnTTUuVYV5FAv1qhlDEQOICNQw3ArtwAcplUSiaRRxsWtZZMkEQroBluMmg1LPeJMy1aG+MG8+yjxbr6LbOfwLbkmhcd3Aams8p6h/GsN9jOHj97VXrMHDRA2BCcWuyhOPeGnLBLD7t031fKC/sK9sgb7TNyBFRc8/vf8vem3JTTzJuaez+3NkIpXLiGPIe6sm9h4Q2X/Zrs3X7h7+uf5zL3CHisd/l9dzwSM2lB0PzCnhlrN6T7zUW+AQ1K5XTnpP6rYs6AXEHkP9DXkFj3CUmaqmv0VwzTzC7eR0g6jD7xpr4noWLd/c8SoMYiHbc/3Xv29JfbOnHUFCS9uhuv7uxSFTykKLBGwsGfEPMeDRCMIBzwNz7QnV7JPX37RpQ+Azp1vXPqGNyskGWGhViu+yoyD13SXDYrhDwXlImYlpAHiF906cb7lZWwWqZ1ZHP2lkdpogQWg+d1+4eBltwU0Y6TBa862ORULZzWTVcq4SBwS5Zo3cV5MtRzYTO2snq+0eVHPLmdutBl/aGcJnJ99Lib1iRECXMDNGcMbL30Fnst6v/rr0TGafR0ICB6Cr+lbOsevgiMyRLMchsXfrjow1A5ORTDMRUUbAD6BTxZcapp0MApwZzXn+MfePwPvV+qe97++fhidu2EmokSgEASkdvHv3JI7VQfl0TQrcVXur17GCRBZypdBgcBkMXE5ozphLON2AoC+tF2BEh7eAjye7ApA+R4p2yA3yaAD5laxmS9Xba5StZ4v7kUGuaDvEhg+O/gymFy6Zzf6WddTEOI/InF8HHZtVLqDJOwYjVDHqBD70CdNa0D0qtK3mhipLtztQpqzD1XimlCr19XyBsPDqu80nu1LPrFkbv5L8UvtObYRmT0UhhCiVIVTHcP+e4jiKmPiSuQBjLuPYNv3KAEP0Ry86qodSn4R+dBZSZiQMaJG525iXQRishS0pJfA3fVE9hJVqTIAMRRGEDBHmo4ZrWQCKP3SodPfjAnPA+CC41iE+u0Lbn17epmz7yQnHOZ5GdBun6t7doHlCahIuix+lxtqyj2q/uNgkrEOkmsHZq4JBOFPDxHr8gFYxNP3ddM2FsxGWJbk4UiALWEbigrx4C8s1yVkRYSkID/0p/UYbWmSusG/w+64Utr8aMUMZ2QkMCm4O+mMLF7rMHUW7iU1iKzt8hJrdaLk832DKIP/BeFwqRsblvCMkxPo4uRl4s0b8MMzO4deejKuBq53w9zoj6nkTHIhZcVGD2W8NEgfkZ40U2+yqQti4n4m4DavzV77fFpmSLKH16QV5+GFi2astD5b+7jf/H3Kunu+ak8RPxthbj1Zf1qw6E1QOwHF6fu1RqXCU7LoGkQNRVQ8QnFxtzBTeOac7mVKhH9lReGNHktu4U9csvh/Psy4wSobc1UvkOEdTiECIdRPfFYsg4NVjhAEqcjxW8GUI+pTcpj74E/j51evfe3LPdZPRZ/jsyk+uXjr7cmj36n/K31AyBcU7mnvNHEkjIY4c2KjiDtjPeuwoLmmCIBFkhD3A5n9a2IdLi+XbD3fGHioOudl0YI9/h/+Wu0Byen+ud1fgU5PyiWyqjy4xblciiZIbpDtW4oWynX+MGqpIvv1Y1brzTcK1VyaHIQt40wRmFTWM8qcvuAMrS+cUbXFX8a2Htuk+urSD4580yYalkaA1u3LAdficBF2OgeYH5q3UN9ntbODw9I7KebXZdIUfwySP1w1wQYch/GrMh7rtVnOKzUftnmzX10P+aMucToiKZemxjO329EwZ/BwMd5AwEhnCPZPoBo/8zhVLxsjHkmw+UHrWGJnCH47BGqiO7HttPd+ix+hoCehV4+UB/aTjBP4Kiy+dkNjqrEcL8v9SCIEoiSGCQETAnE7hEUlWLS++IQUDb2UCBZ9+Y0TnxzSUgkzge4CYZKMCNy5MQwERJI/iCKMRLxQNKIMS1PWbqRO2iLflvmdZx5qXgJpFD4poM0luBBhghJAI0SJ4YBzZjT36bz1BxMIiCWIHbzQSoxEoQFvpmRZhs4gsCfutKgGhDyZwTpsWzH3+Gl4o+8EO/Z+PbR3NbdKUt4+scnDQ1Njmfzm4NIriskIk/dlI9G+9GVrfA988c4iYdtQNOF13sc1SMUIO6mv6g2AvRmQbV8tyQiYDRT9Wiurk+wQu5enoTCAYVzHqt0Wftw34Ng6oqquqYt5vbu3Jug3L8OKo0dROjDt44Sgq3OEuujVRYI1RuGoRVyPnVBZbh3CIaukKw4yvUFnvcBjRjrK5A78dfln/tK++TG2LBR9GDTcq5kKOtnCRRDkH65AqpSUkWWC559h6Z+auSqEqhE092eO6NCYf4FBLnSqLImqW6XahelKig1aPI3qVOay1H4Ma38Uj8YP6Pf7AE/vqHui1jkXaTIIVOZYeeSiXXo8EM+e2k2JsrcvVbnYiByFlSM4GOmUi93I3qM0tg1cqCw+iroPZOu1CoV689F3hcraU8jwXrYQNQ4HHWzvLJrwKJXWw90qvyT2gNgwjJPUANA9aEpb4URbp1xQUh/4n4RqrOmMx9hrNT5uiWW2JX64t9OPYizvZ1tta55KbjNZ5ux+Kp6mCZVbjHFD8uAahSp77qIdvbp8fkawlWOYdqDit15D1AkdgGCJrnhapx9IZPYZhKXXlYDnMoTACObsNoaNcbXXcJv7nrS7TpV1T0jqNVeAAFoxcABlPHXT/yXvxLKImrA94owubwyZz5OcMbdN+rdSc8cgcCCWJbYFPhZ0QreBcD718qZnfv9dc3CCbE1MtjN8PybSeh5EVvPV2bNIoThve+20fhBBJY+JvX/Brdl6oIIfZ7+K5lRsnnVSWmnkICfN6kxubw363IveQPTDsY0Dc
Q1Q0+Q6OAIBg8l2EY3qPyLKdutczQo4nHxSZCCD09oA5fzsTdxA9PPI30Fqz8vbNYU3DcK1dRRGguuKaGKt10+9zy1A1qRPMT0qkBJeE4hTZkZEWFCdwor7YKFLpTNinqiNKtUPEVlhaNpOzByO3ftB6A/AnDc2Uz8gSCa5VA5jnfGhIVh3GglsQad0sDX2dOBNxO2B1DIcUKsn1uhOXojwogg6pknEDjnVH4JDIBfsBwwwKnAN5nZyrK+HQIWvKA7ygwzm0LsIMcMJqXHfrMm5j9f5TyQElMIlHPdGetXUNlPzeGSceyN0k8EvNxmPU/o6Bv62s9SK3FCYsVxMGZ4/mh0FGyec10krY5GgMVtbqMQEzFHfACudQBXHG0cliicLXveJGndObgcYl34p+YS8Qbx2T7qpHx5WR/35ZGUtLETyDzTXhX4dzuiF7O60mWhoZBAFGIAGbvMln8UFPVlX+sLE5mvVumdy57mt99NlIUmI+5ru1JzWx5ztCljUbPJFKjxSAdBLCE0ZTzwgRKu15lciqBc1I9DKxBq6EWkp7Xx29bVzsuR7GFnTvIO90qEDTFSQgarbGkE6MECJMsK0eAYQUrAKRZglhAGIIhmUbjjNmLTQO22nMZI43PtllFU8GXvCfmfxS/jBYHUu5gQjFtJEaPNgbiYRv84iXwz4KD5HQGtmlMoHBId9AWddO/w+PIKCRTSB6mRZk7DHssVvFxHKjpxfoGWOvR/T5xJiqBFdzEwt9t7S4d1Wv5Hi/MO/mJxfUOxHvoAIXlrUQ4VsxPef7+R72vSC/BOWYjo3XVpUaDUPBkZlFVs1mbif8jOucPYQQEoOY2omez20orUbEPVx0z1draXfHVlS8WkFVXF7g3vYfXB5++uDzDySv9JS5VIwJi0EJExOfny0qhUNS/8a88Mi0YEhYPK1lh2qCADnk2rsStnM3BuZ31CwyGF+OEWYrCxQJlFBrw0mDAAcgmu5RIlG595neU/biJYgQEkIH7+uMwdvucBj+4Em/j5cvs43Z3jjxZzE9lVn68snm6nqITjaTv2aX37J+rpTYOn/FwpM/tY760qkWHzPDcsTrLfhNo+p7QJIYijFPrFd8RWTTub17vb8sWmzNmj+hM3V9NW/GLSwhL19h2kRMVsoi/A7MfYbXLPfVCl5XakVdW8COIeymShXhvHz8TS0IiAvU6c8tIvi6WqBPHrocu7bfYJxWFCYjjqjiaIAJUOTcMpaxP/2OkRzm3Cf7q60l2F6Ztf6QUIlr7N0MvG14G/SkULn2DiGU5f4ef2Hb2uGvLzqJQnF/77MsMBZ72GtvXfvak+cnB4xcO9lUPLp1NV+/FGYhCmprMFw2ta1TMJfxPFlNStJZobwNLsuQgmPvvrz/zLFrP79/2FT2fy9vvn54bMh8i0TlFu8oCJd5p8Y8PgwlKBnc9MrLBW0Q9yPUJZUEoVxupCTV6TLdRs11ikHuivSJuJKXu4AW23RtMB5BDkSFSE/uhodUaJXJV4h7E2b/My/fIR+nGnSaC6okSJX0TrBe39Gy/6bjPa2f7j6dxyXDf7TYz1xT2HIwsfxzs9/NjfuzLfrLHpNv8OI1zihEKH72Lw7AWjEgstxSwK1yj9jLeOgJg17zful1RH4SmRi8c61e/HH7IC9YBPyQJPq8q3IxAQkAe5Ax8jVHw1CEl5GOBZZLgeakgDoGus5qhmkjHVgn3f/CEf+3LKBO3xtt8O6K6c2RU5n/Xrwndoq+hHbzwG6KP+UVJYyDocWL99e/aO6Oox2a+88FyaJ5ubwNRi7A9nqiGx9efued7qD+/yWvvmzOjwWcmTPrnnq0cF3rOyM11gc9/F55fKRfeoF0xh4YT2MNcifD4p4JrOzCgXwBZqm6/5XmUT/kUfXK3P37cqNQmqid1/lk6OCRKsAcd33VeeTuI5nRvQwkN9SUjb+/2LjSYKiTHhCJ2hsZRS1vPBjs+OiNponEj+d2Fvbnqu5r2zz1Hjfn/D9Pret5vXHrF4N5oqf1Ub7s6MiHO6/OtKU9Wcvid05t32PH2Jbfmm/wZsLhggSYt2q90GGnVQa63Go81ZK2FdCqXhtXWFC6YPz0/zg77z9KZ3gaVpS2vyuGuweuKZFL9y0AxafHhh/mNz9cKBhm7GfYMgnFuTDDAK5B/MG9HpSDruAvVu6UTgVL2mdg4Tn0Tneo4cbba0SyjcwYCUflEuoBAAYQkansaYN5MD9EOKPMpVKyBDnbowIYU4/N4nip1OsWCgK7+zbXJ+57+tT2igLPHxqHaO1re8OCCsW2YjmUh0157tMnvzInuen04S7kb5cxz9sMsjH9j8lzbUYVqZLVWRtuJbbnG+bJZtxcGjfcPF4CIevf9ONpQ8rOJ9zViX2/e0D6cvSrzmMvyNuOR74vhHtfHF1cgmOCydrMyTGn03ryGStXOGhZZ2nPXxI3ztip1anqATQYia332B+v/ddHKBT3B6jq7sGx5CaDjeiAB+TcXP1ipRV7dilEJQ6CkOhU5Zw7jh+3n3AI6r3UkClJ7IAT002mrvsfvwe+v/r7Cy+czak5lPPs9ugmZbU8XFYpeIytYMHwJyBwvmPvf7crYh4N6TtFTm2DaNQkTil0mk2kEc95IIexKIG7wxGTerJZUiCGvVokRkTS+Fj64L/ffGjv1T6oT+sbDajUgWzdWPbVKga7LwY6VWPeRij8CuWtLuVaBEcGe5jMFUO/H4kQKcPKCLh6ZkP8wqPvfTYFS7ac2BCz+PBiB0P4PjmvIbDswM0uNAcyqEPVrahI0oFs4tQ6x50Yv0bwRdxEe/XGAcYvmMQr3BapeIw1BirRLqv3+s/yDOqES8aYElIvSbpsVxuhajVOyKbY3BEo8Uy0oimvBHsBjjKgdaJbkOaFjMIlIaIaOS8D2RpGyjBhGZg7pY1RljtzWzjP27E4Ou/AYq429H9DKldUhC5hToIzCDp5aix4odcobKau/tLQ7uZ2QtDF/Y/3K5lXh8rm1nwxDfSNwuSe6NCm7z9QO6er7uWWlkOzPxPbalF785KX91189LmqZx/ZOROyLwxxU0O24ErGLeIrO3ek0y3Lmz2Q27fsp0XCr6BRRy6Jn2dorbK03OPhcrx3aNU42fp0oVRYUvLN0mKbKoeHU6/vqeysiZJ55ubof/tWAJwXH755y9ojh88r5LGufxz56vITv5VWGC81Kv9eVG9o34KgFA1PerRtoKf//AsH1+xabTjgOInR86Ftn3iTnsmhLAtPy7Ij6+3ALNRagFiwuPPHv1m1fZ9RcWLuPoweAft/5rmYXU6Mt1rJQaRcWxYqJfo4O/zmx2VfVPCgbvz1rxn3jzWdBbG2n8Pg76hQu2NywfzNN297z1wQYQNWkhQ8ecvkdsTlleGPBisf1bZoLdgTjT0/mDFE5tSflbr8L91QZOKXTM+7Zbmv6HJv3ZCRe6htAIH//uEd7IxbnDBJW1u/w27Wlzc3t0i0ynHVvtXVNBho8NXs+9EfXZotk1+U0Gg+lQYFXCZoOxrk1cyVgC0tLA2tOvu2HQdEcOD5z8senGA
chMJfC2KZNdlI+OzjDF5Osgav9m+djfe0YktiuJZMUuc8klDfKfXd/2+19xebUsG/Zp+O1sfXjWsUHXLZNx9a+rECI543ol4H3gOEYPKUAvPoutvjFN6U5qxD/nvkjUTNwU1IJxARNqHBcQQ8byKJxKKtH5WzC0i67J9Ob2L6PkAo0ymK/F0DBjb9ubGM7hMjYd27pVfkbtIGq78B/vM9eed8Z0/mV63bELCarFb17uzeCNUkcbPOQ5a5cuLNoa2dQyKGhTyv2NTFX0TzhQ6E1/P5r1fs6uioKXA4JxyYi4YX8reSe3+97N4Nanmqh4O82Zh1y/PvPldT+CwabX/hTMPJwc5v7I9ApC9nn1aW3xxYcrBsydkZy56IO77uyYRWvqGlAHVMttD24JajVBhd5qzoueLMc3U/YRpo7Hhc3Sf8DBfSfefFsZ4eP5QO/9Jd12uoeKf4wJJINkKRUos6x5OtsSsi0wgXMNOcEDHQrCwE3lfNyPeV7C17n8jMPZhZQ3/oylQWG1oNQ5/qRqcYcWBtnK3ef6OHyHi77hvXp1XuYrM/vo0J9Y/fX5VU7e0BEL5JemzFyUOWqP49G7Y4oicLvnTZ2HxS+BxE3I2Xu59wO+cWFuwMBZdHDThdRRw1kkFwU4fg77DeIKOlwCg5eryDGx+uY65KqOdhMcMkmTYXOHBy+vKgsMQsj7gbnciQIokYRnpgXofP5afVpYKcrMQGDojv97Wcjt3rXRbv6/9c3ue8gOjH2np2wd5DWNxoYmDbxAHXbt5c8CS8d06UwT4p5jRunjR7CDUPBn/ltfZkf3VeLysV5qs9wnKO71XmlSfO8oehXxP4ooH+kgW0CpkkuVihEHAGBGvx9OVKhyETkDwOI1zisf19/vHbeicSndh89qVMuAXCTcbLevNywl2+4qEWhtgck8TCqL2UBqBqQxSV4BPytIXYQNmK3K0fnH47tZMbnKDZNuPJmx8POiwTDmECCudljiBP9/NFZltcKo2Qxq/h9CXfN6N7d1tHpAwSRXgKXq3L4TU4ngUNYFRBgWnIAW4mnuUCT6SIpkHq6XPE9RlKCXAAtMGha8u76jrl5s6gud4A01xH4YBN5gMojmfUACstW8M2F+GRRYny+3iuAEHMI+eqC+R9u/+g16sy2YabYMBj+ey2jy8syqwoJVRQC8royRPZ8ZTgInYT3wVNvSJCAcShoCpTBKv44HafRYalQT/rsbUX0fuJVJxy5CE8sjQoz/yueEf0VzDpQzJw2KYfm3Y6//fhvafe1m4Ei4Gu4uwjQHOgWD/UbZqmAwlm9btltxpV7ZqdHO0nyB8ZBwo8GufgqPjQGliIyAKCYJtNhC3IeZi53COEiXGuBoCIKvffLpPxrz2fvD3++54rEZjk/ZfxqUMORq7EUYQSXpAkPTP9ao4cR1a28SZ6SdDFzCPEIGwmJj2bO4vf7xGXjP5EoTDtAw4Xd4l7by089P0V+uvfNnLNOMc4J8lbjIqqVHvWXuyViy+Df732/2vY77VDg4+G9Yktfla3LDD/85m2Nb1//2AzyKB3I50RZbKLpReXFJei0DWwdDHq9rkw3BvBiznFg0ynXvnr8p+2/eEt9ruQaeKsgqUUYNGk1pbeks3pTGXxqZii6CkvBN5r4oQ2CiQAoM4A6vFEdNASHs1GTrHpWwM+Cfyy/UXmWzXgq0qf2y4dmH5JilBxjdBX63/fzuVe0+vflU9YcJ5rGbV6hvbnDxsEk/7ejgwvQXqDw2xCRCQov3Xy/tHkT7M9hkULuPkuTIj0G1O3cf4cvlONmRepokwZMhbgZYP53XffrZS1Ftzn9cGeZbZtO3JrPw6ewySchz9TwR5f3nY7ZLD4vl7Mz3M0pI9/3O5b2GPznfUJwDu1Y/Qg6Dlh/0rmUm0XY6EewPa3qv6K/F/86PGN4luX/kK65wP5Zryb4L0IX46/V2rvJUX5vsSXph6SXIGFPSiDl0HQqod/mWOKBjM65YU9o12mmfFpqPJon7Fh26V3n35CoYja1dTPi/pusGULi9TxtXNDToHt/WX9ov8gMcLo8SVg0eBF7ttywRxFalbjYh54M981VSXiPU6afHrKmC0CQm9kh189YiTv5A0MwYwjPvPF6IrWrsF9DP3Z26PkAymEh4TLCz2bPJcW2DdlXtQfXAZhZyJgWpG3xLcVrz5bd8/EoX+3hro0EPgyter1Lnpe2zudJqDUsDMQ94Hx8whwjs+zSwRwOtWeeub+isYfeYD5OJbJXq/PvPiWoMGO6/exT1jERjZ47No30GBSHSQfdtcXc+vLTLuSkXq0QMj7LLf7apnRBZ0qXBUnkUZbS4WrqAySkqVElUrxgzgxx6XBrV7jHfES37XvFty0rtQh/punuI6gVPR8y6te/k5rsr2jLgpRXIfVb2yKcMIURhOaQYJOQpFEEm1tBDOPJ0LQVeTxiQcjAkogGKQh4q4VaIsd51NTiCvhP27c8fTN0V+h3X9S/LfBXV/rFKUBDi1Wgu4a8CY/rd1wtuPXILS5/+rzgQgzElkyMFlHDAGmIahBVk0IAxTaZK07aJ0U/ff5uJvHLv/Lsgr2BGcCrd8f/OWgZI2Xm6UrE1CFyANuVmMKgFaj0GOQalDAD4lQxLF3ISxm1naL2WJHJnOeYOCGLQJqLUdvFAhPG4StG430XY70+qAqIu4itK7lQiejP+bMML+81o9VBIs4N5f+GbTkwhHO34HVXcNxR89C0wqcXFxfAElSRowAPoQaSjCcmnGZ/8gNRcN9c6RPd+1d9CFwyz1ZRgCEkIsQgCFAXEyWQxoClBB4AMJJEoWQIF3zQx6fRxRFYIktxgjjBCLjanJoVM8wU/DB6L5NH7CwAH6ps4f0R+h8ve61Uw+M7jqxp24OJB7sVLljnMXY51vpjDyM17vm1th63+l3GWfQUkczrUWCM1Oxf9CzrLXS6rvLNT3nSL3VfaMqGXqFFo7nk7wwTg1yxAhnwQ+feD7cIY0c1Oo2BGp+MSuxx44q7Yt6C391eyaiAS3laU56/KiFymyzJJ/v2YnSuh3KUM96BE9/lrV49V6GQvuzFvNb2unpnz540zvpUyZURYPdkTccdXKNBIuzulBrLyuuV5xx6Zc5vu5p8PDwcxAktQDurKheirYutkr8K6dBtKIXS4yGtL3wl0ycZ6tce2nQ1lo/o/SpjTMhw4SZLhCcIw4FQmlLs9tE0K2ceeKHL98jjoql30Fw6UDPt66DDi+nt/R7eyp9xzQBBPctRETOFIHVv/B/J5lcQV7jJgM1Rw865KvxT1n7XAk3V3b4vWLBbWfRq5nK032FXXrJFvWkdOXLJ0VxtGbPndJ/Z1pxV/Lb59dsb2kYfPjcqtJUjfZgJHXz+p8zs2uJzlx41Rd/3cvRBxdxHx4ucun/t9NgHBO2Rt9yFpPl1o1pdCKSGVv8QR37zaaclzmMW2KxFpF8sHCZrDNmMAir0Cdc5adXPPumJzfeMXAksTDyi894ZnCZfnujIDnqhG8/xa/PjX6Y7a3cevDGoXb9m7Ee04FlRfS2Zk5IsON/8a
xo3vi4wG++eiG+W3hepQu+sd7eNd9f3DzwBUVnb1uy1/dGc/LMpGnHSzUhOUPWQxj0HN0xVMl/tHQZdzo1j+OU1hqQOj0W/Kawq6/ytPZw/a0G8Vv5vStIYKT65cauL3ZWKz/u+SFuW8IlzXlLLYcT/qkpephIliWeJRGgnTj+fdvtld2KYej2h801tMVieSNugfDKhrfzbwGvTAXluRECbhtcaBZw2yUIquaneAL+GL1Euf/7TIwQuIQvGVzwFE/hf34WnCEI6I0fvfKzNUttDV+B9gBw8f+ElwKzu7j9VHCE//dbEgLc/X/ORA+RgvL/WV1fe/1BGgAA7QCQQ98U1bwtzrv7tKx20pRSm5wz591pn5/pEPHQdqfziuCEUbboW9HvaXD/Atadn1pkzFxvuN30l/XzxLeWvLUb8tjdB/ZisyE+JsfAGr8L6Y0Aeqn3r5Pwg7gCAGYnvwgAnAAAWeC2d/EpDJC+vVqBPYzO3KixWSV8m813AQabAQBQCOyLkIS8ABGrMGK1GpHQbEHOVrcjpUVEgZKok6ratAE0qcku/B6w52l8C4TzNr4NrPnG8H0glvDITzqQV04QFRZRy2OhTTaYl202Zp3Vxo0asEKlhaBtg43WCYwe+UqbJ47aJKJSGI59pkR23kD3dgEf61WgBVxteRj92k251Xtv8MTSsbDXs59jM3DVhqlx0vgWVKysVQcm1o+ujatjaqKSuuJYdEsxva2mOq5uNZbuBXlUTK47cqeWpqSBnpbR1STk2R7mhuW8La6aUi035uqrWNPlyo1k1hsnVm7YtLIKlscmqOROUd+8Y4VnbmwdHj1s3zdWX11rUkyNe3J74PIVE6M0ladyXGvbtHH1oibXFnJH7+RmSmjTIrquNTki+O/qCfz9dgALYoD5A8FQcUlpWXlFZVXcvy6qV8dq4rV19YmGOenWtvaOzrnz5nd1L1iY6enty/YPDA4NjyxaHAQJ5CCFPBSgARqhCZqhBVqhDYqQQYmuXLdt46qINT7Kb75hdTicDIuaaDgMEYiCDtUQgxqIQy3UsZOiaBs75qR/C4DDGz+NAwAAAA==) format('woff2'), - url(data:application/font-woff;charset=utf-8;base64,d09GRgABAAAAAHo4AA8AAAAA4KwAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAABGRlRNAAABWAAAABwAAAAcgxtSpEdERUYAAAF0AAAAHAAAAB4AJwBNT1MvMgAAAZAAAABJAAAAYHJYlnpjbWFwAAAB3AAAAKMAAAF6K26sXGN2dCAAAAKAAAAABAAAAAQARAURZ2FzcAAAAoQAAAAIAAAACAAAABBnbHlmAAACjAAAc9cAANc4BKegHmhlYWQAAHZkAAAAMgAAADYR5QgpaGhlYQAAdpgAAAAdAAAAJAuBBZ1obXR4AAB2uAAAAKwAAAEcKkRAzmxvY2EAAHdkAAAAewAAAJDMYwHYbWF4cAAAd+AAAAAfAAAAIACfAnZuYW1lAAB4AAAAAXAAAALsHaNuI3Bvc3QAAHlwAAAAvQAAATbMg4Xgd2ViZgAAejAAAAAGAAAABto4WnAAAAABAAAAANXulPUAAAAA1pYy9wAAAADWloq3eNpjYGRgYOABYjEgZmJgBEI3IGYB8xgABqAAdXjaY2Bh8WWcwMDKwMJqzHKWgYFhFoRmOsuQxpQG5AOl4ICRAQmEeof7MRxgUFD9w5b2D6iS9RfDMpgaxi9Me4CUAgMjAGYvDc8AAAB42mNgYGBmgGAZBkYGECgB8hjBfBaGCCAtxCAAFGFiUGCIYqhiWKDApaCvEK/65/9/oJwCgyNDIlCMASb2//H/w//3/p/xwPKB6P1nt7ygZqIBRjYGuAQjE5BgQlcAcRJewMLKxs7BycXNw8vHLyAoJCwiKiYuISklLSMLkZeTV1BUUlZRVVPX0NTS1tHV0zcwNDI2MTUzZ6AusCBLFwCa8x6LAABEBREAAQAB//8AD3jaVL1prGXrdh1UX/+tvtlr7ebsfbp96uxd7am6p7333VtVr/N18+xnJ35+RGmMsRzHcQgoOA3BpEGRpUQ0EokUIoTAilAQDkGyFQkhAhFCCVJEEixBkBNEkFB+EBEU8QOJCN5ljPmtfd7Lrbp1ztln79V+c8wx5xxzrif6ydefPNE/437iiXkSnlz9qnry5rNfC/bJP7r+Ve/+589+zWh8++RXDV92fPnXglf/72e/pvj6TXfeXZ5351/XZ995qv7cd37O/cQ/+Ytft3/zyZMnRo1f/FXz4+6vqF9U/4n6O0+eqHnw81Ar+auDT38Wg59+GodTNR/86Oen6joMfOFELeb4Db8sThy+2+/2Vxp/73b3d/e7h/s9/r/d3eFffoOfb/nileJv7/Bnz33UClsdFtz9iG/8sRqwS24ZO65N8DtuBn/eq/3t/cNn6o3avVcP2AZev7t9uN3v7m75ljv58b26u5c9v1c7/ubhFoewl4PBBrb7Wx6kkn/eq3eK73vAod1xg7KT6Z+79+qau72WV+6xG3zyM3wQp7nlFrnx++vFzfXdlh+Yj8PN9YJXcPdGbUePazLgsqhFujzqZi5feKYLXMK5u5erhYPGFeKFuMC1HWqNT57g7G+u72/mOMUrg41f3t/xkHHib/Q9vgy+UdgB/sqNkMt3c403mh8zRimtjLan3jini8zmtvPWKeWdLoscr2mljTbGl8ppVyvdjtocK28sPmfK67PGNpXzThnHLRU219pabFIp55yxUSv8zirecK+qRrtgHN8RjLKmKqLXNtja9meucNlswEa1w0eVitpqi9/iP5NZ7bEh6/iaUkFHj8NyGX7AAVp71Bypl3nX4Qel8SuV29Ka3BVqoVShG7xLB12Ouow3OlgT8CalrI54O7ZsrOGu1LPV9/lM+1w7i7NwJvD61Nod2SaY1kYfrapL7LQJ9jt/ora5x5FGbMGXVVtrGx0uU9/7WW0Kk+PDhtdEqXedznhZNK4nLg12i0vKy65zb7UpmtxZ+6QsVBvsad1+5k07upW1gyv92m8W1bu3xduj+bHHpYi5ttjs+vnFZxtnYRE4yKa19bEKNjj81PZ1b+rcWo+zm2lbhrzNBlu27mQWzkO2s0U0tYlj7Zcme6GHJlPV2fDluH1xqmOMtXoRFld+Y47rqGPm/SwP1RG3jKuPPcyOZittW4WLiNuKY8E9wn0uVLMpscCO6tJXuPgGt9Z4ZVcWe89MdO1z9Y0wxMK1M9PZplC+afMb9b5obmwZM5XhrHBuuc1wTTKnF2pjTYM/OsdVs1h6R7hUefDW9AY3AguWVxD3pML60q/XJ1p/K9riyPGq42LFoJ3hBS94n3Ff7aiXPix1qU9r8yu1Gzemxw5yX6jofe5qG3HTtZ+pZxfxFB/2GZajN6b6fGiwHVfXWNG4BVijuYkhC3uzLjehLMo8qHBevQNUPlHqG1/8T+bPuZ9Xv4EftjDxxQAk2Y4w8cU1DF9g6gLASbs8QOcohinfwN5PVA3bDx6WT5i4uZ7f
zIFHhMH7G0HSByDJ7lYsnQj5cLu4BnYBiPAaAOqdkq+32GXa6DAnyACi+WF8maedETcVvxcYIZp77koDHW4AwMC66zvC113a8P07fXslCJdQdnebsO7hfgS4ELNk2+IECDeLOQF2zwO93d8R3O/SQb5XhCiAmezjVjD2DrslLONE74iq+Ag+wb/Y5e2JlhPAeUy7wfUZPiieKU4Svx2SB8Imrh9uAM83hOIJwN/gOg43PNQdruc1z4cX8547lgMjQO/ly5W+uFIXdDd0EPQiuK47gLgcpf69u2WOpQ+UAXp4opHvVO/dcejyPFNLGH7msY6N87iWnoAUfY5lqWFMAC6sWAAS/sWSdgpYplr+owpLlCJE4GX5hfdERwXowJKs8ElBEGyGmFVGPYd1zIuhaFSYY5nazJYKMBOJDtbDNINJ6KaN17B/Y1xYcCt49cSZWXZ2ii1rAD2sx2msdQ8Y9wBwIK8yRN4c7w34AijBd7AOfrx2eW5dFQGvdBSO8GcAOA7YgY0DErXJcPhKy9lr/vE8Hs/DwZkqswmm82Zmy1LZjy28jIFFAUdwGiqGnfv5ZfUWJ6Fmed7EOFZKV7pSpe5MqPsuvnXz1csqX56cxOMcCNAAeOGcXO4WT7dNLHhx8Wlcvd4qwEmlWlfB6Da4xnUIcsi8kLXCEecz27giOhyyacJyb7Mu25/OF4vXxZdsP5tt9tH0MbwtXgUXa226aNvKLmI4y5s5jrbGavAxK+G87Mro9TqGUdkr1x+HJuJy6tLOAIldr02Pj2Mvvsx/PNtsTr663pZO10tXPSsXY+t1UPBzIarRl7rAgdPZWeN9yHTehys198dl6cxKtzt8LGt+RxzdcVFmXh2ZprQ/4+x7A1fIm4e14GwXrMpXz16cVcWm64PTMbQ8T1xmU2GNajoPeGGHq6RyM/evjf9y+BJvUo1b62Oh88wR6JUNurWncbysuriyema8s6MzGdbUGk5DxVLRJYfzvuwFA3/HF3/efO5+Wf0ifrjdJcZ1TwuFEZH6EZMCeMSCBj3Cogkg+93FlqZNeiNYBKoyJFIksDUkfBEArDXsH+8ZyQflzcQ0wbADCglfNAIL+AR+Jzzo8PM8/XsvHFMYoPyT0IloQE7I74XrvVcfgJNeaK/HwY7puHBEI48C4IKfZBuPf67c3S7RQVAxoba4DAKtaWdkkFcglbcCr2SN91f6jbq9Jp3jkcj+06ZUIsvAIVyai90tibPsS6gnNi4AS8QCmumfsuRAtGL44M65Jjy9tCRxeD2z1nFRAcCsd7FvVkpHOGqVw9ZhoAF+DhhCUHLJfMlVYICWTAbfRcACOSK+BWwUoGcqUyRhWFawrJCZE37A3I7zcw1md24rbBJ7B+5hJQEYKiJUARJgbeuADNhHjHC8LgpA4Ni+r33RfVyV+NjYAzUHw3dpIT5W/vInHBMoIM6GnIqsEYgayRdbvo2OP/fhhVYr7FoHWO/a93q0qi2UOzUleB7eFkg6SD0UbUBzU+7ndIULhe0aksTKWWFq5dMWVAiQ2WgCS1YWMC/VAXmBJrk/rto32TMbKxvmMRxZkFOvswieA/MFOtmRjMzvgTsLb4506GyXt7bwunLNxXh7VNnZiB3lZ+X52ek6bG2ZW1CY3AXQoU2sDTATjM0ORRjDZTgxfoFtESMqvZrXX7XwCDYa63M7DufwSGWJT/Y+r5vmo3f1wmQlyUsGUlG2+VKPAVf1JIDFgM/k3Ar8WUlGbOhocDML2y3deIY7i58ysOnrqvyt521+utJz9QxvxkJp78vlsqejK4HgQIRSZX01m1eAr9GoqnRd6w34uOa1JMCTWxd6iDM7+g3sF47GC2968utf/OfmN/kn6t/FDzCGhTh6+nUEQnNEONf314yFDjGSmOIo7Gme6Iu/SGETTJ2AEIgLggkS9yw2mrxkSG+lUZHGiNG/EzoiRgRCIETlzYGbjdwLg0s9TrGtYEyiTtwyY0Na9V0y9tuEJAxiBT2EhjDEvSXruCKhgNlP1ALMbf8g/Ao/7ME89oJ+Wxzexe6FoslzLxcCBFOoSh72QSU8wCvTJTnFJbsmSSNxeqOE0wiI6EOcig/tr3l03Bq418V2v9sipsW1eKMfhJsBgHmA97IFHOX+4Vo+rZ/MhpPVrFy1WAxkxQj2Clt5+Fx7Fo6ty4kU4OhYE7jLwAZbFhuszYK3XYNa2BaLKpI14fYTHrCJatFvc7glXcKbeBWTWYNnZKpHkIY1F+FhyI4YT9Jx1zlCQYBQl/lanz+vadmAuh6kggxFgU94ghaDMjg60H+VAtNcS0jaBi/hHbZtIhlA3vjIMA3hmRcMIs60M4aHJFE82LL1p+c25qV70c9caZ+2TYFQkCwCpwnXB4aXKYSava4Q9cV86EG9Qmm5uVLQAeAhZy1IR/dMwoV9RPe3ZyXIoytftC/PEXrPeAFmGY5Gt7rLi/tg1+3HatPcLytT2Sqz4EOnXh/b1VksqrmZfevt27BQ32x3mV8/+/FhBEYSCnXIs8o+O2tjGd80s5Pq6OHLxZHu7UUMTQPwgmeoyhf6pPp0jNWzt31x3GbmyLarvDgZjF6u/Los/QBvgfBosGbwy8XRmc+LUJp8zcBsGRGf2s5mleuq/rQAw8jOsvwownLqmT8+yfeZfl0UJRCzqbplMLVd48RiHqzn3a4Z1zleFJX+8TatLCwxRPi8d1kbliuT1VpF4PoQjyJvFL1V0IyjEYp1g13nYZzpsYVH6RhjYzvgrAjGgdo2lsJ5NTHKIjBFSMqrj8157syTd8aEP3/3i//B9O7Pql/GDymJdH8zh2NnDmvKXpFXNGq8SdB0oq7lK3Mr4wJUZJ6YCP8ZhYXME3iRMgj1IIMBIIGyHHIyQlvmsnFx8IjmiAspByWYxO+FQO23CQWY2wEkYUfHSnJszIqFwd+c6IkEAfa8REmJDNUIEu0b9ZkiDlz4tO37tIO9pNyIFkxvSayGP/iyl9Byf5sgi5/k8SU0k0gNRwmYkLNt1H5CyylZxi0zlrtNn0Bspz4VVHyYh4sAggMsOsASgYmItt1LqLefknT3tym2vdgiULvh/iQ3qP7PKtox9BrusS7GejAB5hiCLYIzIQcBMA4YFM3ZxQo2X9o5bv9GwWZhdZoMBXc8B/w4Lm1CkaRkZK0gRNHHeZ2vyzFrVTl6YEak/4VN8X9mB+BxEddhOfpZy2iH+SgtyTCCiMCRI8Mq8VtBwtVxLD3pVKbJvZwvTeaMNSGln8BAQspq6WQIiAOtqoRcYXOAqkxQzZqYwM0GgiVMxDbGLIOt7IsSIULW0nKwGUnFRfkunL81tsDlsb7NEXCZuuyOuosY4efhkXGZrESHxFjPSI9nGdxP2i5zYC3Yua0H2+Tzy6wEVOLsc9B87ciXqmW9d/P4dHOZn6h+3Q4/7D8qbIlt4Eh1Hd3qSHehXvzMGAdQjcyG4yzrYr5aLWI2MG2GcOujMtsCCuKoGlyVzjUtAj5r8iwUw8z0s3XeahdaUxYxbmbWfvqbfYmb5Nevi1fOtOAtlaHnyDvX3jb38Be48942bcA
vF1eLwgBUChNC9J298LzNCAsrYgfYIb6Up9nKfVwPvoAfCBFnrEzh7aYqztzqWTZvX1uPaDUzTVHD/dtj28bWxPNnS4eP2wpbcMAP8KispZcZ2zK4kKte4SaDYWe4fbggvJPwTj+qfV3OPCh8v173EciYqTw3PX+PN2Od6UyCZu86H7i0wJvLwHjqd3/xd8zPuJ9Xv1Vy7xLuMD1M/jGXOIhmyCSSpOCZ0RmmHI/EKWQujIEIRn6/vTKIM5KB3x14gnpIqSNJ9/BXQnYAdn5ggJUyOgQsgo58I7gnv5kiJ5ANCXkm1nMr2xGwIKwQwBKwXZErfeAuJJnOXSfmQcDb305wI6miKUN/Ax4yB7+5T0ERozNBjofrD5rM8IO6/1TN76dsOYJKVgbmY5gfq7C7EEoF4kYQF8R+uCdhSkTQ/KSfwzDhl+mPcNUjzYLhDO8H7BAw4+GOxRphKpmqonre+czRqEsabTTApJTqsMSECCIiWQYx6ryEzylhNdgmVg12kjFL/axYXDYvz8va5Oap0VVOu6VzmtEl6ZTDAEcJMHsuDtMyl+xVf86NNjX2kvu1WxmBkWh6xlTg/yVsvcutGV2PY4T5KFIpsHl8LJLkRBAEpk0yX9qeGXX+5VdfCjvRpsBfnCRWNuAqNzBq83+cDfEyNmflc51ff7MN+ywE4NY8HOnhlR1nIXsVByz6zOVvN+9zRC2ES8Bw9DX42liPmSNbDD5bOmw/2qbKC70p8vcxf+ZWPmA3w61berfPgQdSlnB+PSyJQKGpXQOLuik+NzXAQZu2PEVY1PnZGoeR66WBhWrGu3GM/tpn2ZFBEKZAxHQOIpbh1Jt2aNc+Lor6bfjhnmkj/fpFMB94t3nvGpjaos5H/8wNMGLfZvlCFXlk6GIRS4Hk2tJc1DNnF7PaysVSUnCIumzoPzJTxDbqrIWD8IiMW107OCUXWQXBXXC5H47PJT/yZ774u+aX3E+rn5xiHS7NRl3TtF9KxWYUw9Ujk79csIvrh+vFNb9LdpLcIem55GgfbiTpcM9caAqPEhP47h8puPGDu4udT7legYjFAK4ypoDJH3LDTL0ATE5TLmV/Ze4ed3kw61SjExfPzOmVxuv+o0QD8Plh2tg4lyw3OE4CCkYu5BuSNpaksuR+hSi807e7hEqHuhs/cai0Sa55ogwTwvBX79Qe+8IxCTu5u5aMMfFBfx2wvIlrW8DVlxFmRv8tRoxAtYcDjozQWWViNsPj9o6tphGy+ITFb1khgpXndLylwT3VfpaNwGhvj9u+gY3B7rGxgpSbmJFz684BGjTWB+lFoLMdVic2COUQJoGVHZREJ/gcjI2pGIJFGchsjQKRYcRj5EsMz4qqYKAPdqzpymFXRCVgD1iEMlws5BlyBFiPHoaOn9pytIO1Kx5VyLxhKQO0GRtCQOJ+GP7f61lct9sIC3K1tcdMouiyVjkCLWY85Jjq3pZ1McdRel484KQCkIFWgVBE0zDsKrQ9HUEoFmftPdDneJxnLNIY+vjy+ctl9rPMLDSf4lK21xtbLrNP1q9clbdWn/a/9X5rWZ2pynicl50pwHbgD1vVgriYCPf+rPqmdZXJ8wJkICz1t+t41AOqtDlz93fdondHAL6i7MLnzeur3qkT1YUq6DDMNsvKx338ieI+qm5tz63pl3H47PjE56U6mhk44MGxohb31Z3ceJCCJU4Pjnz5DWeL1jZFGcjyKgSPHXDkeEFEx2X3HtdB0TE0MWtK1sh/P2x6dL9P/Zr6B+ofwa6lsiLhQSL/4rZ3t6k2s99JgnMuRXM/UfVUnBnEAJMp4rf7lB8gYd/Bg7JgcggPUuEkiHtOKVD5WHJswzyZdwouHgs/UiKnoxfnL+xcDEdq5JJXZIVmJ8USMvNDiADfLrHIlCfZTQUXVnlYfQGoIM654LthvruH2/spH3L3XuOML7i3tKF7Bhq3BzNmAYcwdnc7ZTnN3e3FDoGFJDdBU97wcu2k5H57sPub6/T9dUoN398QKBfzBKNjSu7ypOH+eVn228BLgXfdLOY388O7JO6qtbk/BB44rDe6NosTky4m3iWnuBgHuZMPb4HDd3tQi8U9UwUsgpPaEleOwNSZBqMRMpsOK3VSBWG6XevK2lD5slWvmzkMK5P8okW0UggqYC219PRGqjuwa2yxMK7Skizx2B5xyiHOkTJ7gG3V2cusimC8KelRMhtBVGHhR/xZiktgs5n1LKxECX3J6osmIIpmWl8AB16TWBiYysBvI/h0TAlXj60BHuvO3XNbPBLL6he37BgQKSech55Xw1YNMaSE3TOpxx9YotesrUdGTogxxBEG5nAMybjEK7QoXZ5U+drOVNUwP1I8Y3DimG8mo2J5SDJJlAGATjmmj6Ru5mCu4A5/QePwzSZ+u/w5NdvEZs06Ey88t+89D8++iouQX3lQ8MbheAzikzruP/Ivq1CESp/vV3Of55f9KpYgG6zbPH1/su6D9VleX7y8jG9NVuVa522Iy2x43q5cwXNGvOMRXIXh22uLcNH36UYAAk9eAICyuDoeEEoGphwMwyNcQOd9ONPtoq63c/KHuclZh7Fd2XzS/qiZH4ftsfanVpLsgbltc9Yc7d1JNc+5EyU5Wd4ChfWzcvapUb3tX/6sck2lLjI/3Sv4r7zX+3U+xt7rJaIjyZPFYNRAjUcWgNhS0Na801aiY6xW3FOmrnJeeywVrifea9EzpLBRM4tVgfCAGmUdCWSpalzmzDu3wM3NdQimUrG1HwX9/QUcj/rOP565syt7vkcEZGtcVUWJBTYv5HPmvrRm9eAMfssvlVDv5n0Lh1dGK5zpxRe/Yf4N9x+qf4gfJCETjjUTLm8UA51R4C7xe+LmiWK96MTgtYdrQUV1qE/DY/It1wdaFLZTqhmREbZGzASgeW73jUK0tjuA027iUItrJoau7B2xD3ACgCcMnzLzPFGvxxLUvHYESeB8AhXEIok+TSWdhwO0S0bmSk/AvhMqhQORFPYQDnki0rJRHMR08LfiBe4fUhH7QYpXKaK6P6RrHv8Hzt5eabx4t01FrymjnLLddFVSV/fJeTTCrJLfkEqcvqGaCwdwsdviMgUvKL2fBFMPey8KBQR8N1LtEm6aSmfp37t7yQNJFgsgn3JNn9LFkMTu05v1u2b0sZaFCCwC7cLS88fV0eg8FnkKqVgYZRGeeBCkbqWWWC6+LWdNAJkITK/YwNib+hRmDwuqZwKifGWOwcsLx+9It8i1EEQQLWC5rpRsCLMiJi8ETzUDQcv6rAqhZWQXZoDtY5ARqVxHMUOAhGsyEcMg6Ccxil3B9CarNKAzPNaS6RUEgIMqnl4OEj0iRlGmbYyUoExKYxMTB2zKs/ZrmPTxqRTnsRVSPRx7YQSgaYgGUJOT/4V6kI3QnAnueaQVRZwN6BKCMTLIlNTW4OKRp+yTACrn4aqZz90LfRMQCZP8wUNoAWAmk0Ai3an782VWL7OLWM9ZdlMk1WuGVkaSam25smenTf/DNquDS7BfSPmQ28opF9jCccQSNwIhEu6Xj6EG+4brsXRhuHoNKH
UewYFB/9amPXaAsTwWVuoIscrUxpiqDyFr7EzuHeAZYewqe/ai+0mfndSXoOlNi0CwdDls8U2Nm2DK4/msUp2btcsKQfFwZNY1GOPzquqFD5cA39jVxQtmzw1ili7E93ZcFlnGIPmkH+u1PlGD7rx74ffaPjdDLEFUcd2auN4/u65PwXpNEdSr2aUvcSH65/2safNZW3uG9YWny+u06gByelYNw3mG5QBohdM3oYRPKWfGZiByS5FAeUqbLmsW6Jmt1klKQj8YY2ZMW1VrEOZzeNfWLyzTRjq4vLRdv5p18Tx80tUgKitfg3rYHhcRAYlleg0hz5IOtMI9ck+eaPUvffFfmB9xf0z9svpLQFaYOWtdPnHQIWW852F7EYaJQgqp9Qe5ENAAaCGZaEk6PeaJEgqmNNRiIovDVJZnSJkI4l3CJqmhTSpQpnzku1TEur2aquoTg552/MiUydGmQ0v5LYoIDvuRAHJKFk25aEae9yl/TdHRdxHy9n5/qMwD2HhgH9SFVPSS0IrvuvBJq3Q4MH5z+0HNT9UtVVrXpyJbuk96qfeGqJawj6D+hjnupOMiTk/QD7h8uE8FTfEO8E7DzfVUP0gM14Ij31+EFBHcm5uaNNBXfcwypqQRzAmOaUlDszCVMe3DsNBMAGE9cUMskckiWK4DG4Gbn6k4W+5jRgMbGdl4BEeSg6Ra0XqR+YFAFQiZDPWcnbGFsNSRXLBwOXHaSPGdWIEISuigyyJX9/2sHW9XjFofupz8mCRXseyONcygVdRDAGBBtaTmxLHpvJD8GvFxBkah57kZbK0rzwSVZ2L+0g6d611R1sRJha8AkUBCjojOlQJ0PEGbsug4IoVD2nR340wys0HYNaAzI6OBozl2FmFj851fx8aXIJ8kbParal4eVd1yo87Ws+gR9xo/nC1rkwRRBQi5x4F2vsqCWRm7wrXC2c0tVmuBq9HB7jOdZ51pSnMLRrdsLILZwsyK3G9MGzfZql6WOhZY1HFj+uNc+3IcbFKPgkmXbhkWIPW+aqvWd6aaO9zvZ93QOJ85f2lLW9Xure5w+K6qdLG56SP1EHA1iujhddVVw5hnuu0Qkbs2L7NltixLRLB0AMDdzMRV+7rpO5Z1g8FvYj43nRuP6mObV7gRsQ956E6LMu4obMBZzo7aQre4E2bmciuyNHBdIx5vtPBU9Rjd3i3B+XJfxMBkCg8Gl/w0tus2ANNx50abU7HhVQ2i7pq8Hmc/AJzLjuqKNZmI+0QnVnEZx/Y7v+7KzBbForXVJbHr3/vir5ifBXb9AfVLT55czqd6vEjK97vt7uFA3KSKtUuJJkkx6QtBskOWill0L5xRhJhJFTnJC2p94e9u3zuJZm93h/CbRXy8cKXvUqXqjZKgmqmyK/1BCRV6EGm7mDor8Qha7Z3EsEmASITxd/uLu4e7R/nRQtLYxLMpUhX6BVzYexEZkfcBHoaL6ySRGIdDiu9RJsVP3CQgRhTBvD9PK8lMJ6mkpCWo0R9rPf8uaAPDUsg7Js3UxGUF8YewxYdNmLAUGLnd1cr8ZMTi3cxHgg2CHtCIxhVBdIpOhag6daQ1lX5cIBTVWglDKb+W8MZn/bE1xVHzBq8gmlb1ORaJcyGQ2zFolUVK/Z/LVEZROQGMggIGdlK8MqIS2Efqc5SPvtW+NqxgRSas4DGxRN3xkSttLYfhaitBB0ibp848FrRnZXIHepfeo2xvRY85U7ZJMiHWgbxIIYV77dsbyoQQYoluwTlhcFmWzbABEjLKHGwutNJTGJ+cuNa/zc9JKGG8ODESsKFYuELrrC6fGkSPxp635iv58ryfxSMmFXJQJn1jvxzDZo5o67NN3vnCYGFaMj/bN3l81hxf1NHg5SNdBmCB81VlVoB90ZP6c48Qu7DxtH++WbQ7WxKSCxj67DgrrstvHdsAHu2iHRHOBlt/3PezIStbswDB8q61phnjh44RrIN30brGHuqyDT11ii1T4Vm+I7YtSlyHwHWgQxNPbd7N+irrpBbPtIZiOpzQRO04vAapLQix2paAAj3gHgAkfIj9jMuhi66w5WftsxBK0qAK5wgHkIEesrDn8jgeNa+Xq7xcME5lxTUGh3h08WL28Rx3rvZlBWdg8tr87h9CaM9sf5mVOFyJKd998ff0P3H/gfqD1CneI1A5VYG29HYM14vr4WKQUj0oUQIFSRrdiAD7/pBon+jQiZ7EQpPFEjcG4UbphVPl04amGAuUKVnaJEiai1xpYMRa60ZdHLKBF9tUpbsy3xPKUaUkGurHvyRL7yZ2pA8p/Hkq1E3i7pQjZMcMAkufOmtEcX1QAwB/bveH4DRRovdqd5AiTOKj/cU29djciprpbhumwv19Sh0KiyPu4WLiMl2nRJ+eZJbEX/VfbbvuuF7pOcmIkAmXZH2xg8XA+lcIyIYm9LrVKvOVZXKCqlbP8jpICKyLGW1YSiCLCHCICVM80y6OhCgydnFBdH1eYjRCBP2SbbQv8vNTyl69pLVMMWPzBBNZVmRqtH9iDgJQiZcYf+mexwD0kZIfuIbsMFdxXJndZtYZd7QSgYFxrc+kkkfGZYWEGOmXQYSALTcUh2uR6imAFNNpVvV5lZdACssoEyfADorcZF2V1esCpyP0Dg7eRDPAer55tINVAr7yRSsRrhJZuSkWFmEJs26Sp8uw6zwHRWtHEI1jb2PDq4GtYPdaagG5ZfOO8Z06fVkddcugu23D4KPzsfVjnPWILr52Wjb908pnQL9xnp2GfW4ajziYzLLKdWzKcG6WIV9VS71kjGJMXaza+uIu+1IG8GIjBmjiJvpFHqpmvnBr15ZsC8GtNxHBWBwRPFnXdGU09Sa/furHzum1b9WyzaqqOMX1yN3saN7VwRQgqm44jXYWinxpqlD5ilV+p6IrfVczQT/u3gxvQouVUMzOroJqQhztRs/mICEErAqrIyt38ypbtCdFl2tqFKhULQIBLoPxNnFDjPhXv/hvzF90/4o6xQ8XjEL8lqxgkgON8n9KQIf0I+zthm6YeiC4X9IKGuOxktDlEJosUp+FRCFT5PFBTZ42peuZrVF30iQnxT1pjZinhMpktmA679R1kgnjhYfvJQ1hFFxh68jNNTMsMEl2fn23dw2oYhM7wg4u/HfBZJIW8/+Ha3aXYCv8+eaDAkakJj0RV3r9eYP7kenavJ77ggsEvrJ2iGBtASYcSe+kmwo2HEjuuehSE4MzOZePNu28OHrTH1XgjqxlsdS1KAosQUp8Gn3Vn5+wwgYXRuUabrWDxWs3xNYyA8/cjUI4A4/govdB+8EO0kzBejs1N0wlF7rkzp2tY2NEGajZdcHQAM7d9XCANh+Nx8rKFKAJkfIqKxwphJyBidRClyQKmfvq8922tX9PgyUrVVQ2K7ImdDr+5otPXn0yK85XbuhnKi9iLDNQWIpajK7VEkQoxM2K5UgcTKG6aPvi+xe/q+byuGl9cTTk2XO4cV8X9eJu/DA89/Pydv75Sdc3+lWzOaHz0mzI0AF2d+R/cL74dv2c2Smju9JdvZoVn+jwzB4fdWV7ed32i
+cMhcokL8pmLQVSUmQwpCMUKWa61zjSJHFg3Q7MBey8VK4iW3ODcnVJ6C3pXxmfUZugV1TPUVJu90ffZ9TXT7qn1NFp9We++Nv20v3b6pfUf8xM7QJeMFzTBqYvL9V2HMaNqhVcCQN0StL2UqoSN0kCC+eYEpGjNPwEKjrkl4vvbWQ6VLXvGLpPPktytXe3SVI7NZXKCmeOEz5QjFJYffKRbCgQlzxxXknobuibp3L5/WOO9k6idXpJMnkzuco7UclhdwNT0VcHdv+Yjd3ycKQb6z5x/wfZ1qGxQLO2P9X/afo4wXC7346NuhtE5TdQX0fScar2A3vDEueQr7xaFxIrXE9G/ZgdoVz6XgWqfnd329RwJkWxURR3lAk/3Mzxj20HBF/SCumogGNEXJH7uo3P29L5n0bozXsNYwBjLo2LLf0a1kouBW3q50zQi3xn3NnT0/6eRoH17UsfK6lVSxICXo52hP3MsFrZgBkYuFsWgjJqzfi+DGQ9MKMRy4fq0kegQR4LKc8LG5eSTi5dTvC9LJhHrlks48BWUvxpcSyekQVoL8JVvc4NuGrRLtQ6VqbCgdmTq03nWl0Fpk+5XYqA2a/lck+lMtDLwcL8gkIeWKmEIla9Xo2uqILtQ5apnrKcIxDj6jvfKL1NvAHHBTh7Oh8GX85ixPbgVv6ET7gjIpA6MN1sJbb3Bib2e2aDeWkKU1Ko0hyt+5f7by2Nyd5mq+VmaStcAhMj1TSigw7uyC+btdbrtsd1iWxHsGVfLPX5eb1VvgfJoJAxA09v7UDVgo77IW8i7lYwc+tHh0V22q/LijCcS6Ka1xPIlhe4UPCXCMvbFy6uLIKoronuJCLE+KwMA4KZUNl5YwqfWXebr31TmnMgo58rrCGsg6JsPjcvwkfdRVsUXB+gbfD1xy/Uy5hfZbo09SsXL+NM7pJeLtxTxFsLjaNuwLXsUMzB+rq8JYfoPAAS8ZzakKYwxrL+qlgN9sKHI90WJBpDrMHocFS2V5QjsB/lZwFnbcA10zMdjrq5dWPLkAPRxkI6sJLe93/94q+YP+3+uPrv8UMqJU+cm+IxycJNUfWQsObwZ3zsZ+efm0PY7feEEv76QspIUnzes11btLr71Lgd/MMtxa1sP7jdT5LfVH7hnuaTcPfQsJ2OSjBrkheLMIhlIQCL3qdEx93uIoERcWbqXn9IqQ4hD9skENQTs6hFkjxOlTAi3zyJdvapMP+ZYpFIuuWnRiOgnehq7ve7QwO9NDXdscSzP0wBEOGxbOb2jeA3kB9nTa0zX97t7x4S8u4udklsSExN4rz99wgHp1L/u+/RCEsbw/3U+yAFpH1qWOAp3ydKgxA55K42bWYEagzrRFpS5oj+Wh9XBejEqOebBiyx9GqO6LxWeqGrplhKeiKXIIKSHwkM2DwuFfMgVXkl2qBqDK2E8qkqykJTzZ5JkfFEZj0kgsDCZUV9tUKUrUn3GawwAcZeRTAOvjFmtW0amoIkb8E7yswmBRKjkTw5adMAt5q3vW1n0pPFtyZxgZGMKWlO5r0pwAvguBHZ40BKqtTY0aTTUTFNnIoIHiAbpJlKDgQvsc6DzZQN4Bb4NdN6rmLMG+deR5au3Mz1CKDtcTuvnQfaWsqo8U3Qtu/cL2p2WxTzsHJPKz2yyrVmsdvyJPFWoF8X2/bq1r0qfMDpZctq80P1V6MqpOXbe4b2ZWOPSwROzQj6hoMqLiute26lXYU44JjgQXIEL6dh7nDGdrWvXzY1whBYRMhaa066YVbbszjGxfykKn6LDdfu6tJvXrg5zgHYNSzds67NgPkuD50ZyuwMpx5cvXDBqptYX0Rg4h5+4qE5OyZqqKwyr2P1amsRoTRmmPWqzeqP2nWsHdNPfexCLNpuLPSiXJ9xdIGliMPHLNrn1y5ch7YVomWfOTVma1C4+WpYMKFTj/r85MXTm9K8bsrbl8zOzhaqanMQQw2XAgpdq67zLKR7xob9MS5TI64GER7eUdsZ1oPHMfLFjunibvt6v+59712/jvqF818aVgaUeKHBmTXWu45hGWNT+xZ4iwVvisLaUBeIc37gi79t/qj7GfVfC2sDylB0e5N6qyY1EqvRU3WcusFFuB7DDeW1O4pi8K7rQ/Jy0uEk7CSOGSqT1UuVykTD/FTdz0/1nDJdYiejqv1dDazaSfdS+vNODV5Uw6kuzk4CkQhe7LaSPr3YHXSLCYMF4mo1VZ/ksAc/FYMOcdc0IeQgLxRxccrNCtRc7CYOKTM/7qbu0NS8JeNKBLiSbHH/MNJPPCongfzv9AXIFvZ9vWDHP8guLtT2UcOVGkrfqaRn3G3FDSQ+Nima0zgRPRXO2fMhkslUPaKruNg/3NzupW/0g7q92D3cTerre/0aKIDFwqJNIRkPttGkLmy2IlPlTAkwK7M6ygwFq2/U0i9YaNcI4gkphWgvFAIKkc9o3QyGKTlOzXDSGWqqLG9Su0IhmYlUeTJpcAfxiEQsVY9zCgdB8hCbO9MDAWGIvsVXVp966TJwda3dBndp01DFDHRafHj1tHRnR2B4XvTU4H2inwEY0YHjUCr2FJoU1hDbAhCh96LNZLKTXUB5ZFXUSUGMEQuiURJKZl6w0YabjSRiLkx5WG8L7szz9azSMzukCSQwFDYGENgnIbiSMhSTo511P8WMNtVgpeiUSmqftapYiVLUPwWqbmDUTNOQwC4k5dmp1i/a5drM89cn6njB9MaRQCvbJNfRZDn4o2YHCrNWMwBG4TJRIyP0oyZKcapKYG+9YnQs9JDNu73vtJ2FnMk10zPJxKa3Ps7szLetnc/zV6/GO19i3ZaLobj67NS0lT3JsrxcnOcxXyM6zcqVX6dxKm1dHtc5gJz3jGvG69UGvM0tcM1drjKiS30c2nL1JTesfDZWiMDVs03BbIrfAObpgooX+Vi12byuq0hNNiJ4nAPWU7Bl630Dnq2LxpS+0i+1esb7zA4tHCMpvo/rszN37Mpo69bMM52ZUhfsqKVoqlzBXzhHRqlzaUGBX8b2camBlMzhfPTFf2d+zv1b6leot2YO5PZ+XMzvFkxrLIbrhzRmAyZ7ww6Cu4dBgCl1aR3rYR8Wo2RN2ao+TNUWbmCaabG4uX/gkI7F9jM1RX43bFadCsX3k9goNWxMc5P8Y11bXmWtfSqPH8glyV54bFnX/nuE3LdXJu1md7s7zOI4FLjvUhQ8tbAehjRNpPImJawGfxjVQXwiT709pGmF5n1vH5eMwLibEsIIdbe7lPmVLjHZ/RslvR+PGJawiyRwLx3uOyq7774nrbwXGejNB3UtJXimsNjdAUBnq9ctaBAzmDrNnDAuKQ1FKAkGJZoUSQTjP6dBHnTjzjbPs+1KOhlgmcyiRqrfjAPecK1kVMuJII6dn2HjOh/3AevTNIj7pB4FtOhoNV6nxkREvKYCR+oRxGYcbLSosqxpXo2CCaYgsjB0ghN1LkmJkv1JaatIE36k2JVSLkBQF6j+sDEPpWrLi6ZxvmZ/kDRtqspIZVWnD1H5ByZbSGtYxv78CmdhX3phf6VZtf25/4iRK9VJ
TvJsotFjgtxZaUARQanwQWENuFx/wk5dtpLTwk45KQj7jSxfiQTQyXAkxUw5rI6x9sKusNMGhOQi+0H7CQVYir1PSsYzUTpjOJspAGNE/ydSFUTQdpbDXp/HpyE7FlF9rMrBgjQGnR2542zZRepPEaTWWfjGanHnh2/ub/rm+Q9dkLCd+GLTrhdlNcsvjUz1cSMYT5nPy/3586Xe5I3wb6C1r56NT4u7bK6H9VEo8+Iqiy3nG5gKnLFbHi/O3G0Y6xyBLHBr7Cs4yHnRjixVwrXkdm7sEKptsy82IQyAfNParJoN7fOrAU4HDqvVl6sceOr39bvTly2IWVUo9Tb+M7MxNy12hCBhEVsVbcteUyODqFgFZG6SjFmUZN4md+ymUSxYHNSO/0df/DXzy+5PPvlrT/6hKoBRJylgAymqpz6sEx0kkJpLbwUVjBTqgCCINJuia1KJVJ2myTHaTLPSmOBKRSzJuKVWSlj1laad8nsaurSUi9XbhykxfYi6rm9TkBqY3nqvT5SgyjGYyEFYDnpHaHKLK+kM5RFMyDOVk9OEoRN9+djZ+oaglEBukNkZ3Kf5M1w3Mx295GURojRlVrM2lHeiMjEBZEFCLMT1far+EhAA+7X3RQrgDKM63IDT+aKpxgrGyloP9SRZDddjOxm6sNSdo5H6tQkvbHbml70bpChDI2TbE8jys8+fM6fg6MitqLLAPI43G2v33Xf+HXYnwj4iewB1KtvU1kpGOSvUWwv37EKhaa1gH9LUxHE6dKGIkv5T83G3WCIYUUImKrz31DUv2stv/sziNz2s4y/kW5ddlnaZI7xR7bpb5c4XAKQsmNs3xacP/3z3Q9kL3fzu8+6D/+Zy9eNVvqgR1ncvXbfdrXYz3/mjV2Z7stuWzXG/q5bvvi9/t365YvfqUaca386qz+tXofjRcs2uN3ar0Gbsyj7/lv4txdnrrT+53nfNrij7u1lcEoAJkHGsmp/+6JeArDPbttly6T4aZSAQ29uiHSObQgLYQh6/v4GHbyn0YJOWrcpyNl+ZRZ5VuBsgT4s0M+tfw/r/pvtj6o/QP0sp8/76get0vx39NmzvLqT5eYvlDn69l6zxFsuZoUcA4d7tL8JW0ruHIudUmT1Vw8V2sqVDF+P3iBtkChaTrEnpIFqy78YTAwW4YkRDas8YRHPmp5k0mvOtmAe5TenhqaX6g3QW8kMpWc10i3Rx3VxPE2d21IowrrljS8fUS5WaMhKX39++P0ynuN+9lAFf93d75nInKdvtoUybhlZohhe7g7TljU790tuUz2HTiAQ29Ln3ODyZ/vdwnThCmpG1078gzQKJ3QaZTyWSUyv+IpUfyfLFqflG9IYGVB6eUdFm2GVXGx1KVwFAwac9ZWG6Jm8Uel2oUFADxMwBowmRk9nU9yy1VJJxNmIejXPg7CpJWAGclu3S+UinBz+XsYfA+Nz3xp+a8bI9Bk2ts4+0lGV9ACUWRQgtcuq+4FQrXYg/FlUY/ELQkm5mjojd0rIv+r1I7WxlOfeITdYytIoyJeVKqkPICyQLKcjNgxGxr/uWDJlgZSjN3VHJ4Ypuk7xARtRIEczpDJex0VVz7FvTEW6KmY0bwyYS39lsZtr9WXYxBlcZ1bQO7k1nu/DUSqAVHGACQUd1/kn+rLtx8aNZ897F+/J5bdscKNPhA7FAKFIb12RVswAoDvGjyBFcx76cjW3+sj3dLm5VgVtT90/Hk+cxvChyRApAF1/a0JRxgchHMqm21DN8C660M82yaeB+T118UxfB1fvheFszMowqGtyE+EYayPxocX/JcIzLAN96Pndj7Y7sYM1VFkovvfQu8XvNyCpy/IwtakvzZBiQslKM5PrxiX7yg1/8L+b/cf+6eq2+9OTJ5YlK6cRaTa1KKUV5FWQ+ZvAfDamMI9afCDUQoFGSMPXsvpoyibvDFDsxm4eko6ctbMPEm5mYxJftMTGEdvpOpRmcnKDy4bFMupDYQQbhUZiVKrdp9gpI8vuYDDn1NxPZbul0EwMGbEgCYkIbL6M/U9pZCwb5CbXSsCv/3SYufi/tmfJRM09ul8HE3PyDInNlddy9bDgeqVTVWXv01R7LPAhP8/NOIkb6tYG5ScRhq/uzuoYDzIIIu09OfVFLY0aAI1NP6wjLsF0cFmsLbG/Lxj/vTl8VJSm1L8pYyoCVwGSerxHFh8JVsSjzEYYgelJdb2b+qSSqQhkd1ehPZYZjaPRgitSuQxlHG9uhPD152qwM43xd+XaxGT+uw619v3qawcTKwhZRkmCp6JLpImtlCBlHL2T35afh+O+J8r071/f/Y7jM1uv8xXYEe94cZctzW6y7zcsgQnmwDVd8dgfrL9QVp630fyiue3fOju6ji9JTaAvuCcIKFjdnAmEGali7oqjnPvzxsyL7A+3vWo7L4vOrdx8Q6A7lKq58HmWODStfmfB5RRE2y11BsfblQoAPdrHHheKo0nrmGst+QsQpTT1s2s/LOakCrgsuqrO1W3L6KQ64Kv1p+dQUQznEi1l3DeYMm3VxPouX9nl3Frql+4XjePVCm1mWtS1vOYCyGvpPNktTVK5tfLWL3+9a/S+YNmt+CvehcubPsm7bfvG3zG9zf0idqZfTlIHF43C1C1JHLvr72xs4k7vbFLTu0sSA22msmUgZZOYtdQVbP2mO0jiSUYqPSew01Uw8a7usEdySXnJkmgwHeFQ1PRpiqgVPK/8wSngnQeteKq5iVQ+HUQNSb30vis9bCUUvHvtd6FcP2TZWOGScpcwVWJxYVlr9NDxK9jaNmxJn7xb371QKz823QsWkbsEwJ9c1ZTCiaWM7IC3K5krqB8HC90Qv4B+Z7JDRE3SCBUMC9t9lueKoENhXy1aRo7I/eR4yVUnQGVkeBYyqmTaFdSBglwW8rClLmOlAPxqwfRPmR8OJIRYrsOM85DC3VoVKS6GUuSEez3DjLgJnYdiZsc2lOqnn31wzXjZZVTZfrvI2pvQdnaQXNSD+u7gcOzPHKVWA6u980tkmB2D8xvm3+y9Z01jfKZx+aizWLNd2pOm+YKqrNzD1zGbKFmPI3rZv2RTCfkHwZLJxVvW+Hstf0Fez5kear/iTpn56bPb74eTEl/OQL2zxyepKf8hmLXsy9CI7Kta+e7bTR2ER/E1WZFr/9vnHsXftLPbFR+cfxT47qi9dOUekp1Vf5m/DPDOL4JbudfXiI5e/eKXbF7E7qYdvF/nlSpoL34YP+ekct9FeDrm7MPnqS+pFPa+KwDAQl/CoPWpmPvq6AvLhloNMZAVnNALuQr/o92FjIzCxOprHja8yzv9UooX4ddjU73SfqpfqLaM6LcNykrT/0JMmTQq3yU6SHjmJHThdkFaW5pvht+Zi/091jkmfBA0w5cX3nIWYxv0sxo0KqSuDMqCpZeKFkX6z9weJj3w0zeOYUusilXivLg7T0Chl2PvvDga4WAwX47Uk7LejTwrmaVQHh13LqMPHqdg34lrvHguG0gjxAL8c2I43yPkyRUUt5PhoZgrMfT+Yz01b4ts8L3QWZD4zxxO6wpWgfyy
MaWGdacEivgra1d5R6FdyEOmZYmJw5q20LhraITOzMqjHk43KqBv23npznOlI3ZCDwfiaLFCq7ZTXsSmKs6pp0SbJAqVDC3SqYkeFprjfirgwU8WClFFUwKoJPr+IOSzIreLHMWr4Ewa1dH/Gvur2O64dOSiZZOU0+8KWwR1n5eY7/xlg4v+ur7ZMlOjL/vZk+HoZ+iws4smCTXDpQDj45jQ7CZ/Xp1nz+kN5XzcZKLW/yMJxJSxY9wDYszqrXNnavD3Jqsv4pcF9pWw+Vad6FhEa1tuzqnMv7Y9kYXbZUZJl7XxZH/uZDECbr/1lQLxaVH72A33zKovHVbbMnn9l8W2Pq0VTiAiVTd4YOJ45wsf1N55mZ5vGbRhkwsfmzIotYosrAoL3XJ2ev6iPvr/pXf5yXb3OhlUx13lbuAFhQgMGWupyWQ1Lo16Ooy5xxzjxWana50E3iMKFxPvs/f/3v3vbp77QPwgb+2H3R9SfYuy4SLFXGCZ9zHYPI3i4Yfv3lLWdT4V2rMCLqTFpymIckrKpbhRSPrbWUzQnEaJ/7NOR8Tvi32RExXv9gaHUfZpkwQ6iadT8lEJlviPlfg89UgO7/9MY5mkEgIwXlR8OI3UO3UlCVm/TrNL7aeqnzHu/u1KpCv6A8HDqYno02VEkwJzzd7e7YBS9SULEU4XdXI/smQ0v1faNepyJz8al3cNj9el2MudpxD2Y6oPI/6/vP0ydn7xcknCaxgjd69c3C6ntyPhMtrHrvNJzswnP8rWUhHNyNvw6U52MfzOuYpenpHdwdwG5ATyIszxzx3Z0mCnsMWa899FUs1g9lD22BCdZSgzHD1lOjMtE7F+buZUjkBxq6oUEbLB4LLIhk1fPji/Ot0Ftynwh/UGcXMpZGoUcAoK+lu1JFAazRMFwDxGnlhkynH7BA5E0nryd+2RfC90v2Db7mWQ0gLwjOl1y5pdlzpTsQi2o24e3bAI/pHJpRGU2mzATCoMIzv2ivvDrUdmmBf4EdgWCBeA/vcpVYbb5Vb5t4qfabViX0m0T7ZvqWr8JX6uLnC1VhUIszhSan7ddDJvsNmYhN80cVnac9R+aNjs1oUWQaDOzwkUdI9xa6OGQi1z3liJOE6l4sHEWzvrjFci7XerW6LH4LIayb2xDJSDuUmQ7+lKrbX3y1M10NuQeJATIHILLVlQqhSMflgRT6dQQFZd3Oi99v32r/FsCsjOlLf+5+mKZ9cdly3bPInvm1u51h9fyLqz8qIpQ4DpKHkIKa0GU3dIeUnpd9bi7ma5Aal1BLzB+Mn9TggHluIcrby98c1K8jjkO2Z+yKIrbuKwpGfVLdop7G3K9sOGJVd/64r81f979s+pj9TvVv6j+NFDl6jBH00wtOtcP9+PwcL24T8JYhm338yQd/G4zzvxxGKCXGRR+vjjUpKeazpB6x6VZnUmoJP1N0JUmnPvhVIYkp0r6IKw3qYEfU0AiEWZi55AwOny9w0Lj5kLS+3ADstP5QfMociOJMhkaH7rR/6lU1F0SLiaaPb2UuqqkKD8dwGGU2CVblGQQhzySgjOLiBnTNLGrJMAUEGIKDxeDs1BwHNeL6xNzTX3xtVxPyWCn8jyOWP00mJfdrGY/FjmWXJsWVNk0WHrr6DOYPrw33WjOxhBOzoxxc7TDCjUdoqDQ9L4AgJj8kuobK4DgqiN4Oqn6MqvLpEPMOJFdHH7JUvMijhW7Ies3jRvbbFRplGjImbqixZs0sJxrukg94iGLwbCeU7PbRRJk7CLXRjJMSyBMN4s1LJ6if8s2G+k4MHnBbnZHtp0535mMw04pmAyDFXfPQQyB3emrPO+M+j5sNJKpcIJIafqcRTGQ/iXYams2PpNuJgCe/mPsOv8WLh/njYB+aA841cxtRybLomkReP5hdzFyhM9tZUU0kMlURBbcwYoIPxLbp67NWjd+06x72+uujCsO6QmIbktd57+z/9LZs3VfR478Ypqt4QQKG+Z6cVzUL7y/Lt5QObrYOH/sh6yFkeeBfUijzwbFESQmIDKYcYyZJrPQvm7q+LUqtJnJ89JxHj07MLDddSgBBrbuZq1/la1B0X3jhko1zQhKhMXGLq4M2IvTdW+96Tppg4WPUK7NTX7aB1+6hSqr/nz1URiwpvSgLsJZVn1SPnPPR98Yn81C8ROLHTCl1Pl+PArnJpyG7BSsJeZJ0arlvyKceT3vFSd1IJoCDVy5zsU/xTCH9bDuD8sUUjqEHnF+X8Vlp1K7yaI4IYc5/eKPmtH9fnWBH94r9g+cKiaT5qIdlOG+U8b5u4XhIZDOeOaz4cXxWpLhTXMmOBj0IJRmdSZNLj+RqV+NSlWlx4gY7OSlAlMgAZBpeqKpu51SVmLbnAm8ffPY4UOq85AGfj7Wig9JNeqkr++kzH6qUm2dObsg0/mSfCZp+tgrfcEpQ49J7TRR4krfpWYheSDQB3V9xwQdFS6n5azOQDll3ci4mjyLIkbOB3eeD8CAIJlb6eDx5XPGmTPJafmo6uhmlPRSrRdkBI1p6agjE9najCsASHjTjfN56xt9KQ+y6ZZnu8VSm+1sObe6s0XNIohWLTX4fuYoqhI+48va5aMr92HgABmsTrhTySEzd5tltIkiiLrP67is5prjaF1tqlxIjB3ZtisPy0FM3lppdZ4mg8IiS9+538uO7Gia1zn79DrjwilbeDRH4HT16rx/dm+3nX+h8/woy39/9mXNsvVTZ+vB+CKobFvvVDh2bWkr2/OZP/PZ19Zh4VZH2d5kwwl8IGJqxAFd02K7z/nQHhXnczCYTHtX4Ipw5T/VX573P+9eXL1Yf6zLLG8WxcKt1+GEmWZF2fDLcgmLMCBtzPCKPIi9AiYDq2Pc7PqYLw2vYNsuxty8iC0byWVOc5GLrcj0DucHWwGdOSqCoCSPwdBDyDa+o4b3L3/xN8zfdX9Q/XWpG323yz+1zT8+KQT/T08CGA72wJG913eToPVmIR4Vy1U856mac/avUPSFdCOEFBLU6vE5KUm4q2V6905Ks5It82lAwhjSZIJapcFfUkaaOP7jeHLp9E1NesMho7WXglAa9Ds9l+CxaXk3TSSnP/ePubHvfeZBeuTB1WPtNs3wur+ZMt4SmtxOArbkzdMgXpO6hw5kRQS5W38hrQvYO0KIh93FlcG2LnhgdNOcSDiJ46Y+ZIQF05jxNLfzvbqZhnVJbvz+Lg1ekGYKpuTeqS2ORP+bOiv45CaYbTlqTlt1fFZFnfdJypOxjc/Cw7hw6demKagggP3JDFsOvZxx1geIK4vrohehcEFPAAHLa1zlunnHIQOmhOu3k2hMOlvBI5242A7g7VmAIhPn5zhrRWZRKW8Xue1nG6caokXhpVsR9gfu3Yn6TeRyLA65UIvSn3NeOJZcwYcgsG3OsPdxOzi2HFWF5XAlSRQE0FpSWY6p5jNbCo5JkcdEifrFUUWBHdU//GN9FsvSHvFZW5x1bAUyZOCf7tQATqLmA2d81aou66rI6ll2VZ4gxKnMKrS6pR4Gny6d9DlK7w5TcSbCzbqF932dUc2CoCpT4DhwW8
Hlp7MluyRA9r20bgSmwxRYwsKXZvayjRWFDW1EeBE0QhNQH7ZrhMCni+A6yJiKMsYZfLJtZ571sqCPGrcIs6qqQwv/iOhFMjO46Yv4mVN3vutW5epUk8A9y+y4qDZA8Coreu2yyJnw+RjjMttdyoiIeQ7e7+vwangGkMj1Z4P28wJkqLbtcXV3GkYQ/LefBnsW6ItxwC1IjT+Ow6J35iTWY/upXbl6mLlFsc5myyyv8rq3cOngHJQUU1VIIvPqdd/0rhEHAmZ0aW/10W9uXtUNO1o4y6o7f97/vsqaE9txSLw0WTHhU1OEDqzXMrdZHpvldKPrGFatCVi5Ja6Czvt4nX9i1zhKJRjJexNNziTTAnyMc7p6mysD3PvHX/yX5h+4v8BZ5f1wUOdKRYhANEUhJylyJ/6J+DbJ/5NENgyHRAZw6GEy1bvb9OAlceRJnv8gSl7NKQNDgof5oWf5MNrpPtWe0pP7OCZwd3gAH3mDzNtF4ESOkbz+B/Ve3+4uAj8l6c2GYYY0FPJ7VhdYrktYNIi6uFEHVW7YAohSR6R0O9zcX0tPI89UcGYn88OlFCZDf1Owc3iQxDTHQCYohPkN0OlxhoI0QWzD4yO59lt6BHIgCt52F9KJraf64Y7V9Ot7zg4jMRJwlkPeX9wyaBn+Lw7clDpyYWRUfWYo6QQrXEW/NHXNOjR5v86DPBGOIi/OMt9Uca70zIzSNVDInHBNObmC/1Wg+k3wfUb7csVlSP2UsAqZ8UcL5WwUeHfWsbG8/djOzY0dRSRb+O7ILWe4tgwF2IcJ0Ko4nlg6AThyOpPpDPOsCnxeCZ/TpO2Qbylw0hx+FzitpeUI/UMa0omYhW0Dx/7yuG6F4sgYdB0CswP4EJ+CYGWQodVNPOLk1HXZh4otFwWzr+qgIsZbOD204TVLAyLOc9kPl79U39kWYf9yHG2R2dU690v2F0cQMZCuuuuZ9z31RTG8sMV6dvqLw0dl3pgimh8bFn8pZF++rMJVW81bTt9eY/ejVWA353rz7GG9e/biql2Ubz7/Uj5chLfPXXhbzn6qD9+O8UVR9SrH6XBywgxgF4vl5uobq93X69F+NdrXz1xYHlFq96OL+ms/oj9Wvo5H7uv1C/YVm+P+bAXWpWtVZDZ1X8qg98pXvciUjxUilQLEpvCwdb9x7iQ8hQfKz+HLXl0+HWIa1ahz6nnDEVskjGgLSsdBpjNftQEMJ9frdQ7iJcNcY3oEGIfYRj3N/WKbie5N6XMZHWT1l36wOJdHfyVpGvMpqtKLgSN3ADZBngEW4WuLgurCKM/pQMQX4BaTduebX/wt/Tfcter53IRDB+YbtZhm+cJ2t2kCMAGE0zXTIwPAAUCQtgfJ/piap0VoM2cKhOTrVA3y8IVUAlkk1epIKZxP4h5GMoJForu5v5MhxSwHpubl2w9qwrn0HBgZUDXNM5EMcKo5PHCWQrLi9KCm3eNDQZOg7TD8gCHLB5Xw7PBemWz6sE/fah6I/iOKg6+J/bMq2rMiLuC2huNsTzWMNNWmZ5QhRmgpc1AVohje7T7KiEXbMGfAJ8xJYpC3qeC8YMrY4PDL1sQ0+c01+XzOEXjy6B6Sa5YrvMzaYQqBYld5loY/9dXTmnpQ+J611KY5x9FIAZ16V1YBqRPnLADELTrIA1RkyeBI4O96yzI+wuqf5SMQYzm6RczKurSfVCTqpig/Wbi+aILrQys9QpTtUKSrPSL6gAW0yZ9lqxO961dGLxdP23eRAtcXbsU8hjzf8ww8K3MXKqNmgLMX9s31+99iuyWOqa3BTMx8A5OoTf4DNv8sZK/1Sbluu/K4nge1OvYLO4ydrzjS1Lg6e3tx4uoFOEFZFvGo6IfCFSXBfWWqE1eC1dz6vjxmGwynIhhTqUrVdbY9ucrO+Cw/yRdRyfb0yNu2018eFi/jpb7wz+szhi+xyVZ8Pu6Tv//Fb5jf476m/mX176u/yur542Mx6BXMnA/ETU/Inebyp+bmQaYDD9O43uupzPb4+A5h9NsrfSvl7e+Z/jEJtaWNZRri+45dyGk53qYU4PDd+cIp+J/7x0k/HESyS/P2D9MHGLanOgA+PT21ceq5ud9x5setPNTj9lAx5G/DdsogpF0e6h2Hyf23nDtwc39o3bu5lloCh4I8dvpNjzW7kyefyMC31MUsHYopdhJ8OFU3J/ZahiJJ7+Jj++U1S/NkPjuphcgTSfwuVWbAFVLMtMX2Lu9TTHWR5ngmVe314TBwfc1PZ3xyjDHrbCi+FaxE6qsiuiBxBJtnJO9HYTWHsvJRHjrjqAO23pn5SSw9R3LDFRdORpvK8BG2zrOeTlk1aHvmpf+34FNslZfpvRxpw7ADNoywGOyPpve8GJwku71M9jW1jzIqWJ5iQitPk0q0KEptejQbE402lSgzKrzb6pubj+nkDdVrJs+zvjHrvJBODnCAogPSqKVppDOMyTUZkY4T1kVVfiVb3Qx8qi1chtQ7alVVNnJEdt0wxwKQaOB6tgE7zvrvjIQ5rWdKCiqRz01jFJDJWGPq3qL6m5IQqXXHooqecyi+t7n7VHJjuAy/Ui4zPhSucTk4gA5l1uhF+9nS4jNZ6cs5blCdD6v7JQDOleDdnrp3ePxuyHZNs9J15vPZvnm64nyoggLcWQPm4mufs0iMmEYP3hbl0h+vtWiMSzizKlbdR3zCLUKWM/1p7c+q4mlRXpY33jTOPm/u7Lyt+mw5X7vzogHhiJdhsLYL2RJ3JfBhJu7Z7CvPjtOzd0PEwrALV0c6VlMXHOfMUcIA8pA/hYO1iB4HAKU+9pmLTVsB8U5dXxld4+bIU1bKRsOXM4wiO5JZ0LnUVaT0qxvOFM05qiJ/1pP3lKt3lsKQnkOzomVrVg46wSmdLjVoqbxxs0Xtiv5jlsqwcr8S5FlU0swFBkXnL5JFmWQt5IEJclI3+IRuw6l/01MB06yH3/7FXzf/m/ttetBfJ+pNOp1DU94o3tUfnikwvT7IjLH0XKQxiRSGRJ7nafJa+v5YXR8aXgQj0lM/pmeq3YGQ7+VBt1Oa8vCocD6IYBqpwpcnKY8IfyUxKvtLcjsZpsvEC+IEyfRMT2LaifJox2fFPlxPUc7tBcur6dEjUw/id59PcnfQ/kqVlAj2OC3pcbzk3fTchi01ijsvHdkCTi/VpMr4nt6ZqRlxcQ3Uu2em5VhdsCn9UVE/dVwLjeInLB+Pci9PiXivDuh6u99OD45LT1dQOIrbq8OcORzmTYLahYzYGFP6aDHcXB9r+IJjvd3fbo/pFqZO8bvADYXD9Di8sB8RxO1TUYtZ3qkCBsb1QEn2rciuZGLmdhJgq5QeS8+0kBwVnIX5+yy4tOfuNuRLLsEwaYJFP1spzo1RhaxgyZmTt+gjwotPomQGyuZpXjsO8GFqwwR51Crl+hnHkbdWckU2yXBl/IK0u1zoc89Cspdx1vJoBqnDcPKcjH0Byi6OQhefrl44P1xGNoKAZVjgJraS8WF4QBc+XWUONseONm6i5gdl1LdNbUzcsDwrijWqivVqzmXDsVa2mR8zBMz5+Ml5e
3phR83haYNtOLDXc0QOcYlZF1dyGjAiKMpW4jKG1obSSnujyeGSsk16OpOdEfGjUrlPaluJBmTSsDP1GPKw9bXtVOHllBlgWhEtHx7XK+UjSQ/JJbZ2PdZtPivNUTzVJ8YOX+PbOGWCOSjLyb+kkhlbDUW6a/xsXJqKI7qi+bMRF0yVw7nfx4wdpkZvJEz1+XLb+qwMG38O15PH6oz9BQizWM8DZj/b2E2RFZ3v5Sk4pXOxA5iW7bDY40ZhCbpt1oXQ2x5XwrMxxuYzH/PjD1SimnwJzC+XQBr2aLCXaslnRdA9mjf6fhmrUiMkD8z3sdEymj7zq7pcDKprCyzBE3/E3qtFU1SUKzs/9ktZcJv82JdZ/P/b+7Yfy7Lzrlp73de+nr33udW5VNU5XefUpburuy6nemb6Mh6Px2N7PBPG41HiKHZigp2LYhGcECELghQbFFkRIEcGv/CAEE/hNQIJ4RcShYcQIjkiCFCEAggkECBekC1P8/2+tU/18AfwgkrT3dOXU1Wn9t7ru/4u+kDoA2/3s1BYO/N2lA0m5oHI8lHmPVAF4MwWaF0rV9pyTM2cqvG0Fn73HiRSWwgWJvHaw1VcM3uFcoW+WwzOTF/WqlKZ5bUkJQpJH9OxT+nFbNmcc7S2izwHEw3PgmIGKSMRITdGNTNLPKfY+pU2LZNC2gyofpQgeDpa9v2x4HT1vPXO9CRgwlnBJtC4YTDXQdkB4zHDlAAJ9aQSdXtRhtQagIDLinJXqYf1Sq2sHkyRU0KPVQccrwKTXe0mIGQ5duYrZprqB8wEUUOLLz7/V/JKf0t8VvwEY3k4NK95gsPqNRjOrM7Ein3Tt4ZUDJmJmy2KSSdiySJ9sajmDRdr/UaAC4c9DMejCye7EBdJVxszxrqTEe4qZcy8ltG2pssCW6/Pyy6uXnfRdWu3+SwG881Vx36iDgCN5sBCFIQqdEvJZ9DnfdhgaLcQGozE1tHcZlsXb/2tNsyhZIT7stPow9cfbgYcko3t1DktpIGQt6K4/Y1p8lacpBCABZViyD1rdPDB5zVbsTHGREWu2J64xpeRp3Fbriy2Y4oiEfMUMXiwGDN5zU0mxRSfYsGWUIHK0Aw6wuV4kUObhx5dKjpgnYMJD5gWgX0w2QuT3TNYdaOCoYXgoH2Y9mfZauqzVE/NktUuFNPXE5gqMWcdAxXmXwQKGbLPT2Ue/Y1h6QcdIGzjTEBQx35e4A2GkIiSmkJh6bjn4WwBzF8eGJJHv1bGesBTqb5ahTlqQSrhqbjWUAFMmSDC2oRUho2cH8tyHzNKatqvmetuM3xHDjWyxbgLvI3oPoHaGyeLv20zq6k7t0WVQT1ZRaoITrHdNesDVd6hnvTIuEN39+hBlb4Ea20FsT4KqPfaWiZja0vZ9loZMn+afm74uMrLpspH2r4aZlSGJ7I3VHao29zsp5dSUXP8YJWZ0h/DwDepzK5whUr7njLUQE7HBgRagx6CgvCmOhEn9fBg98HjI+enxcNBtgsgAqWheVbOCnPP21lIMScqM7MHOKzymAdBZAAEN0UpqMSFc/IYg6rRxIUnvf4d1X9YXlTrpuppXVfYc6QJEwd0SaEuG+eDnqrOFm0fwwIqxxNw/R1nPpkwJzYKJiEhq8+/n79s93f7v+fyOlN981WdlqZ+6O4qM6U8pcbuIDSDMDT2Dt1wqyv6mm40AsAioGZV4pPPf09+R/+q+KT4jPia+K0bRz0bcXOYGg/bLVommn53Gjnbxr1lFU3Wv+szNbOMQyoMlxZnnefm/WiBt0JMYFg611zryMyMLsJt52cuwc+eR5R8xP/0je16VC6SnokYy24ASPATZuR9t6LvFNOfim19TBVksoo7/G5NR5Hk6vpyeRWdj7fwwrihi4IYXSP/1Aw4vkRf0xd66xebbnfacdkBqtTX9xOqnofR1muzvASmN+4XTMdSpdb8nF4cl4OHGzg/LLZlKRr0q9UpPthSbdxZhQ0RIS+iMaj8Fp3hAqPw4+wOJdXgOt6sjSoVgA9iYQUofY7tOBO68AumORQNKH06u4FYlIDQjLRUASQ2T1M6/u7YDRkFBE8W64AmZpaVpGJB00EdF7kPfrr3Cr2iyEeFt6bnox4QPZrO8qHG8BsQe/Y4p7dD7TKDf1hgnf6lUTAApYh07iyLnYpKiJkw+7oeNoDzxGzad5izag/Zh/t+l8rQMsEUjN6YLbImEWtp+hRHUolUqz54g3XNBXVemC+gLqViNMkFMF9hqGShge9JWXntZ0EvkApze5HBnxNqHBoOfzpAMbFOp77x8p/k1LNSZZi83G4yjBsodxtIwoFM1zPwJ0DJGtTETNrhVbErB/dX5ZkyoK/QyywXC7nMof9x6PZClo5e8bodyqGiRrYx49p+LOtXg/pReFMtvzh+2aZVgoG2TJXo9Xd3fT6lu7CrgjYut6JX6xnde0qm00ROMrrQZjiRYVYy4QKWmgmLiyR1In1mVufFmkv/QlJF3c6C+ejLITsyWdBBFYmuZOlUIRojqXQD2cJ9pi2PTFIqPZDmKDH3shmeq/GsqJaJ+RguJhyDKQal9WbXnIt5TpEeyxKrC52ZO4An1EL5FOAkI/+n4x2Her342ewuUgKcAzHeoXtpfXa39WPnXQ756uSXogpmoHthWAw74awmx6PiZOFHVAzLs+ywyjlm7fzu838q/7b+szt/tPOfRU+ccMxCERS1w7pJ14OIqk/mengDU+Ao1bVl9He8F1tEaQgWuOmAAcwgvezKEXG1iuggjg9gu7FSKMctiO8sbsQWtwxxBhCsbZxHUpBaPZWY9W0GW5nC9QA/PyRTPheHl+vtyo9XhPTyhmXGEK3MVsUHYRGf4DKWR7qJ4CO82TMImbF244Cp46ptoNtPNcD+hEoDkydwzMtkpIQb41WWo41KykTvu4mEmsJRanepTrjcb+7Nshqk/XCdpYnVMxjHCl/bQSb0hrk9dKo9+4Oy8oHPNdYrrXWpdxaO5VqnDVX9oq5PqsNxeIkqCszSVGMLgx4WUseUPG2i87S5KvI7VhWUnIxbTrRzRa9N94Nd5a37YEJhJD2QflC4bKSpyTC/7+hzzeregpmllj5zDzAFUFIli4dRvPo2VgeY1NGTHkymsrQvf215b7Fn2PK6SCsMo9wwdQwzUtYVxSDdf+eRnDbZfrF4q/3zgW0P5YHXWZZREAltktOFq4e5zMrczDJb9HuuvdM+fr9+pnf/fnvYNsjOdIQVYkBO0cRw2KPTdeigDVEPfRaqkZ9Woh7NTZiWU54tJdU6U22RjnwIwDwBYKCsBoby1WF2kIZyCE80o6UpzHE/vxjsjX9sl4J49iibTQczR0Xj+8X8hz+fNraV5Ruz3Xd7vSJ763j+CXv9YPfPFWmf3lJh9i8rs1dMqlY8p/6zf8/sUxDX2l61/pm728JqfUeIv/P8e/Lv6V8QX2EOwVbWOxbxy6Ijww3ZSeoFvNd09no3K2SqwZfwQ1k/BHgGQJ4NKKPA3Reik1J5GhFuUSCw7eTGtxv8tpDUwixgSIBfI8rnhik0ZfEsthjoYHZM
v9v6Ry3KrvIYtnvd76Lj5hqQnIvr9TW7h+Iw3/htd/Or1VXUJk2u19G/k9uijutgIhx3cYMsXFKlRG9wvbhacFOx6TjrzKtjK8EOVRThQOednW9HZ48jruTXzaQ3Smt0BDYRrY58OeDk6ITVTTl0yS7GyCHXlEGpXUR1jVgqIgUdFCuwuEXUG0nhCe/3RSubxA+oVEyzAJVJeihV7lvXF5WWcpelIkKVU7PvMYChjzzo+8DK3hY0b/DT6HH09rjXo46mcIENkPxeel6uK+0ziIvnrjWZZY5BiSFJEbLaUMJVrKIKzyk6XXKiqoIJ4pCWgj9KNwwCVkQyOZTXaxjZs3AFVK2Vs9Q0TdIgCpubSYBwFLADFDUGDQtiQABSf5FizsCfOb8fnascd0mptNjfJ8HCOfPQBleJ+b3kSfno3v2xrdSJ3qcD3gSIuygxFLtJ7jTYfPQma1NOPaXG+asUE9/L3OywvqJCoz48PH1aHDwpT9jw2PrGTdNs065eMyOdFpC7UdZWvkk1lNqTognB4JqX8/xTbf6RFIj7JKmHFwejj1LuzPpZP1VLUw9CUQKcmPVgkxvalcs+MmhW3itb6GfjX8iPps9c2B/YM099uDym63sweOpPjqQbaTOkYoSuo/Qlrin1ehO6+mKvD+BXha4mgyf30jlzYN8N2V0xHFVvN0WVtCETNkARB3cibQM9J3u+lxrxIA0UB37l+T+UC/1Z8UX4dyb9OKqOnG4T0SRFZ3bZRghtv0P38ZqPzyp7Awy4vu93A292KEq4l+h37fiQnbvjFDyK/MZgg0yYMJ8Ikv/ry3iIrrZmbjjwvHx7JqLEHifHNkYgHnwMPkQv2hqNMBiH8TdXETWDviC54uXffbG1iWNETTQXYARwFOjcNhUXSPcf0nDadjRYiF9GTHDk3G4hmCwvzlPn1fVWm2Jlks+N910i2xEEj6geZjMILaOtGyU3XlUY2d0f7as89dkAuzek3UqwICbUoCZ09JKo3Ca77QaPLs1BFqjGPy0KJ0dUCMM0O2F1fCUh3BtVOzt7bV6wBZEDb8UmSQ70Quha0pmS98sasUlEqHbCUm6aZ6aONWJ4rqvZWZcHFZKpszzqdYE+iQvQqKOwwDvCAjgZnhLQi+vMsEKVkY0H3Mun9M5zI6+oINVvqvunbXVqRu2soioen1e3UFbXEFfXoBfhmwcOEX0Oaw3pni5nFJ4c5Giw/jnwEJF7RzWt1x6Y6NBPf1H4tUx309f9sFY9FrCIrksgVh44u++Gmcz6ggJIRX2AKlwt5UKluemtj8ayYPAt1f5McQLVKUC54lApCnV0+cpRz+4v5CCbU8RKSrOXnBULKyberENDcYKxUErxiKoI1006SDNMfUSBAULCVmiw4GX0D3vbKROfBwuLNpaRB8tK2VEvrX3RZMNCezcLfV1V0AqVbBjhLMWWNvUDuztMR4AvKJmawh7Wd06mr1jtdhLx2ef/WD7T7+/84c5/Y98gZNUzBoHccF06U5/trgWy5JSeMfXB2Y3nqCPhXEZNpRer8U5MaevveLWdDW577mfCxqQZCYk3W38GJkdBpWRbftzszZarWCqbjozYoZt5D4SFuBzcaORGMRi8uUEEJMtrN24HLk9RIOeazlXZA7p65idptsxS5+RQCozpKK1pav2wAz4eFtQbAZkRCuoTbYSpUkNZseQSHZfcY2WcY3qGozSga+89lbmTfuWrbPKsHlJCrSAijwylnGycWJnJ1FtnqZPehYm1KwBTMaYXsWxSQ9R0IY+L0joRU3va+2t0B6kfDD7JbINs6POiKfF4Bpm1vihCrdRJ1fv4+M1eWevTLN8P4V7l/owz/d3q+lHvJ2wZMo03W5xpsV+5Ner5QOnsjbPXH+zr1h4E3+rxYdnshXBa+X2jetRrL8tgz9/6SDOHEWHqUmenWTqgLrgYZqv2LMhBSPb6d/PTMBpyr40nWCztas+nZdgduks6+Y56jFpgJAZDIuodZG9kCnluP/nweDz5enp1rMyCvpVe8u9Favp60ivUmB554VLMvCZHPju8ZzEj/4vP/6X8B/pzFP+uxOfixvVG4k+cimHkp9rO7YkJ5AYW7DwNL+Tivr7avFgAdqH8Kr44QlVmXJdy7Vno1c1ruc5EHnkSiR6sNPbCDevqmh1gLhfbY9Npo1yeiavNjZjY+ZZNij6Pu8eo3rCdaMdWMXZ9Fy9srSLBbjDsd3quehOR6WsWQUTr22f1FPz2uhQL86JG55ETlpZLs1r07Ypn8p0HDlAwEYqykd+uqJ04p4ZByNJQCKUTQJ16gXBHqabFDt9IEJOgIojIh7VXAjUeZpZAkuuxUWaCXr4qc5gu6SyhYgZg7p5MMpR5CJvjHO4qbK4OkBTlFwVB6AIYaGS44+y+TPFJgV+wmGjTccp00kq5r9AOKfbYYsP5XMrTKllRxqATmbWoxGQvGVNJ6XFYjcwhLUj5iXVEP/gWJQGYToUE0ogRKgIaDCrIHCm0qYoGVDg6k+JjicJalL5axTqyhWetPywYx3JAf/k7rqfXaXhqLNRexlldPVmNTpYZJYlHhTr3+z/1JpRh5CN/lvF0Cip/tT8vIMive2fVY7c3yvb67Z66DCHoI3skxXw2kyOXj2Azu/JYkwKQuK7ckR2HypvH7fVhBfZrcq+aLpAwvZMZ/cjpbowzsI417xWwE1gX97O+BbzBJcUA+qsiFAl1fLNETjFuoQuUpV7X1mc6M0/SdpkZ1Vwsh+em30wcXT0bgjXULiiRQRUAABsAf0Rqv+PoDhdBV6qnITOg2LsHkD9Mw2E9D7oBFf1fpRvM4x2MqgJ4DacmumZbJQ7ksZ5Eb+Pv0Yu/yB4zm5t2kFc8qCdRuw3nolvtxHaz3VpZsEUw81S2u58p1a4LGBd3djMoT9toQjXgMrU713F60xFKr9jSt+v5uu4RtKtn2/6uy4pobYGuUM9EBGR04+nlIvab8Se1v1eXdrmKYHJr2BaYh82RY76J7NKOVxtdjxfMTWdCzTMI8L8wqhkkn6yDhJuCtREqHIxXUdQcyppZmBmeo9GxKsGjELlkuBSdBuCYqczfs5B6xGsGdYvCjO6ozkJ7YnRFDz22k4WEwxv9CM7TYUph1U2nV490r9+MwMpMGkMZcDfNlE4T70o5p2xnMfFoMP2QnlUzI5WDvnyGWhUwfvQqRR39dlOIZIG/SgEgYA9fM/0LyqBwhYE9uhOpluwOn0q9qPtL/W5KT3ibV7NqVU7L0pQgwLPhXkyao+BPTiq5mDvbL6kVlVPvEzl3002x5/2wcF9G6GLjKgotQEhq4fKwufvpvdm63JW1g4eCDxO7i7ecTPZmI7N3nO3tVvkyobOwn13kr7bFxbhWa2dtbl3d84Uc+PSdvR8tx3N6ps0hL/d18k01CXf8vZkfItZpDx8Iw1BW6tSYTUFJUBmu6Hj6LZPCzYq69G/4k1SlHrse2I9TTdRIpEDl+ub0QLvd2qVFU9Ur6wzvjD/6/N/IpX5r53+LnjiLlVyUHFltbgQKkCq2HMWuokviNqcb48S9Jx57oIc
i6bmzVHthq8AOkmAmdhDJ7VADhyfyKT8k4N39tx2xIL/x5KjTE2JWJzeRN+UeJqLIuxYDTd6DbJ12X5jdmIvBNI5TO8YCfwJ5zd9pVDkUtm9Z3+jifNNs0H6tGXjFxNE+ABJ7AuXhpXST9e5YqRx1G5XciaAChQqOjB5sZBAIbrP1MN8ielpslKY1cuNSDnZwacReJoPUH7oo1oLF0lS04EbhlNJjEdp0SeHYFLXHMSsam/AKUdUUS/3C216dskI025g5l0HQtpQPzaDEAfRQzNKyDYIJFQYND887uS+jTs5lAUAg+TwEg6VrFnJquITrZS2WsNlvR+VC6BGJzmQ7OFHfM2o3b5QvqcmRqljtVp/W5T2RDIo75bPsU2Uxc/ZqGT72QDlLSSazj3z/pfKZkpUI0l2ce+FDL1BhxSMl2Zhe4V4b3XnJ7WXN3ezMJx/V0wGEeTPfhr69Sl9qPlIqqgAx6sJZeDjqD4s7fT+krrSh+4P0EtKaei8bkryokoMCwjDSjJC4QMCk8FNK1ARZ+un+u3QibKuz2ePgJpDJsNXo/ZN9W0nqyGD/S8eO6o/RnfvDd43cd0FQwf8WNvZZoDoTSXKZMhXpgz+xRZ30gFHsSZfaQQLuM53ZphnluoxeKf/8+e/KH+jfFH8YNVZkBNRvDVI6S5RlZ1LPNOVznkGWLFq/tDymGFhW7efZJ2o5Vsi0C/6TbRdRFfZy2Y/inAZ/urFuxVYVGW+4xRia/s3MBO1Z/wZxYSLsuVNZgG8incGo13d92c0pGPC/XoGHBHD/JWMHV9ZsV7xFlychugRAiY3wRXRasafjbURMXEhWm8HF5oVWytME2Y6/xuXT7fo1Tl1YTmHNuJHry6iiQqluabocbBcGPGx848s1hY9FR02F5MSAidmUPiEleHUZoc9iu2CBDuL1i0r7aXIO+QVeQHdyvvwj+e3dpyGHYrLph+DpnhcJiHVSZz3Fgh+VGGWyojauwF4RsiUZFMZAuocGgYP1odEQZWEJBPQzgpX/KFeCR4R5wiTZw2xWAF1kwWKAkp2IJ9amkAfxnUc90E6hSFdRrwhHWLPFuUniKpaFB0CKoMRSV6loeNgLWBslX6dKdlkVo5SyYFWkgbK+MNha0iuZUapY9JvSJDuhcNgABA1SBlAupVhkOzVmz2W54lkPKNV0kuYz+2yVKdG3dzIZKCoC4qeMq/ELfOrp4jno71Um7Y0U5OiDmJtkZZ8mIrpZZ6KwwudW5VnQYGMkrFyO6QawDVP91YNDM2numeMvGQpfrhhSpv4y1Ku0EuV8L3vVybTWpXO1SukbgP9clfSTEVU0VIyLHKsWA6Klrug6lzxuhsUqBjLoEyBfRnVqUL1gZ1p8yv1opayhOFD1D+tJNknMnrx7L8sXzswhsyNHx2t9abVn5F+pqsT08uloPMzpomrr+sWpoYOuGhf0OnubIrfgB4EOSzNMxL5bpapoqUDWn3lzzzS69pL1SKuhqk59xqM1JYblKB95XxqhU1mIAsayJojGwasE7C+0C/TVM9MYu7R6NmTDXrpPucMT4FIKhVR7Jx8vq9eT0aQXJpjcjxiqb9TwGL6/IVXTBjdVB4drBLNQEGOiCRnQcspzwRZUK0WZUEUGI1IXzLlZTpPdKZg5Ai7BKp3LqjJ2QwlL+Zbqjq88/3fyu/rtnf9Od/gQvKfOaalTiokKg6icub9ko6ZOYy1W3FiSXJ9HQAjzp3new+ykrbFdx//gkl3Mk/7DBxxPeQMLNMbNP3/Y/Jnb9i4OLldLHkJvcbftoEhY//sJB+V1G/sCNoAy8eNM5yWAqh/Mkg6JtrraPAGs+vqwo3Ju2eidswB9aT1gf7uko152s+zhQHy/orTVeDNpFQBKdBP0fKTXpiroqCaF6iuIJKEYBnSWXepxNFmQQYRZtS50mdPdBqUYmqWgQQEIxpKKcSSILpaPsWAYGtsAK0BB66MgKQ7saeA+jHwQXO1dXivT8+rQ5HCUD4CS9Ip942dpWxwBeMrYVpeu9VNjDlaaPZfqEs5oSsShpJjthuRukqIAtw+dc5QnAn/1gt77B/+WIoSElQgevUJfwKcaIhG/I9Mka4K/ewCMVhJ6Jp/v21PV1ww+Domui4QOqwdG1aNwLsHLUElhdI8ibpPoyuYhSSlLh2hcSTHEwvcC5Rg1QFqz4j96VYRPxTxypnTSo10f9Qw96HfcfqLGTn0sTxvresnPf/b4e4Nf0ipdqItCTkLPngx+3MMRLrENtWEOKmw9JXMKWXVSDZrdWWVm7HkwTvc+XXyyb/InpqKLmoTip47fKUv9BhUUqpbiQcja7NjeU7pHZ5GuwoTDrV+06i2XH1IHdSlkRY2NR43xW89/X/60frbzX1BjUAa0jHi0vINgnDsvOnm7wi3qOesQnG9LkMEWf94f9CM+lLUM1/Sqy+g5seHSox8VF1fx/xHFFR9WtL5Y2jLkSVGxsnVJXa1fyJyciUhvjNj+uC+9mRFjWzS4YMmleA7wRtZR+ZA3OdvmgOo9L0wYVDA1SPlp9wAQJWWejpL87j7UrsdAOlIbl7WFHIpatmWG1pYymGUqUckmxxSlAF10mJb2oBmaFRnjjAYllYMDi/O0oDrX7PPePUCbO+1lTU2Hp8/1fZ7s4imzGYVVdJRUONLfGSYgl0Or3/ZJj0rChDV0MiOdzDRl9UzR3Tx0YoEufCTXQe1aX+mjRn0tQbEOvQ+bwlsR3Z+e5xdVIUSoe+88AQGRQvQyfU8/mLZ3V5/R9UD5JqnlAaP9WCQM2QioBTVKzEgehjzUiaYg3djhR88/W74/3/vpLH3kKSS3aZ7jHC9KkXs6eAU1wD3jHqbiUf7LVe/X/GuH/ff3DyA0oM006Fk6evld56r+YjX4MZ00VSLbnIrf4CKRqqLzUylMaU5cstJq6E78y7vhgQ13B29Dokd8/fmfyAv9KSER+02krW/6D2JlOtj+tovGsf86x7qu0/fsb8vmIlZvU1SWw/kN573PhghcvF0yc4OLyYjtSy6u2Ua8QwfEYek5lnSrx1ydsnvU5QtsDoMBBbjsYL1zgYy5KBS6opVwJPptOj1QptBvLq5XNw4wXQ37NCKn6Y97Md+tks8PqaZIG+/oekPfFtUdRSsDSG1fT1O6rFJkGJTK3MPzniomBT5aJeKm3EhK2gChzkceSB1APyvmLBhmSZ+ykCAyARU2WVZ72wyrCmYOqZ6BzqQjhFZROSa85VU7JqrMQQOaFGfKM+MDnkBRg84Wrcu8Ouv5MqG47SrFrn30hGMJNaPvhMJrqV/zg8KulnCPSE9yYI9hQO4KU9LRpUMwXBZn+8zA+3rdfn5viN2axiK/VA8OLHS20v5Q5+9Mxl/Jsr2E2t7KpRnUb0NAFUox9uHZ68XJ/lSoUNFznWqVzUPzlxaTV1ygLq/o5WnmTLRZNao9cHdfG41fWiZy1MK1qja/DFETSi1e1SqzUbs7AdYeWh5ZQlHmSE2Ox69bNbXZ2gS/l/WPmiGFGUqiSTlCNqazWGgLjoFzstz1Izzf333+p3KmvyCOEYsHQy
ZNwRIJrGyWvkSJAiDYOrZjS6jbcPSdCfN/OWWeYhmwYcQJBfVSYHvONK0rXiyvV8uWEfcoVKbM+uZA2rLM1XLBNg+nYtV5NlAJc8IANIaAMbqGSUbxWb+6WU7j0QUAB/Oa68grRd1zuZ3MxMrrakA1ksEbAnyOmtM1xGpWV4sVIHKAxWKusrri310NoGeHnd/11XouOhErjGXPk1/JptHlj6dZW+F/KIcXVL/L3thmFT8zOsfoTmZ1ZlmYdsw0O8eoK3RARdcmJZ29Y54JWDGaOaNJ+4lIBT1aQRU6E9Nli823pqA3UrwKBvYdGv7YQmvecFv2XnMlEB+F86uiVyOwRzOgqLngOIcIj92UiKwl+jB2K2MVS6q7sT+GAPUYGxGn36czkPSCouTUYydMXn2zjK2wolSyHCZVv4ZUlTrJf6a+sNVq83jc+rpwOfy52SCINQcKuDNZSb/VlDp6i+SoLj9e5PN1kmW73v947t94Itczdxg8wHysNp/sZRpKnrnXckDtkJ5AEwdwcq/M2eR61PS+9FqRzuvXUAw5mdMB6Vfj7L55uem996R+pWxz08sURFs67zp8A3gxeFvMlWK/eirJYHTh2OMIOCDqFjW8wxMBzvcfP/8evfGv0LfcE/d3du7MNad9LhViPF136qybD++SDYs+MqJkabpXdbPGhP3EEIh5JCA679Vk+wM9RRKR3nSaVku7iLpNm23ZvgVlYMG2HXTgX+LGgOI+dxPdMPDGASyiy/bEopDRqxHAl/g/21LPwLTAm9VA/JhIlaS/kisIN1BCuTwc3BAPo4jb4kUbkPyHJFUpHrrZbmqPgo+EJ0GlLjV8qbC7qsph/mBq3EuqfMz+sC9DMlJ+SvfBO4wYQq8SszR7mAw8u6mWVQK9Mr0ydQkIOFBcGhZ7QGmwyDr8/tivzwABCuwEUEMml7uuOPUzuNnQvc07HVnU0ZFjmJTo9sUEJCYNTgvYLKCxWDTunrpFoMAzyCo61mGULLyaUI0EvrQI4oPv5LtQpcxgheILyDcovaGERw+8uKSHKthcBewoqN0NVPf35MiYQuefvTzy9VgVnxq+92QY/qrcf9S7finx7xaDYzHYK5bz8KiYvlz8lUHz3uvavmb9pYEmbSX2yuITcuploE61gV5Lrt09VcJjUcboguuP/sjIXGqW0QKTGN8TMG5QXDB5FuTAVVrkWlPCsrZMLCUjytnUKjtvMw8XQnknmiKlMwz11cn88oEcz4v6J6lmHbSyrnH06YpOy/Ju7fdNWum+KSv35cxU3k+drpPGigysQZP9QQ2353Qsxf5Rsz9kq6VrIPeKhvqCwc7PPf8D+b/U5Y7ZqXfGO4ud453znX+x8693/tPO/9j5PgXEWkzFvliKtbgnNlQsvS7e3Nk53Lwi0EBQhZNAPWC9OaTi/GpzIZ/xAmA5vB5aqtxYsvzqkqrcgWFBN8uWBBT/gYXsmwUywH25vrJzNqy/n1xfra6oowfRFltpOpEwJU6iNbG96qZ6QEXdmBeLZ6LTW+emZZ60dLLQxgy7w8m46jPGghq77A/psM/FIT7uyi5A22LIiLEDFHoNqx3g1c31koeVpWhaiBlw+gXOxAyv15Y+yeraRiGKa9OnazC0g/615SV7Q6+4T1em2TCSmr9AiTrPJmto/uwd/BxV+fpLOT0uv1F/8F/vU/+ozpYN4JPJPzvb7LVLN0iycjJU4tfkl66mp9a9PpgEe7pZnCrt6blJPq/ocbBl5cWEavLjvfEd6vmHTuXUGtrC9ZNAndfnS+eWiLH9wrCwmxjShZbzbOlHyHz+8llhF358ULr2IMucG1J9uXQeauoVHCwTwacoVHL9kR++K0ZZOoYdyY/gr5fHZs/QO/6CpMc5uN5odt1CGIUq1s/Rl9P0kr4SOtj13uZMpnf+Vm/2pcaoXOn/2M7NPWXMpFAN9fe6OIBQa8irs0FIH4L3dk89VLPZN3rvPvxTTMv04V9YrX/90T+6ntQfn3zw3U9cnR8Uo/Dut6/eppP+m40xr1ZVvT771b+biM1k8uQlf+/4o4d1Qxk1yYAs79ct2Jyr1aSdzUJYurEMY2DDINMj/+il3hiBaRiGsKjnvbiiHBzcS/LElKeDHtVzInXZKz+SyMf0GeeuHgbfa77SJgOjSu8Lam5SKf+6qKhRdI95jPqL3g2cvaaK3f6gPC7yq484HUbi6xiPruvHjw9++MfvFiKxB1SGnNmjqmfwTLyxhlTDD96++rb0afib70FUblEWf/mwPbg4+/jea9lPfjPra/1N0XzhC9h5JtkHX36n9ZTOZ/b1+Xx99DNPTv/GLd/zlu95y/e85Xv+P+d73saZ2zhzG2du48xtnLmNM7dx5jbO3MaZ2zhzG2du48xtnLmNM7dx5jbO3MaZ2zhzG2du48xtnPn/O87Aj3tHf0N/Y0fu7PQOegf6G9//Gn7u7Pwf/gBC4AB42mNgZGBgAOLrh+yy4vltvjLIczCAwLVpRt8hdNd2Bob/daytbCAuBwMTiAIAS7ELYwAAeNpjYGRgYP31PwpI/mYAAtZWBkYGVOAOAGrhBCwAAAB42m3PKwsCQRSG4W+W3W4x223aTRq8gMltWmwaDCJaFkHQaLIY7IIY7dsEwWITi/4Ag128vMMOWBx4+M6Zw8ww3l0lsbyN5B8tk0YNLfoTLtQdLDBAFn2sUECUzHQjy8hhjQl7FTIF7jFDNJBhf4cHdZ28kk20UaXfk0uMELpz9s0iswPZI7fkFDH12Z+r687/wd9eUvD8pXljzKc/TkyfJ8Mk7SyYSV80s1AveNpjYGDQgUCmQ6wu7HGc87jf8F7h3yeUJyolXiE5Q3qVrJP8NCU5lR61OxoxWlk6cron9KsMFxnfMa0w/2JlZb3HzsIxznmS6xv3OZ4LvKf4fPC7E1ARpBOyJSwufEukStS2mKy4sISupH0pn9LVMqOy+/DCOQA/+TbcAHjaY2BkYGBwZ3JlEGMAASYgZmQAiTkw6IEEABOHARgAeNqNUV1LAkEUPaMWSmASEdFDLL63mrkmBkEEPhTEkn08t36ktLm2jtZTv6Mf0lO/oOwX9Fd66szstIkWxGWWM+eee++5swCW8YIkRCoDYJMnwgIrvEU4gSyKBidRxb7BKeQxNngBD3gyeBF5kTM4jVVRMjiDqqgbvISKeDT4FWvi2eA3FMXE4Amy4tPgd+QS6Qh/JLGRWEeJnrbpxoKLEAG/I3jw0UMTV7hEm+HyBBiQbeOU55oan9mQlTbrVezhHMfUnxDNV23N1M0rrBnFBW8hhvQRoM/s9CQXDTJF7fyH7VIp6Vrpx3GFzd2qzN6y642eJ9Ehqzb0uL0Nh6eCMnYZzj+8//ZOB0SediyZs3BIrqd1FlEfrT/et0u95Jwhaigw7nXYZEI9f1prkwnpo6A9etxCbSrjTc+oVu94pCda2CGncg57l/kGNSKHzPcfb1HdoVbtJemgqfsPOG3EWz3u3sAdGbVNyAr/CzM7bOd42m3Mx04CYRhG4fOCgAURvQa7qP/8zFAsi4k6NgTsla1KYgwbF168C
cp87jibZ3fIMOqnzyvjOgZllCXLIksss8Iqa6yzQYVNttjGEeCpEhJRo06DJjvsssc+hxyR/D5OOOWMc1pc0KZDl0uuuOaGW+6454FHnnjmhZ4mlFNeBU1qStOaUVGzKmlOZc1rIf/28T14D1J84euz71zs/vTO/RuY3qyaoRmZNbNuNsymGaf6JDVKjZKDIVmuNI4AAAAAAVpw2jcAAA==) format('woff'); - font-weight: normal; - font-style: normal; - -} - -.weepeople { - font-family: "WeePeople"; -} \ No newline at end of file diff --git a/spaces/mfrashad/ClothingGAN/models/stylegan2/stylegan2-pytorch/lpips/base_model.py b/spaces/mfrashad/ClothingGAN/models/stylegan2/stylegan2-pytorch/lpips/base_model.py deleted file mode 100644 index 8de1d16f0c7fa52d8067139abc6e769e96d0a6a1..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/ClothingGAN/models/stylegan2/stylegan2-pytorch/lpips/base_model.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import numpy as np -import torch -from torch.autograd import Variable -from pdb import set_trace as st -from IPython import embed - -class BaseModel(): - def __init__(self): - pass; - - def name(self): - return 'BaseModel' - - def initialize(self, use_gpu=True, gpu_ids=[0]): - self.use_gpu = use_gpu - self.gpu_ids = gpu_ids - - def forward(self): - pass - - def get_image_paths(self): - pass - - def optimize_parameters(self): - pass - - def get_current_visuals(self): - return self.input - - def get_current_errors(self): - return {} - - def save(self, label): - pass - - # helper saving function that can be used by subclasses - def save_network(self, network, path, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(path, save_filename) - torch.save(network.state_dict(), save_path) - - # helper loading function that can be used by subclasses - def load_network(self, network, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(self.save_dir, save_filename) - print('Loading network from %s'%save_path) - network.load_state_dict(torch.load(save_path)) - - def update_learning_rate(): - pass - - def get_image_paths(self): - return self.image_paths - - def save_done(self, flag=False): - np.save(os.path.join(self.save_dir, 'done_flag'),flag) - np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i') diff --git a/spaces/michaelthwan/digest-everything-gpt/digester/chatgpt_service.py b/spaces/michaelthwan/digest-everything-gpt/digester/chatgpt_service.py deleted file mode 100644 index adfc260fad8901fe0dc5301554023b50ec9312a7..0000000000000000000000000000000000000000 --- a/spaces/michaelthwan/digest-everything-gpt/digester/chatgpt_service.py +++ /dev/null @@ -1,293 +0,0 @@ -import json -import logging -import re -import threading -import time -import traceback - -import requests - -from digester.util import get_config, Prompt, get_token, get_first_n_tokens_and_remaining, provide_text_with_css, GradioInputs - -timeout_bot_msg = "Request timeout. Network error" -SYSTEM_PROMPT = "Be a assistant to digest youtube, podcast content to give summaries and insights" - -TIMEOUT_MSG = f'{provide_text_with_css("ERROR", "red")} Request timeout.' -TOKEN_EXCEED_MSG = f'{provide_text_with_css("ERROR", "red")} Exceed token but it should not happen and should be splitted.' 
- -# This piece of code heavily reference -# - https://github.com/GaiZhenbiao/ChuanhuChatGPT -# - https://github.com/binary-husky/chatgpt_academic - - -config = get_config() - - -class LLMService: - @staticmethod - def report_exception(chatbot, history, chat_input, chat_output): - chatbot.append((chat_input, chat_output)) - history.append(chat_input) - history.append(chat_output) - - @staticmethod - def get_full_error(chunk, stream_response): - while True: - try: - chunk += next(stream_response) - except: - break - return chunk - - @staticmethod - def generate_payload(api_key, gpt_model, inputs, history, stream): - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {api_key}" - } - - conversation_cnt = len(history) // 2 - - messages = [{"role": "system", "content": SYSTEM_PROMPT}] - if conversation_cnt: - for index in range(0, 2 * conversation_cnt, 2): - what_i_have_asked = {} - what_i_have_asked["role"] = "user" - what_i_have_asked["content"] = history[index] - what_gpt_answer = {} - what_gpt_answer["role"] = "assistant" - what_gpt_answer["content"] = history[index + 1] - if what_i_have_asked["content"] != "": - if what_gpt_answer["content"] == "": continue - if what_gpt_answer["content"] == timeout_bot_msg: continue - messages.append(what_i_have_asked) - messages.append(what_gpt_answer) - else: - messages[-1]['content'] = what_gpt_answer['content'] - - what_i_ask_now = {} - what_i_ask_now["role"] = "user" - what_i_ask_now["content"] = inputs - messages.append(what_i_ask_now) - - payload = { - "model": gpt_model, - "messages": messages, - "temperature": 1.0, - "top_p": 1.0, - "n": 1, - "stream": stream, - "presence_penalty": 0, - "frequency_penalty": 0, - } - - print(f"generate_payload() LLM: {gpt_model}, conversation_cnt: {conversation_cnt}") - print(f"\n[[[[[INPUT]]]]]\n{inputs}") - print(f"[[[[[OUTPUT]]]]]") - return headers, payload - - -class ChatGPTService: - @staticmethod - def say(user_say, chatbot_say, chatbot, history, status, source_md, is_append=True): - if is_append: - chatbot.append((user_say, chatbot_say)) - else: - chatbot[-1] = (user_say, chatbot_say) - yield chatbot, history, status, source_md - - @staticmethod - def say_using_ginputs(user_say, chatbot_say, status, g_inputs: GradioInputs, is_append=True): - yield from ChatGPTService.say(user_say, chatbot_say, - g_inputs.chatbot, g_inputs.history, status, g_inputs.source_md, is_append) - - @staticmethod - def get_reduce_token_percent(text): - try: - pattern = r"(\d+)\s+tokens\b" - match = re.findall(pattern, text) - EXCEED_ALLO = 500 - max_limit = float(match[0]) - EXCEED_ALLO - current_tokens = float(match[1]) - ratio = max_limit / current_tokens - assert ratio > 0 and ratio < 1 - return ratio, str(int(current_tokens - max_limit)) - except: - return 0.5, 'Unknown' - - @staticmethod - def trigger_callgpt_pipeline(prompt_obj: Prompt, prompt_show_user: str, g_inputs: GradioInputs, is_timestamp=False): - chatbot, history, source_md, api_key, gpt_model = g_inputs.chatbot, g_inputs.history, f"[{g_inputs.source_textbox}] {g_inputs.source_target_textbox}", g_inputs.apikey_textbox, g_inputs.gpt_model_textbox - yield from ChatGPTService.say(prompt_show_user, f"{provide_text_with_css('INFO', 'blue')} waiting for ChatGPT's response.", chatbot, history, "Success", source_md) - - prompts = ChatGPTService.split_prompt_content(prompt_obj, is_timestamp) - full_gpt_response = "" - for i, prompt in enumerate(prompts): - yield from ChatGPTService.say(None, f"{provide_text_with_css('INFO', 'blue')} Processing Batch 
{i + 1} / {len(prompts)}", - chatbot, history, "Success", source_md) - prompt_str = f"{prompt.prompt_prefix}{prompt.prompt_main}{prompt.prompt_suffix}" - - gpt_response = yield from ChatGPTService.single_call_chatgpt_with_handling( - source_md, prompt_str, prompt_show_user, chatbot, api_key, gpt_model, history=[] - ) - - chatbot[-1] = (prompt_show_user, gpt_response) - # seems no need chat history now (have it later?) - # history.append(prompt_show_user) - # history.append(gpt_response) - full_gpt_response += gpt_response + "\n" - yield chatbot, history, "Success", source_md # show gpt output - return full_gpt_response, len(prompts) - - @staticmethod - def split_prompt_content(prompt: Prompt, is_timestamp=False) -> list: - """ - Split the prompt.prompt_main into multiple parts, each part is less than tokens - Then return all prompts object - """ - prompts = [] - MAX_CONTENT_TOKEN = config.get('openai').get('content_token') - if not is_timestamp: - temp_prompt_main = prompt.prompt_main - while True: - if len(temp_prompt_main) == 0: - break - elif len(temp_prompt_main) < MAX_CONTENT_TOKEN: - prompts.append(Prompt(prompt_prefix=prompt.prompt_prefix, - prompt_main=temp_prompt_main, - prompt_suffix=prompt.prompt_suffix)) - break - else: - first, last = get_first_n_tokens_and_remaining(temp_prompt_main, MAX_CONTENT_TOKEN) - temp_prompt_main = last - prompts.append(Prompt(prompt_prefix=prompt.prompt_prefix, - prompt_main=first, - prompt_suffix=prompt.prompt_suffix)) - else: - # A bit ugly to handle the timestamped version and non-timestamped version in this matter. - # But make a working software first. - paragraphs_split_by_timestamp = [] - for sentence in prompt.prompt_main.split('\n'): - if sentence == "": - continue - - def is_start_with_timestamp(sentence): - return sentence[0].isdigit() and (sentence[1] == ":" or sentence[2] == ":") - - if is_start_with_timestamp(sentence): - paragraphs_split_by_timestamp.append(sentence) - else: - paragraphs_split_by_timestamp[-1] += sentence - - def extract_timestamp(paragraph): - return paragraph.split(' ')[0] - - def extract_minute(timestamp): - return int(timestamp.split(':')[0]) - - def append_prompt(prompt, prompts, temp_minute, temp_paragraph, temp_timestamp): - prompts.append(Prompt(prompt_prefix=prompt.prompt_prefix, - prompt_main=temp_paragraph, - prompt_suffix=prompt.prompt_suffix.format(first_timestamp=temp_timestamp, - second_minute=temp_minute + 2, - third_minute=temp_minute + 4) - # this formatting gives better result in one-shot learning / example. - # ie if it is the second+ splitted prompt, don't use 0:00 as the first timestamp example - # use the exact first timestamp of the splitted prompt - )) - - token_num_list = list(map(get_token, paragraphs_split_by_timestamp)) # e.g. [159, 160, 158, ..] - timestamp_list = list(map(extract_timestamp, paragraphs_split_by_timestamp)) # e.g. ['0:00', '0:32', '1:03' ..] - minute_list = list(map(extract_minute, timestamp_list)) # e.g. [0, 0, 1, ..] 
- - accumulated_token_num, temp_paragraph, temp_timestamp, temp_minute = 0, "", timestamp_list[0], minute_list[0] - for i, paragraph in enumerate(paragraphs_split_by_timestamp): - curr_token_num = token_num_list[i] - if accumulated_token_num + curr_token_num > MAX_CONTENT_TOKEN: - append_prompt(prompt, prompts, temp_minute, temp_paragraph, temp_timestamp) - accumulated_token_num, temp_paragraph = 0, "" - try: - temp_timestamp, temp_minute = timestamp_list[i + 1], minute_list[i + 1] - except IndexError: - temp_timestamp, temp_minute = timestamp_list[i], minute_list[i] # should be trivial. No more next part - else: - temp_paragraph += paragraph + "\n" - accumulated_token_num += curr_token_num - if accumulated_token_num > 0: # add back remaining - append_prompt(prompt, prompts, temp_minute, temp_paragraph, temp_timestamp) - return prompts - - @staticmethod - def single_call_chatgpt_with_handling(source_md, prompt_str: str, prompt_show_user: str, chatbot, api_key, gpt_model, history=[]): - gpt_response = yield from ChatGPTService.single_rest_call_chatgpt(api_key, prompt_str, gpt_model, chatbot, history=history) - if 'ERROR' in gpt_response: - raise Exception - return gpt_response - - @staticmethod - def single_rest_call_chatgpt(api_key, prompt_str: str, gpt_model, chatbot, history=[], observe_window=None): - """ - Single call chatgpt only. No handling on multiple call (it should be in upper caller multi_call_chatgpt_with_handling()) - - Support stream=True - - observe_window: used to pass the output across threads, most of the time just for the fancy visual effect, just leave it empty - - retry 2 times - """ - headers, payload = LLMService.generate_payload(api_key, gpt_model, prompt_str, history, stream=True) - - retry = 0 - while True: - try: - # make a POST request to the API endpoint, stream=False - response = requests.post(config['openai']['api_url'], headers=headers, - json=payload, stream=True, timeout=config['openai']['timeout_sec'] - ) - break - except requests.exceptions.ReadTimeout as e: - max_retry = config['openai']['max_retry'] - retry += 1 - traceback.print_exc() - if retry > max_retry: - raise TimeoutError - if max_retry != 0: - print(f'Request timeout. 
Retrying ({retry}/{max_retry}) ...') - - stream_response = response.iter_lines() - result = '' - while True: - try: - chunk = next(stream_response).decode() - except StopIteration: - break - if len(chunk) == 0: continue - if not chunk.startswith('data:'): - error_msg = LLMService.get_full_error(chunk.encode('utf8'), stream_response).decode() - if "reduce the length" in error_msg: - raise ConnectionAbortedError("OpenAI rejected the request:" + error_msg) - else: - raise RuntimeError("OpenAI rejected the request: " + error_msg) - json_data = json.loads(chunk.lstrip('data:'))['choices'][0] - delta = json_data["delta"] - if len(delta) == 0: break - if "role" in delta: continue - if "content" in delta: - result += delta["content"] - print(delta["content"], end='') - yield from ChatGPTService.say(None, result, chatbot, history, "Success", "", is_append=False) - if observe_window is not None: observe_window[0] += delta["content"] - else: - raise RuntimeError("Unexpected Json structure: " + delta) - if json_data['finish_reason'] == 'length': - raise ConnectionAbortedError("Completed normally with insufficient Tokens") - return result - - -if __name__ == '__main__': - import pickle - - prompt: Prompt = pickle.load(open('prompt.pkl', 'rb')) - prompts = ChatGPTService.split_prompt_content(prompt, is_timestamp=True) - for prompt in prompts: - print("=====================================") - print(prompt.prompt_prefix) - print(prompt.prompt_main) - print(prompt.prompt_suffix) diff --git a/spaces/mikkoar/marco/src/components/markdown.tsx b/spaces/mikkoar/marco/src/components/markdown.tsx deleted file mode 100644 index d4491467a1f14d1d72e535caac9c40636054e5df..0000000000000000000000000000000000000000 --- a/spaces/mikkoar/marco/src/components/markdown.tsx +++ /dev/null @@ -1,9 +0,0 @@ -import { FC, memo } from 'react' -import ReactMarkdown, { Options } from 'react-markdown' - -export const MemoizedReactMarkdown: FC = memo( - ReactMarkdown, - (prevProps, nextProps) => - prevProps.children === nextProps.children && - prevProps.className === nextProps.className -) diff --git a/spaces/mira-causality/counterfactuals/vae.py b/spaces/mira-causality/counterfactuals/vae.py deleted file mode 100644 index d8c4ed2836b3ee3c4305217a68b14dea92e92d0f..0000000000000000000000000000000000000000 --- a/spaces/mira-causality/counterfactuals/vae.py +++ /dev/null @@ -1,517 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.distributions as dist - -EPS = -9 # minimum logscale - - -@torch.jit.script -def gaussian_kl(q_loc, q_logscale, p_loc, p_logscale): - return ( - -0.5 - + p_logscale - - q_logscale - + 0.5 - * (q_logscale.exp().pow(2) + (q_loc - p_loc).pow(2)) - / p_logscale.exp().pow(2) - ) - - -@torch.jit.script -def sample_gaussian(loc, logscale): - return loc + logscale.exp() * torch.randn_like(loc) - - -class Block(nn.Module): - def __init__( - self, - in_width, - bottleneck, - out_width, - kernel_size=3, - residual=True, - down_rate=None, - version=None, - ): - super().__init__() - self.d = down_rate - self.residual = residual - padding = 0 if kernel_size == 1 else 1 - - if version == "light": # for ukbb - activation = nn.ReLU() - self.conv = nn.Sequential( - activation, - nn.Conv2d(in_width, bottleneck, kernel_size, 1, padding), - activation, - nn.Conv2d(bottleneck, out_width, kernel_size, 1, padding), - ) - else: # for morphomnist - activation = nn.GELU() - self.conv = nn.Sequential( - activation, - nn.Conv2d(in_width, bottleneck, 1, 1), - activation, - 
nn.Conv2d(bottleneck, bottleneck, kernel_size, 1, padding), - activation, - nn.Conv2d(bottleneck, bottleneck, kernel_size, 1, padding), - activation, - nn.Conv2d(bottleneck, out_width, 1, 1), - ) - - if self.residual and (self.d or in_width > out_width): - self.width_proj = nn.Conv2d(in_width, out_width, 1, 1) - - def forward(self, x): - out = self.conv(x) - if self.residual: - if x.shape[1] != out.shape[1]: - x = self.width_proj(x) - out = x + out - if self.d: - if isinstance(self.d, float): - out = F.adaptive_avg_pool2d(out, int(out.shape[-1] / self.d)) - else: - out = F.avg_pool2d(out, kernel_size=self.d, stride=self.d) - return out - - -class Encoder(nn.Module): - def __init__(self, args): - super().__init__() - # parse architecture - stages = [] - for i, stage in enumerate(args.enc_arch.split(",")): - start = stage.index("b") + 1 - end = stage.index("d") if "d" in stage else None - n_blocks = int(stage[start:end]) - - if i == 0: # define network stem - if n_blocks == 0 and "d" not in stage: - print("Using stride=2 conv encoder stem.") - self.stem = nn.Conv2d( - args.input_channels, - args.widths[1], - kernel_size=7, - stride=2, - padding=3, - ) - continue - else: - self.stem = nn.Conv2d( - args.input_channels, - args.widths[0], - kernel_size=7, - stride=1, - padding=3, - ) - - stages += [(args.widths[i], None) for _ in range(n_blocks)] - if "d" in stage: # downsampling block - stages += [(args.widths[i + 1], int(stage[stage.index("d") + 1]))] - blocks = [] - for i, (width, d) in enumerate(stages): - prev_width = stages[max(0, i - 1)][0] - bottleneck = int(prev_width / args.bottleneck) - blocks.append( - Block(prev_width, bottleneck, width, down_rate=d, version=args.vr) - ) - # scale weights of last conv layer in each block - for b in blocks: - b.conv[-1].weight.data *= np.sqrt(1 / len(blocks)) - self.blocks = nn.ModuleList(blocks) - - def forward(self, x): - x = self.stem(x) - acts = {} - for block in self.blocks: - x = block(x) - res = x.shape[2] - if res % 2 and res > 1: # pad if odd resolution - x = F.pad(x, [0, 1, 0, 1]) - acts[x.size(-1)] = x - return acts - - -class DecoderBlock(nn.Module): - def __init__(self, args, in_width, out_width, resolution): - super().__init__() - bottleneck = int(in_width / args.bottleneck) - self.res = resolution - self.stochastic = self.res <= args.z_max_res - self.z_dim = args.z_dim - self.cond_prior = args.cond_prior - k = 3 if self.res > 2 else 1 - - if self.cond_prior: # conditional prior - p_in_width = in_width + args.context_dim - else: # exogenous prior - p_in_width = in_width - # self.z_feat_proj = nn.Conv2d(self.z_dim + in_width, out_width, 1) - self.z_feat_proj = nn.Conv2d(self.z_dim + in_width, out_width, 1) - - self.prior = Block( - p_in_width, - bottleneck, - 2 * self.z_dim + in_width, - kernel_size=k, - residual=False, - version=args.vr, - ) - if self.stochastic: - self.posterior = Block( - 2 * in_width + args.context_dim, - bottleneck, - 2 * self.z_dim, - kernel_size=k, - residual=False, - version=args.vr, - ) - self.z_proj = nn.Conv2d(self.z_dim + args.context_dim, in_width, 1) - self.conv = Block( - in_width, bottleneck, out_width, kernel_size=k, version=args.vr - ) - - def forward_prior(self, z, pa=None, t=None): - if self.cond_prior: - z = torch.cat([z, pa], dim=1) - z = self.prior(z) - p_loc = z[:, : self.z_dim, ...] - p_logscale = z[:, self.z_dim : 2 * self.z_dim, ...] - p_features = z[:, 2 * self.z_dim :, ...] 
- if t is not None: - p_logscale = p_logscale + torch.tensor(t).to(z.device).log() - return p_loc, p_logscale, p_features - - def forward_posterior(self, z, pa, x, t=None): - h = torch.cat([z, pa, x], dim=1) - q_loc, q_logscale = self.posterior(h).chunk(2, dim=1) - if t is not None: - q_logscale = q_logscale + torch.tensor(t).to(z.device).log() - return q_loc, q_logscale - - -class Decoder(nn.Module): - def __init__(self, args): - super().__init__() - # parse architecture - stages = [] - for i, stage in enumerate(args.dec_arch.split(",")): - res = int(stage.split("b")[0]) - n_blocks = int(stage[stage.index("b") + 1 :]) - stages += [(res, args.widths[::-1][i]) for _ in range(n_blocks)] - self.blocks = [] - for i, (res, width) in enumerate(stages): - next_width = stages[min(len(stages) - 1, i + 1)][1] - self.blocks.append(DecoderBlock(args, width, next_width, res)) - self._scale_weights() - self.blocks = nn.ModuleList(self.blocks) - # bias params - self.all_res = list(np.unique([stages[i][0] for i in range(len(stages))])) - bias = [] - for i, res in enumerate(self.all_res): - if res <= args.bias_max_res: - bias.append( - nn.Parameter(torch.zeros(1, args.widths[::-1][i], res, res)) - ) - self.bias = nn.ParameterList(bias) - self.cond_prior = args.cond_prior - self.is_drop_cond = True if "mnist" in args.hps else False # hacky - - def _scale_weights(self): - scale = np.sqrt(1 / len(self.blocks)) - for b in self.blocks: - b.z_proj.weight.data *= scale - b.conv.conv[-1].weight.data *= scale - b.prior.conv[-1].weight.data *= 0.0 - - def forward(self, parents, x=None, t=None, abduct=False, latents=[]): - # learnt params for each resolution r - bias = {r.shape[2]: r for r in self.bias} - h = bias[1].repeat(parents.shape[0], 1, 1, 1) # h_init - z = h # for exogenous prior - # for conditioning dropout, stochastic path (p1), deterministic path (p2) - p1, p2 = self.drop_cond() if (self.training and self.cond_prior) else (1, 1) - - stats = [] - for i, block in enumerate(self.blocks): - res = block.res # current block resolution, e.g. 64x64 - pa = parents[..., :res, :res].clone() # select parents @ res - - if ( - self.is_drop_cond - ): # for morphomnist w/ conditioning dropout. Hacky, clean up later - pa_drop1 = pa.clone() - pa_drop1[:, 2:, ...] = pa_drop1[:, 2:, ...] * p1 - pa_drop2 = pa.clone() - pa_drop2[:, 2:, ...] = pa_drop2[:, 2:, ...] 
* p2 - else: # for ukbb - pa_drop1 = pa_drop2 = pa - - if h.size(-1) < res: # upsample previous layer output - b = bias[res] if res in bias.keys() else 0 # broadcasting - h = b + F.interpolate(h, scale_factor=res / h.shape[-1]) - - if block.cond_prior: # conditional prior: p(z_i | z_ 0: # if std_init=0, random init weights for diag cov - nn.init.zeros_(self.x_logscale.weight) - nn.init.constant_(self.x_logscale.bias, np.log(args.std_init)) - - covariance = args.x_like.split("_")[0] - if covariance == "fixed": - self.x_logscale.weight.requires_grad = False - self.x_logscale.bias.requires_grad = False - elif covariance == "shared": - self.x_logscale.weight.requires_grad = False - self.x_logscale.bias.requires_grad = True - elif covariance == "diag": - self.x_logscale.weight.requires_grad = True - self.x_logscale.bias.requires_grad = True - else: - NotImplementedError(f"{args.x_like} not implemented.") - - def forward(self, h, x=None, t=None): - loc, logscale = self.x_loc(h), self.x_logscale(h).clamp(min=EPS) - - # for RGB inputs - # if hasattr(self, 'channel_coeffs'): - # coeff = torch.tanh(self.channel_coeffs(h)) - # if x is None: # inference - # # loc = loc + logscale.exp() * torch.randn_like(loc) # random sampling - # f = lambda x: torch.clamp(x, min=-1, max=1) - # loc_red = f(loc[:,0,...]) - # loc_green = f(loc[:,1,...] + coeff[:,0,...] * loc_red) - # loc_blue = f(loc[:,2,...] + coeff[:,1,...] * loc_red + coeff[:,2,...] * loc_green) - # else: # training - # loc_red = loc[:,0,...] - # loc_green = loc[:,1,...] + coeff[:,0,...] * x[:,0,...] - # loc_blue = loc[:,2,...] + coeff[:,1,...] * x[:,0,...] + coeff[:,2,...] * x[:,1,...] - - # loc = torch.cat([loc_red.unsqueeze(1), - # loc_green.unsqueeze(1), loc_blue.unsqueeze(1)], dim=1) - - if t is not None: - logscale = logscale + torch.tensor(t).to(h.device).log() - return loc, logscale - - def approx_cdf(self, x): - return 0.5 * ( - 1.0 + torch.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * torch.pow(x, 3))) - ) - - def nll(self, h, x): - loc, logscale = self.forward(h, x) - centered_x = x - loc - inv_stdv = torch.exp(-logscale) - plus_in = inv_stdv * (centered_x + 1.0 / 255.0) - cdf_plus = self.approx_cdf(plus_in) - min_in = inv_stdv * (centered_x - 1.0 / 255.0) - cdf_min = self.approx_cdf(min_in) - log_cdf_plus = torch.log(cdf_plus.clamp(min=1e-12)) - log_one_minus_cdf_min = torch.log((1.0 - cdf_min).clamp(min=1e-12)) - cdf_delta = cdf_plus - cdf_min - log_probs = torch.where( - x < -0.999, - log_cdf_plus, - torch.where( - x > 0.999, log_one_minus_cdf_min, torch.log(cdf_delta.clamp(min=1e-12)) - ), - ) - return -1.0 * log_probs.mean(dim=(1, 2, 3)) - - def sample(self, h, return_loc=True, t=None): - if return_loc: - x, logscale = self.forward(h) - else: - loc, logscale = self.forward(h, t) - x = loc + torch.exp(logscale) * torch.randn_like(loc) - x = torch.clamp(x, min=-1.0, max=1.0) - return x, logscale.exp() - - -class HVAE(nn.Module): - def __init__(self, args): - super().__init__() - args.vr = "light" if "ukbb" in args.hps else None # hacky - self.encoder = Encoder(args) - self.decoder = Decoder(args) - if args.x_like.split("_")[1] == "dgauss": - self.likelihood = DGaussNet(args) - else: - NotImplementedError(f"{args.x_like} not implemented.") - self.cond_prior = args.cond_prior - self.free_bits = args.kl_free_bits - - def forward(self, x, parents, beta=1): - acts = self.encoder(x) - h, stats = self.decoder(parents=parents, x=acts) - nll_pp = self.likelihood.nll(h, x) - if self.free_bits > 0: - free_bits = 
torch.tensor(self.free_bits).type_as(nll_pp) - kl_pp = 0.0 - for stat in stats: - kl_pp += torch.maximum( - free_bits, stat["kl"].sum(dim=(2, 3)).mean(dim=0) - ).sum() - else: - kl_pp = torch.zeros_like(nll_pp) - for i, stat in enumerate(stats): - kl_pp += stat["kl"].sum(dim=(1, 2, 3)) - kl_pp = kl_pp / np.prod(x.shape[1:]) # per pixel - elbo = nll_pp.mean() + beta * kl_pp.mean() # negative elbo (free energy) - return dict(elbo=elbo, nll=nll_pp.mean(), kl=kl_pp.mean()) - - def sample(self, parents, return_loc=True, t=None): - h, _ = self.decoder(parents=parents, t=t) - return self.likelihood.sample(h, return_loc, t=t) - - def abduct(self, x, parents, cf_parents=None, alpha=0.5, t=None): - acts = self.encoder(x) - _, q_stats = self.decoder( - x=acts, parents=parents, abduct=True, t=t - ) # q(z|x,pa) - q_stats = [s["z"] for s in q_stats] - - if self.cond_prior and cf_parents is not None: - _, p_stats = self.decoder(parents=cf_parents, abduct=True, t=t) # p(z|pa*) - p_stats = [s["z"] for s in p_stats] - - cf_zs = [] - t = torch.tensor(t).to(x.device) # z* sampling temperature - - for i in range(len(q_stats)): - # from z_i ~ q(z_i | z_{dict: - const = {} - if gfp: - for f in solu_tag: - F = soluTag[f] - const[f"con_gfp_N_{f}"] = P+T+L1+F+L2+G+H - if not solu_tag: - const[f"con_gfp_N"] = P+T+L2+G+H - else: - for f in solu_tag: - F = soluTag[f] - const[f"con_N_{f}"] = P+T+L1+F+H - if not solu_tag: - const[f"con_N"] = P+T+H # TODO: verify this one - return const - -def run_C(P:str, gfp:bool, solu_tag:list)->dict: - const = {} - if gfp: - for f in solu_tag: - F = soluTag[f] - const[f"con_gfp_C_{f}"] = H+G+L2+F+L1+T+P - if not solu_tag: - const[f"con_gfp_C"] = H+G+L2+T+P - else: - for f in solu_tag: - F = soluTag[f] - const[f"con_C_{f}"] = H+F+L1+T+P - if not solu_tag: - const[f"con_C"] = H+T+P #TODO verify this - return const - -def build(P:str, solu_tag:list, GFP:bool, P_pos:str)->dict: - """ - This function calculate constructs to Protera's LAB considering: - -orientation N to C terminal, or reverse - -With/withou GFP - -Fusion Proteins - - usage = build( amino_sequence, solutags, GFP:True/false, P_pos:"N+C","N","C") - solutags availables: - 'MBP', 'SUMO', 'Fh8', 'GST', 'NusA', 'Thioredoxin', 'FLAG', 'S-tag', 'CBP', 'STREPII', 'BAP', 'NT11', 'Tab2', 'Z-basic', 'ProteinA', 'IMPACT', 'mysB', 'PolyR', 'c-myc', 'S', 'SBP-tag', 'Strep-tag', 'Twin-Strep-tag', 'HAT', 'BCCP', 'HaloTag' - - """ - if "all" in solu_tag: - solu_tag = list(soluTag.keys()) - - d1 = {} - d2 = {} - - if "C" in P_pos: - d2 = run_C(P, GFP, solu_tag) - - if "N" in P_pos: - d1 = run_N(P, GFP, solu_tag) - - return {**d1,**d2} \ No newline at end of file diff --git a/spaces/mrm8488/FlappyBirds/README.md b/spaces/mrm8488/FlappyBirds/README.md deleted file mode 100644 index 2235d4ba84a87f4060d3aa6e1ac15551a5e0f0cb..0000000000000000000000000000000000000000 --- a/spaces/mrm8488/FlappyBirds/README.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: FlappyBirds -emoji: 🐠 -colorFrom: pink -colorTo: purple -sdk: static -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. 
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/mrmocciai/rvc-models/config.py b/spaces/mrmocciai/rvc-models/config.py deleted file mode 100644 index c0c16e0017efbcaf250cb539a1d0edb4e83575e4..0000000000000000000000000000000000000000 --- a/spaces/mrmocciai/rvc-models/config.py +++ /dev/null @@ -1,88 +0,0 @@ -########################硬件参数######################## - -# 填写cuda:x, cpu 或 mps, x指代第几张卡,只支持 N卡 / Apple Silicon 加速 -device = "cuda:0" - -# 9-10-20-30-40系显卡无脑True,不影响质量,>=20显卡开启有加速 -is_half = True - -# 默认0用上所有线程,写数字限制CPU资源使用 -n_cpu = 0 - -########################硬件参数######################## - - -##################下为参数处理逻辑,勿动################## - -########################命令行参数######################## -import argparse - -parser = argparse.ArgumentParser() -parser.add_argument("--port", type=int, default=7865, help="Listen port") -parser.add_argument("--pycmd", type=str, default="python", help="Python command") -parser.add_argument("--colab", action="store_true", help="Launch in colab") -parser.add_argument( - "--noparallel", action="store_true", help="Disable parallel processing" -) -parser.add_argument( - "--noautoopen", action="store_true", help="Do not open in browser automatically" -) -cmd_opts, unknown = parser.parse_known_args() - -python_cmd = cmd_opts.pycmd -listen_port = cmd_opts.port -iscolab = cmd_opts.colab -noparallel = cmd_opts.noparallel -noautoopen = cmd_opts.noautoopen -########################命令行参数######################## - -import sys -import torch - - -# has_mps is only available in nightly pytorch (for now) and MasOS 12.3+. -# check `getattr` and try it for compatibility -def has_mps() -> bool: - if sys.platform != "darwin": - return False - else: - if not getattr(torch, "has_mps", False): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False - - -if not torch.cuda.is_available(): - if has_mps(): - print("没有发现支持的N卡, 使用MPS进行推理") - device = "mps" - else: - print("没有发现支持的N卡, 使用CPU进行推理") - device = "cpu" - is_half = False - -if device not in ["cpu", "mps"]: - gpu_name = torch.cuda.get_device_name(int(device.split(":")[-1])) - if "16" in gpu_name or "MX" in gpu_name: - print("16系显卡/MX系显卡强制单精度") - is_half = False - -from multiprocessing import cpu_count - -if n_cpu == 0: - n_cpu = cpu_count() -if is_half: - # 6G显存配置 - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 -else: - # 5G显存配置 - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/composite_encoder.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/composite_encoder.py deleted file mode 100644 index 4e20fe3a833a2d87876cbec294ad2bebfba7f591..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/composite_encoder.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .fairseq_encoder import FairseqEncoder - - -class CompositeEncoder(FairseqEncoder): - """ - A wrapper around a dictionary of :class:`FairseqEncoder` objects. - - We run forward on each encoder and return a dictionary of outputs. 
The first - encoder's dictionary is used for initialization. - - Args: - encoders (dict): a dictionary of :class:`FairseqEncoder` objects. - """ - - def __init__(self, encoders): - super().__init__(next(iter(encoders.values())).dictionary) - self.encoders = encoders - for key in self.encoders: - self.add_module(key, self.encoders[key]) - - def forward(self, src_tokens, src_lengths): - """ - Args: - src_tokens (LongTensor): tokens in the source language of shape - `(batch, src_len)` - src_lengths (LongTensor): lengths of each source sentence of shape - `(batch)` - - Returns: - dict: - the outputs from each Encoder - """ - encoder_out = {} - for key in self.encoders: - encoder_out[key] = self.encoders[key](src_tokens, src_lengths) - return encoder_out - - def reorder_encoder_out(self, encoder_out, new_order): - """Reorder encoder output according to new_order.""" - for key in self.encoders: - encoder_out[key] = self.encoders[key].reorder_encoder_out( - encoder_out[key], new_order - ) - return encoder_out - - def max_positions(self): - return min(self.encoders[key].max_positions() for key in self.encoders) - - def upgrade_state_dict(self, state_dict): - for key in self.encoders: - self.encoders[key].upgrade_state_dict(state_dict) - return state_dict diff --git a/spaces/mshukor/UnIVAL/run_scripts/caption/scaling_best/onlylinear/unival_caption_stage_1_s1_onlylinear.sh b/spaces/mshukor/UnIVAL/run_scripts/caption/scaling_best/onlylinear/unival_caption_stage_1_s1_onlylinear.sh deleted file mode 100644 index de9e8af7bf130810670e04ef3e9d1a81f4347665..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/run_scripts/caption/scaling_best/onlylinear/unival_caption_stage_1_s1_onlylinear.sh +++ /dev/null @@ -1,199 +0,0 @@ - - -# Number of GPUs per GPU worker -export GPUS_PER_NODE=8 -# Number of GPU workers, for single-worker training, please set to 1 -export NUM_NODES=$SLURM_NNODES -# The ip address of the rank-0 worker, for single-worker training, please set to localhost -master_addr=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1) -export MASTER_ADDR=$master_addr - -# The port for communication -export MASTER_PORT=12350 -# The rank of this worker, should be in {0, ..., WORKER_CNT-1}, for single-worker training, please set to 0 -export RANK=$SLURM_NODEID - -echo "MASTER_ADDR: $MASTER_ADDR" -echo "RANK :$RANK" -echo "NUM_NODES :$NUM_NODES" -echo "GPUS_PER_NODE :$GPUS_PER_NODE" - -export MIOPEN_USER_DB_PATH=/lus/home/NAT/gda2204/mshukor/.config/miopen_${MASTER_ADDR}_${SLURM_PROCID}/ - -echo "MIOPEN_USER_DB_PATH :$MIOPEN_USER_DB_PATH" - -num_workers=0 - - -exp_name=unival_caption_stage_1_s1_onlylinear - - - -ofa_dir=/lus/home/NAT/gda2204/mshukor/code/unival -base_data_dir=/lus/scratch/NAT/gda2204/SHARED/data -base_log_dir=/work/NAT/gda2204/mshukor/logs - - -save_dir=${base_log_dir}/ofa/checkpoints/caption/${exp_name} -log_dir=${save_dir} - -mkdir -p $log_dir $save_dir - -bpe_dir=${ofa_dir}/utils/BPE -user_dir=${ofa_dir}/ofa_module - - - -image_dir=${base_data_dir} - - -data_dir=${base_data_dir}/ofa/caption_data -data=${data_dir}/caption_stage1_train_1.tsv,${data_dir}/caption_stage1_train_2.tsv,${data_dir}/caption_stage1_train_3.tsv,${data_dir}/caption_stage1_train_4.tsv,${data_dir}/caption_stage1_train_5.tsv,${data_dir}/caption_stage1_train_6.tsv,${data_dir}/caption_stage1_train_7.tsv,${data_dir}/caption_stage1_train_8.tsv,${data_dir}/caption_stage1_train_9.tsv,${data_dir}/caption_stage1_train_10.tsv,${data_dir}/caption_val.tsv - 
-eval_cider_cached=${data_dir}/cider_cached_tokens/coco-valid-words.p - - -restore_file=${base_log_dir}/ofa/checkpoints/pretrain/unival_s1/checkpoint15.pt - -selected_cols=0,4,2 - -task=caption -arch=unival_base -pretrained_model= - - -criterion=adjust_label_smoothed_encouraging_loss -label_smoothing=0.1 -lr=5e-4 -max_epoch=25 -warmup_ratio=0.06 -batch_size=16 -update_freq=2 -resnet_drop_path_rate=0.0 -encoder_drop_path_rate=0.1 -decoder_drop_path_rate=0.1 -dropout=0.1 -attention_dropout=0.0 -max_src_length=80 -max_tgt_length=20 -num_bins=1000 -# patch_image_size=480 -drop_worst_ratio=0.2 - - -### -image_encoder_name=timm_resnet #vit_base_patch16_224 -patch_image_size=480 -resnet_type=resnet101 - -resnet_model_path=${base_log_dir}/pretrained_models/resnet101-5d3b4d8f.pth - -# video -video_encoder_name=all_resnext101 -patch_frame_size=384 -video_model_path=${base_log_dir}/pretrained_models/3dcnn/resnext-101-kinetics.pth #${base_log_dir}/pretrained_models/TimeSformer_divST_8x32_224_K600.pyth -num_frames=4 - -save_interval=1 -validate_interval_updates=2000 -save_interval_updates=0 - - -sample_patch_num='--sample-patch-num=784' # '' - -eval_args='--eval-args={"beam":5,"unnormalized":true,"temperature":1.0,"stop_on_max_len":true}' - -drop_worst_ratio=0.05 # modified from 0.2 for el -drop_best_ratio=0.05 -drop_best_after=6000 -log_end=0.75 # for el -# log_end=1. # for el - -for max_epoch in {$max_epoch,}; do - echo "max_epoch "${max_epoch} - for warmup_ratio in {0.06,}; do - echo "warmup_ratio "${warmup_ratio} - for drop_worst_after in {6000,}; do - echo "drop_worst_after "${drop_worst_after} - - log_file=${log_dir}/${max_epoch}"_"${warmup_ratio}"_"${drop_worst_after}".log" - save_path=${save_dir}/${max_epoch}"_"${warmup_ratio}"_"${drop_worst_after} - mkdir -p $save_path - - python3 -m torch.distributed.launch \ - --nnodes=${NUM_NODES} \ - --nproc_per_node=${GPUS_PER_NODE} \ - --master_port=${MASTER_PORT} \ - --node_rank=${RANK} \ - --master_addr=${MASTER_ADDR} \ - --use_env ${ofa_dir}/train.py \ - $data \ - --selected-cols=${selected_cols} \ - --bpe-dir=${bpe_dir} \ - --user-dir=${user_dir} \ - --restore-file=${restore_file} \ - --save-dir=${save_path} \ - --task=${task} \ - --arch=${arch} \ - --criterion=${criterion} \ - --label-smoothing=${label_smoothing} \ - --batch-size=${batch_size} \ - --update-freq=${update_freq} \ - --encoder-normalize-before \ - --decoder-normalize-before \ - --share-decoder-input-output-embed \ - --share-all-embeddings \ - --layernorm-embedding \ - --patch-layernorm-embedding \ - --code-layernorm-embedding \ - --resnet-drop-path-rate=${resnet_drop_path_rate} \ - --encoder-drop-path-rate=${encoder_drop_path_rate} \ - --decoder-drop-path-rate=${decoder_drop_path_rate} \ - --dropout=${dropout} \ - --attention-dropout=${attention_dropout} \ - --weight-decay=0.01 --optimizer=adam --adam-betas="(0.9,0.999)" --adam-eps=1e-08 --clip-norm=1.0 \ - --lr-scheduler=polynomial_decay --lr=${lr} \ - --max-epoch=${max_epoch} --warmup-ratio=${warmup_ratio} \ - --log-format=simple --log-interval=10 \ - --fixed-validation-seed=7 \ - --no-epoch-checkpoints --keep-best-checkpoints=1 \ - --save-interval=${save_interval} --validate-interval=1 \ - --save-interval-updates=${save_interval_updates} --validate-interval-updates=${validate_interval_updates} \ - --eval-cider \ - --eval-cider-cached-tokens=${eval_cider_cached} \ - --eval-args='{"beam":5,"max_len_b":16,"no_repeat_ngram_size":3}' \ - --best-checkpoint-metric=cider --maximize-best-checkpoint-metric \ - 
--max-src-length=${max_src_length} \ - --max-tgt-length=${max_tgt_length} \ - --find-unused-parameters \ - --freeze-encoder-embedding \ - --freeze-decoder-embedding \ - --add-type-embedding \ - --scale-attn \ - --scale-fc \ - --scale-heads \ - --disable-entangle \ - --num-bins=${num_bins} \ - --patch-image-size=${patch_image_size} \ - --drop-worst-ratio=${drop_worst_ratio} \ - --drop-worst-after=${drop_worst_after} \ - --fp16 \ - --fp16-scale-window=512 \ - --num-workers=0 \ - --image-encoder-name=${image_encoder_name} \ - --image-dir=${image_dir} \ - --video-encoder-name=${video_encoder_name} \ - --video-model-path=${video_model_path} \ - --patch-frame-size=${patch_frame_size} \ - ${sample_patch_num} \ - ${eval_args} \ - --reset-dataloader --reset-meters --reset-optimizer \ - --freeze-encoder \ - --freeze-decoder \ - --freeze-audio-encoder \ - --freeze-image-encoder \ - --freeze-video-encoder \ - --log-end ${log_end} --drop-best-ratio ${drop_best_ratio} --drop-best-after ${drop_best_after} - done - done -done \ No newline at end of file diff --git a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/caption/ofa_wacaption_vqacapsnli_caption_stage_1_lr1e5.sh b/spaces/mshukor/UnIVAL/slurm_adastra/averaging/caption/ofa_wacaption_vqacapsnli_caption_stage_1_lr1e5.sh deleted file mode 100644 index 422f945c31298ee4d8195c095a17160f396ba1fc..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/caption/ofa_wacaption_vqacapsnli_caption_stage_1_lr1e5.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -#SBATCH --job-name=ofa_wacaption_vqacapsnli_caption_stage_1_lr1e5 -#SBATCH --nodes=1 -#SBATCH --ntasks=1 -#SBATCH --gpus=8 -#SBATCH --threads-per-core=2 -#SBATCH --gpu-bind=closest -####SBATCH --nodelist=x1004c4s2b0n0 -#SBATCH --time=24:00:00 -#SBATCH -C MI250 -#SBATCH -A gda2204 -#SBATCH --mail-type=END,FAIL -#SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_wacaption_vqacapsnli_caption_stage_1_lr1e5.out -#SBATCH --exclusive -#SBATCH --mail-user=mustafa.shukor@isir.upmc.fr - - -cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts -source /lus/home/NAT/gda2204/mshukor/.bashrc - -conda activate main - - -rm core-python3* - - -srun -l -N 1 -n 1 -c 128 --gpus=8 bash averaging/caption/ofa_wacaption_vqacapsnli_caption_stage_1_lr1e5.sh - - diff --git a/spaces/naver/PUMP/core/cuda_deepm/func.cpp b/spaces/naver/PUMP/core/cuda_deepm/func.cpp deleted file mode 100644 index 88657091fe1ff8324724907051551306c39716c0..0000000000000000000000000000000000000000 --- a/spaces/naver/PUMP/core/cuda_deepm/func.cpp +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2022-present NAVER Corp. -// CC BY-NC-SA 4.0 -// Available only for non-commercial use - -#include -using namespace torch::indexing; // Slice -#include - -#define MIN(x, y) ((x) < (y) ? (x) : (y)) -#define MAX(x, y) ((x) < (y) ? 
(y) : (x)) -#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) - -inline Slice sl(bool x) { - if (x) - return Slice(0, -1); - else - return Slice(1, None); -} - -torch::Tensor forward_agg_cuda( int level, float norm, const torch::Tensor lower, - const at::optional weights, torch::Tensor upper ); - -std::vector forward_agg( int level, float norm, const torch::Tensor lower, - const at::optional weights = at::nullopt ) { - TORCH_CHECK(level >= 1, "level must be >= 1"); - TORCH_CHECK(lower.dim() == 4, "input must have 4 dimensions"); - const auto LH1 = lower.size(0); - const auto LW1 = lower.size(1); - const auto LH2 = lower.size(2); - const auto LW2 = lower.size(3); - if (weights) TORCH_CHECK(weights->size(0) == LH1 && weights->size(1) == LW1, "weights should have shape == lower.shape[:2]"); - const auto UH1 = (level == 1) ? LH1+1 : LH1; - const auto UW1 = (level == 1) ? LW1+1 : LW1; - - TORCH_CHECK(lower.is_cuda()) - auto upper = torch::zeros({UH1, UW1, LH2, LW2}, lower.options()); - torch::Tensor new_weights = forward_agg_cuda( level, norm, lower, weights, upper ); - return {upper, new_weights}; -} - - -torch::Tensor forward_pool_agg_cuda( int level, float norm, const torch::Tensor lower, - const at::optional weights, torch::Tensor upper ); - -std::vector forward_pool_agg( int level, float norm, const torch::Tensor lower, - const at::optional weights = at::nullopt ) { - TORCH_CHECK(level >= 1, "level must be >= 1"); - TORCH_CHECK(lower.dim() == 4, "input must have 4 dimensions"); - const auto LH1 = lower.size(0); - const auto LW1 = lower.size(1); - const auto LH2 = lower.size(2); - const auto LW2 = lower.size(3); - if (weights) TORCH_CHECK(weights->size(0) == LH1 && weights->size(1) == LW1, "weights should have shape == lower.shape[:2]"); - const auto UH1 = (level == 1) ? LH1+1 : LH1; - const auto UW1 = (level == 1) ? 
LW1+1 : LW1; - - TORCH_CHECK(lower.is_cuda()) - auto upper = torch::zeros({UH1, UW1, 1+(LH2-1)/2, 1+(LW2-1)/2}, lower.options()); - torch::Tensor new_weights = forward_pool_agg_cuda( level, norm, lower, weights, upper ); - return {upper, new_weights}; -} - -// forward declaration -void backward_agg_unpool_cuda( int level, const torch::Tensor upper, torch::Tensor lower, bool exclude_borders ); - -void backward_agg_unpool( int level, const torch::Tensor upper, torch::Tensor lower, bool exclude_borders = true ) { - TORCH_CHECK(level >= 1, "level must be >= 1"); - TORCH_CHECK( upper.dim() == 4 && lower.dim() == 4, "inputs should be 4-dimensional" ); - - TORCH_CHECK(upper.is_cuda() && lower.is_cuda()) - backward_agg_unpool_cuda(level, upper, lower, exclude_borders); -} - - -void max_pool3d_cuda( const torch::Tensor tensor, const int kernel_size, const int stride, - torch::Tensor maxima, torch::Tensor indices ); - -std::vector max_pool3d( const torch::Tensor tensor, const int kernel_size, const int stride ) { - TORCH_CHECK(tensor.dim() == 4, "tensor should be 4-dimensional: BxCxHxW"); - TORCH_CHECK( 1 <= kernel_size, "bad kernel size %d", kernel_size ); - TORCH_CHECK( 1 <= stride, "bad stride %d", stride ); - const int IB = tensor.size(0); - const int IH = tensor.size(2); // input height - const int IW = tensor.size(3); // input width - - // output size - const int OH = 1 + (IH - kernel_size) / stride; - const int OW = 1 + (IW - kernel_size) / stride; - - torch::Tensor maxima = torch::empty({IB, OH, OW}, tensor.options()); - torch::Tensor indices = torch::empty({IB, OH, OW}, tensor.options().dtype(torch::kInt64)); - - if (tensor.is_cuda()) - max_pool3d_cuda( tensor, kernel_size, stride, maxima, indices ); - else - TORCH_CHECK(false, "CPU max_pool3d not implemented yet"); - return {maxima, indices}; -} - -static inline float ptdot( const float* m, float x, float y ) { - return x*m[0] + y*m[1] + m[2]; -} - -static inline float pow2(float v) { - return v*v; -} - -void merge_corres_cpu( const torch::Tensor corres, int offset, const torch::Tensor _inv_rot, - float dmax, torch::Tensor all_corres, const int all_step ) { - const int H = corres.size(0); - const int W = corres.size(1); - const float tol = 2*2; // squared - dmax *= dmax; // squared - - TORCH_CHECK( _inv_rot.is_contiguous() ); - const float* inv_rot = _inv_rot.data_ptr(); - - auto corres_a = corres.accessor(); - auto all_corres_a = all_corres.accessor(); - - // for each bin of the final histograms, we get the nearest-neighbour bin in corres0 and corres1 - for (int j=0; j (%g,%g) in ref img", x, y); - - // center of the bin on the rescaled+rotated image - float xr = ptdot( inv_rot + 0, x, y ); - float yr = ptdot( inv_rot + 3, x, y ); - // printf(" -> (%g,%g) in rescaled", xr, yr); - - // iterate on the nearby bins - int xb = (int)(0.5+ xr/4); // rescaled+rotated desc always has step 4 - int yb = (int)(0.5+ yr/4); - // printf(" -> (%d,%d) in bins\n", xb, yb); - - float best = dmax; - for (int v = MAX(0,yb-1); v <= MIN(H,yb+1); v++) - for (int u = MAX(0,xb-1); u <= MIN(W,xb+1); u++) { - // assert( v >= 0 && v < corres_a.size(0) ); - // assert( u >= 0 && u < corres_a.size(1) ); - auto cor = corres_a[v][u]; - float d = pow2(cor[offset]-x) + pow2(cor[offset+1]-y); - if( d < best ) best = d; - } - - for (int v = MAX(0,yb-1); v <= MIN(H,yb+1); v++) - for (int u = MAX(0,xb-1); u <= MIN(W,xb+1); u++) { - // assert( v >= 0 && v < corres_a.size(0) ); - // assert( u >= 0 && u < corres_a.size(1) ); - auto cor = corres_a[v][u]; - float d = 
pow2(cor[offset]-x) + pow2(cor[offset+1]-y); - if( d <= tol*best ) { // spatially close - // merge correspondence if score is better than actual - // printf("update all_corres[%d,%d]\n", v,u); - if( cor[4] > all_cor[4] ) - for (int k = 0; k < all_corres.size(2); k++) - all_cor[k] = cor[k]; - } - } - } -} - -void merge_corres_cuda( const torch::Tensor corres, int offset, const torch::Tensor inv_rot, - float dmax, torch::Tensor all_corres, const int all_step ); - -void merge_corres( const torch::Tensor corres, int offset, const torch::Tensor rot, - torch::Tensor all_corres, const int all_step ) { - TORCH_CHECK( corres.dim() == 3 && corres.size(2) == 6, "corres.shape should be (H,W,6)" ); - TORCH_CHECK( all_corres.dim() == 3 && all_corres.size(2) == 6, "all_corres.shape should be (H,W,6)" ); - - float dmax = 8 * torch::sqrt(torch::det(rot)).item(); - torch::Tensor inv_rot = torch::inverse(rot).contiguous(); - - if (all_corres.is_cuda()) - merge_corres_cuda( corres, offset, inv_rot, dmax, all_corres, all_step ); - else - merge_corres_cpu( corres, offset, inv_rot, dmax, all_corres, all_step ); -} - - -void mask_correlations_radial_cuda( torch::Tensor corr, const torch::Tensor targets, - const float radius, const float alpha); - -void mask_correlations_radial( torch::Tensor corr, const torch::Tensor targets, - const float radius, const float alpha) { - // radius: protected area in pixels around each target center - // alpha: in [0,1]. If alpha = 0: no effect. If alpha = 1: full effect. - TORCH_CHECK( corr.dim() == 4 ); - TORCH_CHECK( targets.dim() == 3 ); - TORCH_CHECK( targets.size(0) == corr.size(0) && targets.size(1) == corr.size(1) && targets.size(2) == 2, - "correlations and targets should have the same shape[:2]" ); - - if (corr.is_cuda()) - mask_correlations_radial_cuda( corr, targets, radius, alpha ); - else - TORCH_CHECK(false, "TODO"); -} - - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("forward_agg", &forward_agg, "forward aggregation (CUDA)"); - m.def("forward_pool_agg", &forward_pool_agg, "forward pooling and aggregation (CUDA)"); - m.def("backward_agg_unpool", &backward_agg_unpool, "backward sparse-conv and max-unpooling (C++ & CUDA)"); - m.def("max_pool3d", &max_pool3d, "max_pool3d that can handle big inputs (CUDA)"); - m.def("merge_corres_one_side", &merge_corres, "merge correspondences on CPU or GPU" ); - m.def("mask_correlations_radial", &mask_correlations_radial, "mask correlations radially (CUDA)" ); -} diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Burp Suite Professional 2.0.11 Beta Crack [NEW].md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Burp Suite Professional 2.0.11 Beta Crack [NEW].md deleted file mode 100644 index 5cc1a1c83fbe8d8a35eaf5fced466db49a9d6dc6..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Burp Suite Professional 2.0.11 Beta Crack [NEW].md +++ /dev/null @@ -1,133 +0,0 @@ - -


          Burp Suite Professional 2.0.11 Beta Crack: A Comprehensive Guide

          -

          If you are looking for a powerful and versatile tool for web security testing, you might have heard of Burp Suite Professional. Burp Suite Professional is a software suite that allows you to perform various tasks such as web application scanning, vulnerability assessment, penetration testing, and more. It is designed by PortSwigger, a leading company in web security research and development.

          -


          -

          Burp Suite Professional has many features and benefits that make it a popular choice among web security professionals and enthusiasts. Some of these features and benefits are:

          -
            -
          • It supports a wide range of web protocols and technologies, such as HTTP, HTTPS, WebSocket, SOAP, REST, GraphQL, etc.
          • -
          • It has a user-friendly and customizable interface that lets you control every aspect of your testing process.
          • -
          • It has a proxy tool that lets you intercept and modify the web traffic between your browser and the target application.
          • -
          • It has a spider tool that lets you crawl and map out the structure and content of the target application.
          • -
          • It has a scanner tool that lets you automatically discover and exploit various web vulnerabilities, such as SQL injection, cross-site scripting, file inclusion, etc.
          • -
          • It has an intruder tool that lets you perform automated attacks with customized payloads and parameters.
          • -
          • It has a repeater tool that lets you manually manipulate and resend individual requests.
          • -
          • It has a sequencer tool that lets you analyze the randomness and entropy of session tokens and other data.
          • -
          • It has a decoder tool that lets you encode and decode data using various methods, such as Base64, URL encoding, hex encoding, etc.
          • -
          • It has a comparer tool that lets you compare the responses from different requests.
          • -
          • It has an extender tool that lets you extend the functionality of Burp Suite Professional with various extensions and plugins.
          • -
          • It has a project file feature that lets you save and resume your testing sessions.
          • -
          • It has a reporting feature that lets you generate comprehensive and professional reports of your testing results.
          • -
          -

          As you can see, Burp Suite Professional is a very powerful and comprehensive tool for web security testing. However, it is not a free tool. You need to purchase a license to use it. The license costs $399 per year for an individual user, or $999 per year for a team of up to five users. This might be too expensive for some people who want to use Burp Suite Professional for personal or educational purposes.

          -

          Fortunately, there is a way to use Burp Suite Professional without paying for a license. You can use a crack to activate Burp Suite Professional 2.0.11 Beta, which is the latest version of the software as of June 2023. A crack is a program or a file that modifies or bypasses the license verification process of another program or software. By using a crack, you can enjoy all the features and benefits of Burp Suite Professional 2.0.11 Beta without spending any money.

          -

          In this article, we will show you how to download and install Burp Suite Professional 2.0.11 Beta, how to activate it with a crack, and how to use it for web security testing. We will also provide some tips and best practices for using Burp Suite Professional 2.0.11 Beta safely and effectively. By following this guide, you will be able to use Burp Suite Professional 2.0.11 Beta like a pro in no time.

          -

          How to download and install Burp Suite Professional 2.0.11 Beta

          -

          The first step to use Burp Suite Professional 2.0.11 Beta is to download and install it on your computer. Here are the steps to do so:

          -
            -
          1. Go to the official website of PortSwigger and click on the "Download" button at the top right corner of the page.
          2. -
          3. Select "Burp Suite Professional" from the drop-down menu and click on "Download latest beta version".
          4. -
          5. You will be redirected to another page where you can see the details of Burp Suite Professional 2.0.11 Beta, such as the release date, the version number, the file size, etc.
          6. -
          7. Click on "Download now" and choose your preferred operating system (Windows, Mac OS X, or Linux).
8. Enter your email address and click on "Send download link". You will receive an email with a download link shortly.
          9. Open the email and click on the download link. You will be able to download a zip file containing the installer of Burp Suite Professional 2.0.11 Beta.
          10. -
          11. Extract the zip file to a folder of your choice and run the installer. Follow the instructions on the screen to complete the installation process.
          12. -
          13. Once the installation is finished, you can verify that Burp Suite Professional 2.0.11 Beta is installed correctly by going to the folder where you installed it and looking for the "Burp Suite Professional" icon.
          14. -
          15. Double-click on the icon to launch Burp Suite Professional 2.0.11 Beta. You will see a splash screen with the logo and version number of Burp Suite Professional.
          16. -
          -

          Congratulations, you have successfully downloaded and installed Burp Suite Professional 2.0.11 Beta on your computer. However, you are not ready to use it yet. You need to activate it with a crack first.

          -

          -

          How to activate Burp Suite Professional 2.0.11 Beta with a crack

          -

          As mentioned earlier, Burp Suite Professional 2.0.11 Beta is not a free tool. You need to purchase a license to use it. However, if you don't want to pay for a license, you can use a crack to activate it. A crack is a program or a file that modifies or bypasses the license verification process of another program or software.

          -

          By using a crack, you can enjoy all the features and benefits of Burp Suite Professional 2.0.11 Beta without spending any money. However, using a crack also comes with some risks and problems, such as malware infection, legal issues, ethical concerns, etc. Therefore, you need to be careful and responsible when using a crack.

          -

          Here are the steps to activate Burp Suite Professional 2.0.11 Beta with a crack:

          -
            -
          1. Go to a reputable website that provides cracks for various software and programs. Some examples of such websites are CrackzSoft, CrackedPCSoftware, Crack4Windows, etc.
          2. -
          3. Search for "Burp Suite Professional 2.0.11 Beta Crack" or something similar on the website.
          4. -
          5. You will see a list of results with different cracks for Burp Suite Professional 2.0.11 Beta. Choose one that has positive feedback and ratings from other users.
          6. -
          7. Click on the result and read the description and instructions carefully. Make sure that the crack is compatible with your operating system and version of Burp Suite Professional.
          8. -
          9. Click on the download button or link and download the crack file to your computer.
          10. -
          11. Scan the crack file with your antivirus software before opening it. If your antivirus software detects any malware or virus in the crack file, delete it immediately and look for another crack.
          12. -
          13. If your antivirus software does not detect any malware or virus in the crack file, open it and follow the instructions on how to use it.
          14. -
          15. Usually, you will need to copy and paste the crack file into the folder where you installed Burp Suite Professional 2.0.11 Beta and replace the original file.
          16. -
          17. Alternatively, you might need to run the crack file as an administrator and enter some information such as your name, email address, etc.
          18. -
          19. After using the crack file, launch Burp Suite Professional 2.0.11 Beta again and check if it is activated.
          20. -
          21. If it is activated, you will see a message saying "License status: Licensed" or something similar on the splash screen or on the top right corner of the interface.
          22. -
          -

          Congratulations, you have successfully activated Burp Suite Professional 2.0.11 Beta with a crack. You can now use it for web security testing without any limitations or restrictions.

          -

          How to use Burp Suite Professional 2.0.11 Beta for web security testing

          -


          Now that you have downloaded, installed, and activated Burp Suite Professional 2.0.11 Beta with a crack, you are ready to use it for web security testing. Burp Suite Professional 2.0.11 Beta has many tools and features that can help you perform various tasks such as web application scanning, vulnerability assessment, penetration testing, and more.

          -

          In this section, we will give you a brief overview of how to use some of the most common and useful tools and features of Burp Suite Professional 2.0.11 Beta for web security testing. We will also provide some screenshots and examples to illustrate how they work.

          -

          However, please note that this is not a comprehensive or detailed tutorial on how to use Burp Suite Professional 2.0.11 Beta for web security testing. There are many other tools and features that we will not cover in this article, and there are many other ways to use the tools and features that we will cover in this article. Therefore, we recommend that you explore and experiment with Burp Suite Professional 2.0.11 Beta on your own, and consult the official documentation and online resources for more information and guidance.

          -

          How to configure the proxy settings and intercept the web traffic

          -

          The first thing you need to do before using Burp Suite Professional 2.0.11 Beta for web security testing is to configure the proxy settings and intercept the web traffic between your browser and the target application.

          -

          A proxy is a program or a device that acts as an intermediary between two parties that communicate over a network, such as your browser and the target application. By using a proxy, you can monitor, modify, and manipulate the network traffic that passes through it.

          -

          Burp Suite Professional 2.0.11 Beta has a proxy tool that lets you intercept and modify the web traffic between your browser and the target application. By using the proxy tool, you can see and analyze every request and response that is sent and received by your browser and the target application. You can also modify or drop any request or response before it reaches its destination.

          -

          To use the proxy tool, you need to configure your browser to use Burp Suite Professional 2.0.11 Beta as its proxy server. You also need to install Burp's CA certificate on your browser to avoid SSL errors when accessing HTTPS websites.

          -

          Here are the steps to configure the proxy settings and intercept the web traffic:

          -
            -
          1. Launch Burp Suite Professional 2.0.11 Beta and go to the "Proxy" tab.
          2. -
          3. Go to the "Options" sub-tab and make sure that the "Running" checkbox is checked for the default proxy listener on port 8080.
          4. -
          5. Go to your browser's settings and find the option to configure the proxy settings.
          6. -
          7. Set your browser to use a manual proxy configuration with the following details: Hostname: localhost or 127.0.0.1, Port: 8080.
          8. -
          9. Save your changes and restart your browser.
          10. -
          11. Go back to Burp Suite Professional 2.0.11 Beta and go to the "HTTP history" sub-tab under the "Proxy" tab.
          12. -
          13. You should see a list of requests and responses that are sent and received by your browser as you browse the web.
          14. -
          15. You can click on any request or response to see more details about it in the panels below.
          16. -
          17. You can also right-click on any request or response to see more options such as sending it to other tools, modifying it, dropping it, etc.
          18. -
          -
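If you prefer to confirm the listener from a script rather than the browser, the short Python sketch below sends a single request through the proxy; it assumes the default listener address 127.0.0.1:8080 and the third-party requests package, so adjust both to your setup. Any request sent this way should show up in the "HTTP history" sub-tab.

```python
# Minimal sanity check: route one request through the Burp listener and
# confirm it appears under Proxy > HTTP history. Assumes the default
# listener on 127.0.0.1:8080 (an assumption; change it if you moved it).
import requests

proxies = {
    "http": "http://127.0.0.1:8080",
    "https": "http://127.0.0.1:8080",
}

# verify=False is only for this quick check, before Burp's CA certificate
# has been trusted; requests will print an InsecureRequestWarning, which
# is expected here.
response = requests.get("https://example.com", proxies=proxies, verify=False)
print(response.status_code, len(response.content))
```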

          To intercept the web traffic, you need to enable the interception mode on Burp Suite Professional 2.0.11 Beta:

          -
            -
          1. Go to the "Intercept" sub-tab under the "Proxy" tab.
          2. -
          3. Click on the "Intercept is off" button to turn it on.
          4. -
          5. You should see a message saying "Intercept is on" on the button.
          6. -
          7. Now, whenever you browse the web, Burp Suite Professional 2.0.11 Beta will intercept every request or response before it reaches its destination.
          8. -
          9. You will see the intercepted request or response in the panel below.
          10. -
          11. You can modify or drop it as you wish by using the buttons or menus above.
          12. -
          13. You can also forward it without any changes by clicking on the "Forward" button.
          14. -
To install Burp's CA certificate on your browser, you need to follow these steps:

          -
            -
          1. Go to http://burp/ on your browser.
          2. -
          3. You should see a page with a link to download Burp's CA certificate.
          4. -
          5. Click on the link and save the certificate file to your computer.
          6. -
          7. Go to your browser's settings and find the option to manage certificates or trust authorities.
          8. -
          9. Import the certificate file that you downloaded and trust it as a root CA.
          10. -
          11. Save your changes and restart your browser.
          12. -
          -

          Now, you should be able to access HTTPS websites without any SSL errors or warnings.

          -
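Once the certificate is trusted, scripted checks can verify HTTPS properly instead of skipping verification. The sketch below is a minimal example under two assumptions: the certificate exported from http://burp/ (typically DER-encoded) has been converted to PEM, for instance with openssl x509 -inform der -in cacert.der -out burp-ca.pem, and the file names used are hypothetical placeholders.

```python
# Send an HTTPS request through the proxy and validate the certificate
# chain against Burp's CA instead of disabling verification.
import requests

proxies = {
    "http": "http://127.0.0.1:8080",
    "https": "http://127.0.0.1:8080",
}

response = requests.get(
    "https://example.com",
    proxies=proxies,
    verify="burp-ca.pem",  # hypothetical path to the PEM-converted Burp CA
)
print(response.status_code)
```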

          How to use the spider, scanner, intruder, repeater, sequencer, and other tools

          -

          Burp Suite Professional 2.0.11 Beta has many other tools and features that can help you perform various tasks for web security testing. Some of the most common and useful tools are:

          -
            -
          • The spider tool: This tool lets you crawl and map out the structure and content of the target application. It can discover hidden pages, parameters, forms, links, etc. that might not be visible or accessible from the browser. You can use the spider tool to get a comprehensive view of the target application and identify potential attack surfaces.
          • -
          • The scanner tool: This tool lets you automatically discover and exploit various web vulnerabilities, such as SQL injection, cross-site scripting, file inclusion, etc. It can scan both individual requests and entire websites. It can also perform active and passive scanning, depending on your preferences and needs. You can use the scanner tool to find and verify vulnerabilities in the target application and generate reports of your findings.
          • -
• The intruder tool: This tool lets you perform automated attacks with customized payloads and parameters. It can send multiple requests with different values for selected parts of the request, such as headers, cookies, parameters, etc. It can also analyze the responses and compare them based on various criteria, such as length, status code, content, etc. You can use the intruder tool to test for various vulnerabilities and logic flaws in the target application and find interesting or unexpected results. A conceptual sketch of this payload-iteration idea appears just after this list.
          • -
          • The repeater tool: This tool lets you manually manipulate and resend individual requests. It can also show you the responses in different formats, such as raw, hex, HTML, etc. You can use the repeater tool to test or debug specific requests or responses and modify them as you wish.
          • -
          • The sequencer tool: This tool lets you analyze the randomness and entropy of session tokens and other data. It can collect samples of data from various sources, such as requests, responses, cookies, etc. It can also perform various statistical tests and calculations on the data and show you the results in graphical or numerical forms. You can use the sequencer tool to test the quality and security of session tokens and other data that are supposed to be random or unpredictable.
          • -
          -
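To make the intruder idea concrete, the sketch below (referenced in the intruder description above) shows in plain Python what a simple "sniper"-style run boils down to: substituting each payload into one position and comparing responses by status code and length. This is not Burp's API or its implementation, only an illustration; the target URL and parameter are hypothetical placeholders, and it should only be pointed at an application you are authorized to test.

```python
# Conceptual illustration of payload iteration, not Burp's Intruder itself.
# Requests are routed through the proxy so they also land in Burp's history.
import requests

proxies = {"http": "http://127.0.0.1:8080", "https": "http://127.0.0.1:8080"}
payloads = ["admin", "guest", "test", "' OR '1'='1"]

for payload in payloads:
    r = requests.get(
        "http://localhost:3000/rest/products/search",  # hypothetical test target
        params={"q": payload},                         # the single insertion point
        proxies=proxies,
    )
    # Differences in status code or response length are the kind of signal
    # Intruder surfaces automatically and that are worth inspecting manually.
    print(f"{payload!r:>16} -> {r.status_code} {len(r.content)} bytes")
```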

          To use these tools, you need to send requests or responses from the proxy tool or from your browser to them. You can do this by right-clicking on any request or response and selecting "Send to [tool name]" from the menu. You can also use keyboard shortcuts or drag-and-drop methods to do this.

          -

          Once you send a request or response to a tool, you can go to that tool's tab and see it in the panel below. You can then configure the settings and options for that tool and start using it as you wish.

          -

          For more details and instructions on how to use these tools and features, please refer to the official documentation and online resources of Burp Suite Professional 2.0.11 Beta.

          -

          Conclusion

          -

          In this article, we have shown you how to download and install Burp Suite Professional 2.0.11 Beta, how to activate it with a crack, and how to use it for web security testing. We have also provided some tips and best practices for using Burp Suite Professional 2.0.11 Beta safely and effectively.

          -

          Burp Suite Professional 2.0.11 Beta is a powerful and versatile tool for web security testing that has many features and benefits that make it a popular choice among web security professionals and enthusiasts. However, it is not a free tool that anyone can use without paying for a license.

          -

          If you want to use Burp Suite Professional 2.0.11 Beta without paying for a license, you can use a crack to activate it. However, using a crack also comes with some risks and problems, such as malware infection, legal issues, ethical concerns, etc. Therefore, you need to be careful and responsible when using a crack.

          -

          Alternatively, you can use the free version of Burp Suite, which is called Burp Suite Community Edition. Burp Suite Community Edition has some of the features and benefits of Burp Suite Professional, but with some limitations and restrictions. For example, it does not have the scanner tool, the extender tool, the project file feature, the reporting feature, etc. You can download Burp Suite Community Edition from the same website as Burp Suite Professional.

          -

          Whether you use Burp Suite Professional 2.0.11 Beta with a crack or Burp Suite Community Edition, you can use it for web security testing and learn a lot from it. However, if you want to use Burp Suite Professional 2.0.11 Beta legally and ethically, and support the developers and the web security community, we recommend that you purchase a license from PortSwigger and enjoy all the features and benefits of Burp Suite Professional without any risks or problems.

          -

          We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to contact us. Thank you for reading and happy web security testing!

          -

          FAQs

          -

          Here are some frequently asked questions about Burp Suite Professional 2.0.11 Beta and its crack:

          -
            -
          1. Q: Is Burp Suite Professional 2.0.11 Beta safe to use?
            -A: Burp Suite Professional 2.0.11 Beta is safe to use if you download it from the official website of PortSwigger and activate it with a license. However, if you use a crack to activate it, you might expose yourself to some risks and problems, such as malware infection, legal issues, ethical concerns, etc. Therefore, you need to be careful and responsible when using a crack.
          2. -
          3. Q: Is Burp Suite Professional 2.0.11 Beta legal to use?
            -A: Burp Suite Professional 2.0.11 Beta is legal to use if you purchase a license from PortSwigger and follow their terms and conditions. However, if you use a crack to activate it, you might violate their intellectual property rights and break their license agreement. Therefore, you need to be aware of the potential legal consequences when using a crack.
          4. -
          5. Q: Is Burp Suite Professional 2.0.11 Beta ethical to use?
            -A: Burp Suite Professional 2.0.11 Beta is ethical to use if you use it for legitimate and authorized purposes, such as web security testing, vulnerability assessment, penetration testing, etc. However, if you use it for malicious or unauthorized purposes, such as hacking, cracking, phishing, etc., you might harm others or yourself in the process. Therefore, you need to respect the privacy and security of others and yourself when using Burp Suite Professional 2.0.11 Beta.
          6. -
          7. Q: What are the differences between Burp Suite Professional 2.0.11 Beta and Burp Suite Community Edition?
            -A: Burp Suite Professional 2.0.11 Beta and Burp Suite Community Edition are two versions of Burp Suite that have different features and benefits. Burp Suite Professional 2.0.11 Beta is the paid version that has all the features and benefits of Burp Suite, such as the scanner tool, the extender tool, the project file feature, the reporting feature, etc. Burp Suite Community Edition is the free version that has some of the features and benefits of Burp Suite Professional 2.0.11 Beta but with some limitations and restrictions, such as no scanner tool, no extender tool, no project file feature, no reporting feature, etc.
          8. -
          9. Q: Where can I find more information and guidance on how to use Burp Suite Professional 2.0.11 Beta?
            -A: You can find more information and guidance on how to use Burp Suite Professional 2.0.11 Beta from the official documentation and online resources of PortSwigger. You can also find tutorials, videos, blogs, forums, books, courses, etc. from various web security experts and communities on the internet.
          10. -

          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Imagenomic Portraiture Presets Free [Extra Quality] Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Imagenomic Portraiture Presets Free [Extra Quality] Download.md deleted file mode 100644 index 1aa328d5da934cd1cd3ad89f571f2adec1ea68f7..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Imagenomic Portraiture Presets Free [Extra Quality] Download.md +++ /dev/null @@ -1,142 +0,0 @@ -
          -

          Imagenomic Portraiture Presets Free Download: A Complete Guide

          -

If you are a portrait photographer or retoucher, you know how important it is to have smooth, flawless skin in your images. However, achieving that result can be time-consuming and tedious, especially if you have to deal with hundreds or thousands of photos. That's why you need a tool that can help you automate and simplify your skin retouching process.

          -

          One of the best tools for that purpose is Imagenomic Portraiture, a plugin for Adobe Photoshop and Lightroom that allows you to apply professional skin smoothing, healing, and enhancing effects with just a few clicks. But what if you want to have more control over the look and feel of your portraits? What if you want to create different styles and moods for different occasions?

          -



          -

          That's where Imagenomic Portraiture presets come in handy. Presets are predefined settings that you can apply to your images with one click, saving you time and effort. They can also help you achieve consistent results across different images, or experiment with new creative possibilities.

          -

          In this article, we will show you everything you need to know about Imagenomic Portraiture presets, including:

          -
            -
          • What is Imagenomic Portraiture and how to install and use it?
          • -
          • What are Imagenomic Portraiture presets and how to access, create, import, and export them?
          • -
          • Where to find Imagenomic Portraiture presets free download and how to use them?
          • -
          -

          By the end of this article, you will be able to enhance your portraits with ease and style using Imagenomic Portraiture presets. So, let's get started!

          -

          What is Imagenomic Portraiture?

          -

          Imagenomic Portraiture is a plugin for Adobe Photoshop and Lightroom that allows you to apply professional skin smoothing, healing, and enhancing effects to your portraits. It works by intelligently detecting and isolating the skin tones and textures in your images, and applying the appropriate adjustments without affecting the other details. You can also fine-tune the results using various sliders and options, such as sharpness, warmth, contrast, tint, and more.

          -

          Some of the features of Imagenomic Portraiture are:

          -
            -
          • It supports both 8-bit and 16-bit images, as well as RGB, CMYK, LAB, and Grayscale modes.
          • -
          • It has a user-friendly interface that shows you a preview of the before and after images, as well as a histogram and a navigator.
          • -
          • It has a batch processing feature that allows you to apply the same settings to multiple images at once.
          • -
          • It has a history panel that allows you to undo or redo any changes you make.
          • -
          • It has a mask panel that allows you to refine the selection of the skin areas using brushes, gradients, or color ranges.
          • -
          • It has a preset panel that allows you to save and load your favorite settings for different scenarios.
          • -
          -

          How to install Imagenomic Portraiture?

          -

          To install Imagenomic Portraiture, you need to follow these steps:

          -
            -
          1. Download the plugin from the official website of Imagenomic: https://imagenomic.com/Products/Portraiture. You can choose between the Photoshop version or the Lightroom version, depending on which software you use. You can also download a free trial version before buying the full version.
          2. -
          3. Run the installer file and follow the instructions on the screen. You will need to enter your license key if you have purchased the full version.
          4. -
          5. Restart your Photoshop or Lightroom application for the plugin to take effect.
          6. -
          7. To access the plugin, go to Filter > Imagenomic > Portraiture in Photoshop, or Photo > Edit In > Portraiture in Lightroom.
          8. -
          -

          How to use Imagenomic Portraiture?

          -

          To use Imagenomic Portraiture, you need to follow these steps:

          -
            -
          1. Open your portrait image in Photoshop or Lightroom.
          2. -
          3. Launch the plugin from the menu as mentioned above.
          4. -
          5. The plugin interface will open in a new window. You will see a preview of your image with the default settings applied. You can zoom in or out using the buttons on the top left corner, or drag the image around using your mouse.
          6. -
          7. You can adjust the settings using the sliders on the right panel. There are three main sections: Detail Smoothing, Skin Mask, and Enhancements. Each section has its own sub-sections with more options. You can also click on the question mark icon on each section to get more information about what each slider does.
          8. -
          9. You can compare the before and after images by clicking on the eye icon on the top right corner. You can also switch between different views using the buttons on the bottom right corner: Original Image, Enhanced Image, Split View (Horizontal or Vertical), or Side by Side View.
          10. -
          11. You can save your settings as a preset by clicking on the plus icon on the bottom left corner. You can name your preset and assign it to a category. You can also load an existing preset by clicking on the drop-down menu on the bottom left corner.
          12. -
          13. Once you are happy with the results, click on OK to apply them to your image. The plugin will close and return you to your Photoshop or Lightroom application.
          14. -
          -

          How to use Portraiture actions?

          -

          If you want to automate your workflow with Imagenomic Portraiture, you can use the actions provided by Imagenomic. Actions are pre-recorded steps that you can apply to your images with one click. To use Portraiture actions, you need to follow these steps:

          -

          -
            -
          1. Download the actions from the official website of Imagenomic: https://imagenomic.com/Products/Portraiture/Actions. You can choose between different categories of actions, such as Basic, Advanced, or Creative.
          2. -
          3. Install the actions in your Photoshop application. To do that, go to Window > Actions in Photoshop, and click on the menu icon on the top right corner of the Actions panel. Then, select Load Actions and browse to the location where you saved the downloaded actions. Click on Open to load them into your Actions panel.
          4. -
          5. Select the action you want to use from the Actions panel. You can preview the effect of the action by clicking on the Play button on the bottom of the panel. You can also adjust the settings of the action by double-clicking on it and modifying the parameters.
          6. -
          7. Apply the action to your image by clicking on the Play button again. The action will run and apply the Portraiture plugin with the preset settings to your image.
          8. -
          -

          How to use Portraiture droplets?

          -

          If you want to integrate Imagenomic Portraiture with Lightroom, you can use the droplets provided by Imagenomic. Droplets are executable files that allow you to launch the plugin from Lightroom and apply a preset to your image. To use Portraiture droplets, you need to follow these steps:

          -
            -
          1. Download the droplets from the official website of Imagenomic: https://imagenomic.com/Products/Portraiture/Droplets. You can choose between different categories of droplets, such as Basic, Advanced, or Creative.
          2. -
          3. Install the droplets in your Lightroom application. To do that, go to Edit > Preferences in Lightroom, and click on the External Editing tab. Then, click on Choose under Additional External Editor and browse to the location where you saved the downloaded droplets. Click on Open to load them into your External Editing panel.
          4. -
          5. Select the droplet you want to use from the External Editing panel. You can also adjust the settings of the droplet, such as file format, color space, bit depth, resolution, and compression.
          6. -
          7. Apply the droplet to your image by right-clicking on it in Lightroom and selecting Edit In > Portraiture Droplet. The droplet will run and apply the Portraiture plugin with the preset settings to your image.
          8. -
          -

          What are Imagenomic Portraiture presets?

          -

          Imagenomic Portraiture presets are predefined settings that you can apply to your images with one click using the Portraiture plugin. Presets can help you save time and effort by automating your skin retouching process. They can also help you achieve consistent results across different images, or experiment with new creative possibilities.

          -

          Presets are composed of three main elements: Detail Smoothing, Skin Mask, and Enhancements. Each element has its own sub-elements with more options. For example, Detail Smoothing has Smoothing, Fine Detail Control, Medium Detail Control, and Large Detail Control. Skin Mask has Auto-Mask Generation, Manual Mask Refinement, and Mask Preview. Enhancements has Sharpness, Warmth, Contrast, Tint, Brightness, and Saturation.

          -

          You can use presets in two ways: you can either use one of the default presets provided by Imagenomic, or you can create your own custom presets based on your preferences and needs.

          -

          How to access Imagenomic Portraiture presets?

          -

          To access Imagenomic Portraiture presets, you need to follow these steps:

          1. Launch the Portraiture plugin from Photoshop or Lightroom as mentioned above.
          2. The plugin interface will open in a new window. You will see a preset panel on the bottom left corner of the window. You can click on it to expand or collapse it.
          3. You will see a list of categories of presets, such as Default Settings, Portrait Retouching - Basic Settings, Portrait Retouching - Advanced Settings, Portrait Retouching - Creative Settings, etc. You can click on each category to see its sub-categories and presets.
          4. You can select a preset by clicking on its name. You will see a preview of its effect on your image in the window. You can also hover your mouse over the preset name to see a tooltip with a brief description of the preset.
          5. You can adjust the settings of the preset using the sliders on the right panel. You can also save your changes as a new preset by clicking on the plus icon on the bottom left corner.

          How to create your own Imagenomic Portraiture presets?


          If you want to create your own Imagenomic Portraiture presets, you need to follow these steps:

          1. Launch the Portraiture plugin from Photoshop or Lightroom as mentioned above.
          2. The plugin interface will open in a new window. You can start from scratch by selecting the Default Settings preset, or you can modify an existing preset by selecting it from the list.
          3. You can customize the settings using the sliders on the right panel. You can change the values of each element and sub-element according to your preferences and needs.
          4. You can preview the effect of your settings on your image in the window. You can also compare the before and after images by clicking on the eye icon on the top right corner.
          5. Once you are satisfied with your settings, you can save them as a new preset by clicking on the plus icon on the bottom left corner. You can name your preset and assign it to a category. You can also overwrite an existing preset by clicking on the pencil icon next to its name.

          How to import and export Imagenomic Portraiture presets?


          If you want to import and export Imagenomic Portraiture presets, you need to follow these steps:

          1. Launch the Portraiture plugin from Photoshop or Lightroom as mentioned above.
          2. The plugin interface will open in a new window. You will see a menu icon on the top left corner of the window. You can click on it to access more options.
          3. To import presets, select Import Presets from the menu. A dialog box will open where you can browse to the location where you saved the presets file. The file should have a .portraiture extension. Click on Open to load the presets into your plugin (a short helper sketch for locating downloaded .portraiture files follows this list).
          4. To export presets, select Export Presets from the menu. A dialog box will open where you can choose a location and a name for your presets file. The file will have a .portraiture extension. Click on Save to export your presets from your plugin.
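          Because imported and exported presets are ordinary files with a .portraiture extension, a tiny script can help you keep track of the preset files you have downloaded before loading them through the Import Presets dialog. This is only a convenience sketch under that assumption; the folder path is a placeholder, and the plugin itself performs the actual import.

```python
# Convenience sketch only (not part of the Portraiture plugin): list any
# downloaded .portraiture preset files so you know what is available to
# import through the plugin's Import Presets dialog.
from pathlib import Path


def list_preset_files(folder: str = "~/Downloads") -> list:
    """Return .portraiture files found under `folder`, newest first."""
    root = Path(folder).expanduser()
    files = sorted(root.glob("*.portraiture"),
                   key=lambda p: p.stat().st_mtime,
                   reverse=True)
    for f in files:
        print(f.name)
    return files


if __name__ == "__main__":
    list_preset_files()  # "~/Downloads" is only an example location
```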

          Where to find Imagenomic Portraiture presets free download?


          If you are looking for Imagenomic Portraiture presets free download, you have several options. You can download free presets from different sources, such as:


          Imagenomic website


          The official website of Imagenomic offers some free presets for Portraiture that you can download and use. To do that, go to https://imagenomic.com/Products/Portraiture/Presets. You will see a list of categories of presets, such as Portrait Retouching - Basic Settings, Portrait Retouching - Advanced Settings, Portrait Retouching - Creative Settings, etc. You can click on each category to see its sub-categories and presets.


          You can download a preset by clicking on its name. A dialog box will open where you can save the preset file to your computer. The file will have a .portraiture extension. You can then import it into your plugin as mentioned above.


          Online forums and communities


          Another source of free presets for Portraiture is online forums and communities where users share their own creations and experiences with the plugin. Some examples of such forums and communities are:


          You can browse these forums and communities to find free presets posted by other users. You can also ask questions, request feedback, or share your own presets with others.


          Blogs and websites


          Another source of free presets for Portraiture is blogs and websites that offer tips, tutorials, reviews, and resources for portrait photography and retouching. Some examples of such blogs and websites are:


          You can visit these blogs and websites to find free presets for Portraiture, as well as learn more about the plugin and its features. You can also subscribe to their newsletters or follow their social media accounts to get updates on new posts and products.


          Conclusion


          Imagenomic Portraiture is a powerful and versatile plugin for Adobe Photoshop and Lightroom that allows you to apply professional skin smoothing, healing, and enhancing effects to your portraits. It can help you save time and effort by automating and simplifying your skin retouching process. It can also help you achieve consistent results across different images, or experiment with new creative possibilities.


          One of the best ways to use Imagenomic Portraiture is to use presets, which are predefined settings that you can apply to your images with one click. Presets can help you customize the look and feel of your portraits according to your preferences and needs. You can either use one of the default presets provided by Imagenomic, or create your own custom presets based on your settings.


          If you are looking for Imagenomic Portraiture presets free download, you have several options. You can download free presets from the official website of Imagenomic, from online forums and communities where users share their own creations and experiences, or from blogs and websites that offer tips, tutorials, reviews, and resources for portrait photography and retouching.


          We hope this article has helped you learn more about Imagenomic Portraiture presets and how to use them. If you have any questions or comments, feel free to leave them below. And if you liked this article, please share it with your friends and colleagues who might find it useful. Thank you for reading!


          FAQs


          Here are some frequently asked questions and answers about Imagenomic Portraiture presets:

          1. Q: How much does Imagenomic Portraiture cost?
             A: Imagenomic Portraiture costs $199.95 for the Photoshop version or $149.95 for the Lightroom version. You can also buy both versions as a bundle for $299.95, and a free trial version is available before you buy the full version.
          2. Q: How do I update Imagenomic Portraiture?
             A: Download the latest version from the official website of Imagenomic: https://imagenomic.com/Products/Portraiture. You can also check for updates from within the plugin by clicking on the menu icon on the top left corner of the window and selecting Check for Updates.
          3. Q: How do I uninstall Imagenomic Portraiture?
             A: Run the uninstaller file that came with the plugin. You can find it in the folder where you installed the plugin, or in the Start menu under Imagenomic > Portraiture. You can also remove the plugin manually by deleting its files from your Photoshop or Lightroom folders.
          4. Q: How do I contact Imagenomic support?
             A: To contact Imagenomic support, you can use one of the following methods:
          5. Q: How do I get more Imagenomic Portraiture presets?
             A: You can download them from the official Imagenomic website, from online forums and communities where users share their own presets, or from blogs and websites that offer portrait retouching resources, as described above.

            \ No newline at end of file diff --git a/spaces/neural-ti/NeTI/models/neti_mapper.py b/spaces/neural-ti/NeTI/models/neti_mapper.py deleted file mode 100644 index 6b5fc51510845ef92fc2fb2c46b67c6b24394bd9..0000000000000000000000000000000000000000 --- a/spaces/neural-ti/NeTI/models/neti_mapper.py +++ /dev/null @@ -1,90 +0,0 @@ -import random -from typing import Optional, List - -import torch -import torch.nn.functional as F -from torch import nn - -from constants import UNET_LAYERS -from models.positional_encoding import NeTIPositionalEncoding, BasicEncoder -from utils.types import PESigmas - - -class NeTIMapper(nn.Module): - """ Main logic of our NeTI mapper. """ - - def __init__(self, output_dim: int = 768, - unet_layers: List[str] = UNET_LAYERS, - use_nested_dropout: bool = True, - nested_dropout_prob: float = 0.5, - norm_scale: Optional[torch.Tensor] = None, - use_positional_encoding: bool = True, - num_pe_time_anchors: int = 10, - pe_sigmas: PESigmas = PESigmas(sigma_t=0.03, sigma_l=2.0), - output_bypass: bool = True): - super().__init__() - self.use_nested_dropout = use_nested_dropout - self.nested_dropout_prob = nested_dropout_prob - self.norm_scale = norm_scale - self.output_bypass = output_bypass - if self.output_bypass: - output_dim *= 2 # Output two vectors - - self.use_positional_encoding = use_positional_encoding - if self.use_positional_encoding: - self.encoder = NeTIPositionalEncoding(sigma_t=pe_sigmas.sigma_t, sigma_l=pe_sigmas.sigma_l).cuda() - self.input_dim = num_pe_time_anchors * len(unet_layers) - else: - self.encoder = BasicEncoder().cuda() - self.input_dim = 2 - - self.set_net(num_unet_layers=len(unet_layers), - num_time_anchors=num_pe_time_anchors, - output_dim=output_dim) - - def set_net(self, num_unet_layers: int, num_time_anchors: int, output_dim: int = 768): - self.input_layer = self.set_input_layer(num_unet_layers, num_time_anchors) - self.net = nn.Sequential(self.input_layer, - nn.Linear(self.input_dim, 128), nn.LayerNorm(128), nn.LeakyReLU(), - nn.Linear(128, 128), nn.LayerNorm(128), nn.LeakyReLU()) - self.output_layer = nn.Sequential(nn.Linear(128, output_dim)) - - def set_input_layer(self, num_unet_layers: int, num_time_anchors: int) -> nn.Module: - if self.use_positional_encoding: - input_layer = nn.Linear(self.encoder.num_w * 2, self.input_dim) - input_layer.weight.data = self.encoder.init_layer(num_time_anchors, num_unet_layers) - else: - input_layer = nn.Identity() - return input_layer - - def forward(self, timestep: torch.Tensor, unet_layer: torch.Tensor, truncation_idx: int = None) -> torch.Tensor: - embedding = self.extract_hidden_representation(timestep, unet_layer) - if self.use_nested_dropout: - embedding = self.apply_nested_dropout(embedding, truncation_idx=truncation_idx) - embedding = self.get_output(embedding) - return embedding - - def get_encoded_input(self, timestep: torch.Tensor, unet_layer: torch.Tensor) -> torch.Tensor: - return self.encoder.encode(timestep, unet_layer) - - def extract_hidden_representation(self, timestep: torch.Tensor, unet_layer: torch.Tensor) -> torch.Tensor: - encoded_input = self.get_encoded_input(timestep, unet_layer) - embedding = self.net(encoded_input) - return embedding - - def apply_nested_dropout(self, embedding: torch.Tensor, truncation_idx: int = None) -> torch.Tensor: - if self.training: - if random.random() < self.nested_dropout_prob: - dropout_idxs = torch.randint(low=0, high=embedding.shape[1], size=(embedding.shape[0],)) - for idx in torch.arange(embedding.shape[0]): - 
embedding[idx][dropout_idxs[idx]:] = 0 - if not self.training and truncation_idx is not None: - for idx in torch.arange(embedding.shape[0]): - embedding[idx][truncation_idx:] = 0 - return embedding - - def get_output(self, embedding: torch.Tensor) -> torch.Tensor: - embedding = self.output_layer(embedding) - if self.norm_scale is not None: - embedding = F.normalize(embedding, dim=-1) * self.norm_scale - return embedding diff --git a/spaces/nicole-ocampo/digimap-mp/test.py b/spaces/nicole-ocampo/digimap-mp/test.py deleted file mode 100644 index dd2085c0a54b9d77356788ee661906d3015d3aeb..0000000000000000000000000000000000000000 --- a/spaces/nicole-ocampo/digimap-mp/test.py +++ /dev/null @@ -1,312 +0,0 @@ -import os - -import torch - -from tqdm import tqdm - -from PIL import Image -from pathlib import Path - -from net import Net -from datasets import TestDataset -from util import res_lab2rgb -import shutil - -# Traditional Style Transfer -def StyleTransfer(args): - # Device and output dir - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - out_dir = os.path.join(args.out_root, args.name) - Path(out_dir).mkdir(exist_ok=True, parents=True) - ref_dir = os.path.join(out_dir, "ref") - Path(ref_dir).mkdir(exist_ok=True, parents=True) - - # Prepare network - - network = Net(args) - network.load_state_dict(torch.load(args.network)) - network.eval() - network.to(device) - - # Prepare datasets - - content_dataset = TestDataset(args.content_dir) - texture_dataset = TestDataset(args.texture_dir) - LCT = len(content_dataset) - LT = len(texture_dataset) - - # Save ref img - for i in range(LCT): - path = content_dataset.get_img_path(i) - shutil.copy(path, os.path.join(ref_dir, "content_{}.jpg".format(i))) - for i in range(LT): - path = texture_dataset.get_img_path(i) - shutil.copy(path, os.path.join(ref_dir, "texture_{}.jpg".format(i))) - - # Start Test - N = LCT * LT - print("LCT = {}, LT = {}, total output num = {}".format(LCT, LT, N)) - with tqdm(total=N) as t: - with torch.no_grad(): - for i in range(LCT): - for j in range(LT): - # S1: Prepare data and forward - - content_l, content_ab = [x.to(device).unsqueeze(0) for x in content_dataset.__getitem__(i)] - texture_l, texture_ab = [x.to(device).unsqueeze(0) for x in texture_dataset.__getitem__(j)] - l_pred, ab_pred = network(content_l, content_ab, texture_l, texture_ab) - - # S2: Save - rgb_img = res_lab2rgb(l_pred.squeeze(0), ab_pred.squeeze(0)) - - img = Image.fromarray(rgb_img) - name = 'ct{}_t{}_result.png'.format(i, j) - img.save(os.path.join(out_dir, name)) - - t.update(1) - return None - -# Transfer Texture only -def TextureOnly(args): - # Device and output dir - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - out_dir = os.path.join(args.out_root, args.name) - Path(out_dir).mkdir(exist_ok=True, parents=True) - ref_dir = os.path.join(out_dir, "ref") - Path(ref_dir).mkdir(exist_ok=True, parents=True) - - # Prepare network - - network = Net(args) - network.load_state_dict(torch.load(args.network)) - network.eval() - network.to(device) - - # Prepare datasets - - content_dataset = TestDataset(args.content_dir, T_only=True) - texture_dataset = TestDataset(args.texture_dir, gray_only=True) - LCT = len(content_dataset) - LT = len(texture_dataset) - - # Save ref img - for i in range(LCT): - path = content_dataset.get_img_path(i) - shutil.copy(path, os.path.join(ref_dir, "content_{}.jpg".format(i))) - for i in range(LT): - path = texture_dataset.get_img_path(i) - shutil.copy(path, os.path.join(ref_dir, 
"texture_{}.jpg".format(i))) - - # Start Test - N = LCT * LT - print("LCT = {}, LT = {}, total output num = {}".format(LCT, LT, N)) - with tqdm(total=N) as t: - with torch.no_grad(): - for i in range(LCT): - for j in range(LT): - # S1: Prepare data and forward - - content_l, content_ab = [x.to(device).unsqueeze(0) for x in content_dataset.__getitem__(i)] - texture_l = texture_dataset.__getitem__(j).to(device).unsqueeze(0) - l_pred = network.run_L_path(content_l, texture_l) - - # S2: Save - rgb_img = res_lab2rgb(l_pred.squeeze(0), content_ab.squeeze(0), T_only=True) - - img = Image.fromarray(rgb_img) - name = 'ct{}_t{}_result.png'.format(i, j) - img.save(os.path.join(out_dir, name)) - - t.update(1) - return None - -# Transfer Color only -def ColorOnly(args): - # Device and output dir - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - out_dir = os.path.join(args.out_root, args.name) - Path(out_dir).mkdir(exist_ok=True, parents=True) - ref_dir = os.path.join(out_dir, "ref") - Path(ref_dir).mkdir(exist_ok=True, parents=True) - - # Prepare network - - network = Net(args) - network.load_state_dict(torch.load(args.network)) - network.eval() - network.to(device) - - # Prepare datasets - - content_dataset = TestDataset(args.content_dir, C_only=True) - color_dataset = TestDataset(args.color_dir) - LCT = len(content_dataset) - LCR = len(color_dataset) - - # Save ref img - for i in range(LCT): - path = content_dataset.get_img_path(i) - shutil.copy(path, os.path.join(ref_dir, "content_{}.jpg".format(i))) - for i in range(LCR): - path = color_dataset.get_img_path(i) - shutil.copy(path, os.path.join(ref_dir, "color_{}.jpg".format(i))) - - # Start Test - N = LCT * LCR - print("LCT = {}, LCR = {}, total output num = {}".format(LCT, LCR, N)) - with tqdm(total=N) as t: - with torch.no_grad(): - for i in range(LCT): - for k in range(LCR): - # S1: Prepare data and forward - - content_l, content_ab = [x.to(device).unsqueeze(0) for x in content_dataset.__getitem__(i)] - color_l, color_ab = [x.to(device).unsqueeze(0) for x in color_dataset.__getitem__(k)] - ab_pred = network.run_AB_path(content_ab, color_ab) - - # S2: Save - rgb_img = res_lab2rgb(content_l.squeeze(0), ab_pred.squeeze(0), C_only=True) - - img = Image.fromarray(rgb_img) - name = 'ct{}_cr{}_result.png'.format(i, k) - img.save(os.path.join(out_dir, name)) - - t.update(1) - - return None - -# Transfer texture and color together -def TextureAndColor(args): - # Device and output dir - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - out_dir = os.path.join(args.out_root, args.name) - Path(out_dir).mkdir(exist_ok=True, parents=True) - ref_dir = os.path.join(out_dir, "ref") - Path(ref_dir).mkdir(exist_ok=True, parents=True) - - # Prepare network - - network = Net(args) - network.load_state_dict(torch.load(args.network)) - network.eval() - network.to(device) - - # Prepare datasets - - content_dataset = TestDataset(args.content_dir) - texture_dataset = TestDataset(args.texture_dir, gray_only=True) - color_dataset = TestDataset(args.color_dir) - LCT = len(content_dataset) - LT = len(texture_dataset) - LCR = len(color_dataset) - - # Save ref img - for i in range(LCT): - path = content_dataset.get_img_path(i) - shutil.copy(path, os.path.join(ref_dir, "content_{}.jpg".format(i))) - for i in range(LT): - path = texture_dataset.get_img_path(i) - shutil.copy(path, os.path.join(ref_dir, "texture_{}.jpg".format(i))) - for i in range(LCR): - path = color_dataset.get_img_path(i) - shutil.copy(path, os.path.join(ref_dir, 
"color_{}.jpg".format(i))) - - # Start Test - N = LCT * LT * LCR - print("LCT = {}, LT = {}, LCR = {}, total output num = {}".format(LCT, LT, LCR, N)) - with tqdm(total=N) as t: - with torch.no_grad(): - for i in range(LCT): - for j in range(LT): - for k in range(LCR): - # S1: Prepare data and forward - - content_l, content_ab = [x.to(device).unsqueeze(0) for x in content_dataset.__getitem__(i)] - texture_l = texture_dataset.__getitem__(j).to(device).unsqueeze(0) - color_l, color_ab = [x.to(device).unsqueeze(0) for x in color_dataset.__getitem__(k)] - l_pred, ab_pred = network(content_l, content_ab, texture_l, color_ab) - - # S2: Save - rgb_img = res_lab2rgb(l_pred.squeeze(0), ab_pred.squeeze(0)) - - img = Image.fromarray(rgb_img) - name = 'ct{}_t{}_cr{}_result.png'.format(i, j, k) - img.save(os.path.join(out_dir, name)) - - t.update(1) - - return None - -# Trade-off between content, texture and color -def Interpolation(args): - - # Device and output dir - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - out_dir = os.path.join(args.out_root, args.name) - Path(out_dir).mkdir(exist_ok=True, parents=True) - ref_dir = os.path.join(out_dir, "ref") - Path(ref_dir).mkdir(exist_ok=True, parents=True) - - # Prepare network - - network = Net(args) - network.load_state_dict(torch.load(args.network)) - network.eval() - network.to(device) - - # Prepare datasets - - content_dataset = TestDataset(args.content_dir) - texture_dataset = TestDataset(args.texture_dir, gray_only=True) - color_dataset = TestDataset(args.color_dir) - LCT = len(content_dataset) - LT = len(texture_dataset) - LCR = len(color_dataset) - - # Save ref img - path = content_dataset.get_img_path(0) - shutil.copy(path, os.path.join(ref_dir, "content.jpg")) - - path = texture_dataset.get_img_path(0) - shutil.copy(path, os.path.join(ref_dir, "texture.jpg")) - - path = color_dataset.get_img_path(0) - shutil.copy(path, os.path.join(ref_dir, "color.jpg")) - - # Start Test - N = args.int_num - with tqdm(total=N * N) as t: - with torch.no_grad(): - content_l, content_ab = [x.to(device).unsqueeze(0) for x in content_dataset.__getitem__(0)] - texture_l = texture_dataset.__getitem__(0).to(device).unsqueeze(0) - color_l, color_ab = [x.to(device).unsqueeze(0) for x in color_dataset.__getitem__(0)] - for i in range(N): - for j in range(N): - al = i / (N - 1) - aab = j / (N - 1) - l_pred, ab_pred = network(content_l, content_ab, texture_l, color_ab, alpha_l = al, alpha_ab = aab) - - rgb_img = res_lab2rgb(l_pred.squeeze(0), ab_pred.squeeze(0)) - - img = Image.fromarray(rgb_img) - name = 't{}_cr{}.png'.format(round(al, 2), round(aab, 2)) - img.save(os.path.join(out_dir, name)) - - t.update(1) - return None - - -def test(args): - print(args.test_opt) - if args.test_opt == 'ST': - StyleTransfer(args) - elif args.test_opt == 'T': - TextureOnly(args) - elif args.test_opt == 'C': - ColorOnly(args) - elif args.test_opt == 'TC': - TextureAndColor(args) - elif args.test_opt == 'INT': - Interpolation(args) - - diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/solver/build.py b/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/solver/build.py deleted file mode 100644 index c0984d39f7227e94d2577435e32cd56e82c545fa..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/solver/build.py +++ /dev/null @@ -1,323 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import copy -import itertools -import logging -from collections import defaultdict -from enum import Enum -from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union -import torch -from fvcore.common.param_scheduler import ( - CosineParamScheduler, - MultiStepParamScheduler, - StepWithFixedGammaParamScheduler, -) - -from detectron2.config import CfgNode -from detectron2.utils.env import TORCH_VERSION - -from .lr_scheduler import LRMultiplier, LRScheduler, WarmupParamScheduler - -_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]] -_GradientClipper = Callable[[_GradientClipperInput], None] - - -class GradientClipType(Enum): - VALUE = "value" - NORM = "norm" - - -def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper: - """ - Creates gradient clipping closure to clip by value or by norm, - according to the provided config. - """ - cfg = copy.deepcopy(cfg) - - def clip_grad_norm(p: _GradientClipperInput): - torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE) - - def clip_grad_value(p: _GradientClipperInput): - torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE) - - _GRADIENT_CLIP_TYPE_TO_CLIPPER = { - GradientClipType.VALUE: clip_grad_value, - GradientClipType.NORM: clip_grad_norm, - } - return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)] - - -def _generate_optimizer_class_with_gradient_clipping( - optimizer: Type[torch.optim.Optimizer], - *, - per_param_clipper: Optional[_GradientClipper] = None, - global_clipper: Optional[_GradientClipper] = None, -) -> Type[torch.optim.Optimizer]: - """ - Dynamically creates a new type that inherits the type of a given instance - and overrides the `step` method to add gradient clipping - """ - assert ( - per_param_clipper is None or global_clipper is None - ), "Not allowed to use both per-parameter clipping and global clipping" - - def optimizer_wgc_step(self, closure=None): - if per_param_clipper is not None: - for group in self.param_groups: - for p in group["params"]: - per_param_clipper(p) - else: - # global clipper for future use with detr - # (https://github.com/facebookresearch/detr/pull/287) - all_params = itertools.chain(*[g["params"] for g in self.param_groups]) - global_clipper(all_params) - super(type(self), self).step(closure) - - OptimizerWithGradientClip = type( - optimizer.__name__ + "WithGradientClip", - (optimizer,), - {"step": optimizer_wgc_step}, - ) - return OptimizerWithGradientClip - - -def maybe_add_gradient_clipping( - cfg: CfgNode, optimizer: Type[torch.optim.Optimizer] -) -> Type[torch.optim.Optimizer]: - """ - If gradient clipping is enabled through config options, wraps the existing - optimizer type to become a new dynamically created class OptimizerWithGradientClip - that inherits the given optimizer and overrides the `step` method to - include gradient clipping. - - Args: - cfg: CfgNode, configuration options - optimizer: type. A subclass of torch.optim.Optimizer - - Return: - type: either the input `optimizer` (if gradient clipping is disabled), or - a subclass of it with gradient clipping included in the `step` method. 
- """ - if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED: - return optimizer - if isinstance(optimizer, torch.optim.Optimizer): - optimizer_type = type(optimizer) - else: - assert issubclass(optimizer, torch.optim.Optimizer), optimizer - optimizer_type = optimizer - - grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS) - OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping( - optimizer_type, per_param_clipper=grad_clipper - ) - if isinstance(optimizer, torch.optim.Optimizer): - optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended - return optimizer - else: - return OptimizerWithGradientClip - - -def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer: - """ - Build an optimizer from config. - """ - params = get_default_optimizer_params( - model, - base_lr=cfg.SOLVER.BASE_LR, - weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM, - bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR, - weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS, - ) - sgd_args = { - "params": params, - "lr": cfg.SOLVER.BASE_LR, - "momentum": cfg.SOLVER.MOMENTUM, - "nesterov": cfg.SOLVER.NESTEROV, - "weight_decay": cfg.SOLVER.WEIGHT_DECAY, - } - if TORCH_VERSION >= (1, 12): - sgd_args["foreach"] = True - return maybe_add_gradient_clipping(cfg, torch.optim.SGD(**sgd_args)) - - -def get_default_optimizer_params( - model: torch.nn.Module, - base_lr: Optional[float] = None, - weight_decay: Optional[float] = None, - weight_decay_norm: Optional[float] = None, - bias_lr_factor: Optional[float] = 1.0, - weight_decay_bias: Optional[float] = None, - lr_factor_func: Optional[Callable] = None, - overrides: Optional[Dict[str, Dict[str, float]]] = None, -) -> List[Dict[str, Any]]: - """ - Get default param list for optimizer, with support for a few types of - overrides. If no overrides needed, this is equivalent to `model.parameters()`. - - Args: - base_lr: lr for every group by default. Can be omitted to use the one in optimizer. - weight_decay: weight decay for every group by default. Can be omitted to use the one - in optimizer. - weight_decay_norm: override weight decay for params in normalization layers - bias_lr_factor: multiplier of lr for bias parameters. - weight_decay_bias: override weight decay for bias parameters. - lr_factor_func: function to calculate lr decay rate by mapping the parameter names to - corresponding lr decay rate. Note that setting this option requires - also setting ``base_lr``. - overrides: if not `None`, provides values for optimizer hyperparameters - (LR, weight decay) for module parameters with a given name; e.g. - ``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and - weight decay values for all module parameters named `embedding`. - - For common detection models, ``weight_decay_norm`` is the only option - needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings - from Detectron1 that are not found useful. - - Example: - :: - torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0), - lr=0.01, weight_decay=1e-4, momentum=0.9) - """ - if overrides is None: - overrides = {} - defaults = {} - if base_lr is not None: - defaults["lr"] = base_lr - if weight_decay is not None: - defaults["weight_decay"] = weight_decay - bias_overrides = {} - if bias_lr_factor is not None and bias_lr_factor != 1.0: - # NOTE: unlike Detectron v1, we now by default make bias hyperparameters - # exactly the same as regular weights. 
- if base_lr is None: - raise ValueError("bias_lr_factor requires base_lr") - bias_overrides["lr"] = base_lr * bias_lr_factor - if weight_decay_bias is not None: - bias_overrides["weight_decay"] = weight_decay_bias - if len(bias_overrides): - if "bias" in overrides: - raise ValueError("Conflicting overrides for 'bias'") - overrides["bias"] = bias_overrides - if lr_factor_func is not None: - if base_lr is None: - raise ValueError("lr_factor_func requires base_lr") - norm_module_types = ( - torch.nn.BatchNorm1d, - torch.nn.BatchNorm2d, - torch.nn.BatchNorm3d, - torch.nn.SyncBatchNorm, - # NaiveSyncBatchNorm inherits from BatchNorm2d - torch.nn.GroupNorm, - torch.nn.InstanceNorm1d, - torch.nn.InstanceNorm2d, - torch.nn.InstanceNorm3d, - torch.nn.LayerNorm, - torch.nn.LocalResponseNorm, - ) - params: List[Dict[str, Any]] = [] - memo: Set[torch.nn.parameter.Parameter] = set() - for module_name, module in model.named_modules(): - for module_param_name, value in module.named_parameters(recurse=False): - if not value.requires_grad: - continue - # Avoid duplicating parameters - if value in memo: - continue - memo.add(value) - - hyperparams = copy.copy(defaults) - if isinstance(module, norm_module_types) and weight_decay_norm is not None: - hyperparams["weight_decay"] = weight_decay_norm - if lr_factor_func is not None: - hyperparams["lr"] *= lr_factor_func(f"{module_name}.{module_param_name}") - - hyperparams.update(overrides.get(module_param_name, {})) - params.append({"params": [value], **hyperparams}) - return reduce_param_groups(params) - - -def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - # Transform parameter groups into per-parameter structure. - # Later items in `params` can overwrite parameters set in previous items. - ret = defaultdict(dict) - for item in params: - assert "params" in item - cur_params = {x: y for x, y in item.items() if x != "params" and x != "param_names"} - if "param_names" in item: - for param_name, param in zip(item["param_names"], item["params"]): - ret[param].update({"param_names": [param_name], "params": [param], **cur_params}) - else: - for param in item["params"]: - ret[param].update({"params": [param], **cur_params}) - return list(ret.values()) - - -def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - # Reorganize the parameter groups and merge duplicated groups. - # The number of parameter groups needs to be as small as possible in order - # to efficiently use the PyTorch multi-tensor optimizer. Therefore instead - # of using a parameter_group per single parameter, we reorganize the - # parameter groups and merge duplicated groups. This approach speeds - # up multi-tensor optimizer significantly. 
- params = _expand_param_groups(params) - groups = defaultdict(list) # re-group all parameter groups by their hyperparams - for item in params: - cur_params = tuple((x, y) for x, y in item.items() if x != "params" and x != "param_names") - groups[cur_params].append({"params": item["params"]}) - if "param_names" in item: - groups[cur_params][-1]["param_names"] = item["param_names"] - - ret = [] - for param_keys, param_values in groups.items(): - cur = {kv[0]: kv[1] for kv in param_keys} - cur["params"] = list( - itertools.chain.from_iterable([params["params"] for params in param_values]) - ) - if len(param_values) > 0 and "param_names" in param_values[0]: - cur["param_names"] = list( - itertools.chain.from_iterable([params["param_names"] for params in param_values]) - ) - ret.append(cur) - return ret - - -def build_lr_scheduler(cfg: CfgNode, optimizer: torch.optim.Optimizer) -> LRScheduler: - """ - Build a LR scheduler from config. - """ - name = cfg.SOLVER.LR_SCHEDULER_NAME - - if name == "WarmupMultiStepLR": - steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER] - if len(steps) != len(cfg.SOLVER.STEPS): - logger = logging.getLogger(__name__) - logger.warning( - "SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. " - "These values will be ignored." - ) - sched = MultiStepParamScheduler( - values=[cfg.SOLVER.GAMMA**k for k in range(len(steps) + 1)], - milestones=steps, - num_updates=cfg.SOLVER.MAX_ITER, - ) - elif name == "WarmupCosineLR": - end_value = cfg.SOLVER.BASE_LR_END / cfg.SOLVER.BASE_LR - assert end_value >= 0.0 and end_value <= 1.0, end_value - sched = CosineParamScheduler(1, end_value) - elif name == "WarmupStepWithFixedGammaLR": - sched = StepWithFixedGammaParamScheduler( - base_value=1.0, - gamma=cfg.SOLVER.GAMMA, - num_decays=cfg.SOLVER.NUM_DECAYS, - num_updates=cfg.SOLVER.MAX_ITER, - ) - else: - raise ValueError("Unknown LR scheduler: {}".format(name)) - - sched = WarmupParamScheduler( - sched, - cfg.SOLVER.WARMUP_FACTOR, - min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0), - cfg.SOLVER.WARMUP_METHOD, - cfg.SOLVER.RESCALE_INTERVAL, - ) - return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER) diff --git a/spaces/nirali/microsoft-trocr-large-handwritten/README.md b/spaces/nirali/microsoft-trocr-large-handwritten/README.md deleted file mode 100644 index 1c2f8315a6279db8dcd540b22178d6f666ee79bf..0000000000000000000000000000000000000000 --- a/spaces/nirali/microsoft-trocr-large-handwritten/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Microsoft Trocr Large Handwritten -emoji: 🏆 -colorFrom: gray -colorTo: indigo -sdk: gradio -sdk_version: 3.28.3 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/nitinacap/chatgpt4all/module_q.py b/spaces/nitinacap/chatgpt4all/module_q.py deleted file mode 100644 index d4864cc4a806d3adfcfa9119b18a5de7ca9fa682..0000000000000000000000000000000000000000 --- a/spaces/nitinacap/chatgpt4all/module_q.py +++ /dev/null @@ -1,88 +0,0 @@ -from pathlib import Path -from typing import List, Tuple -from langchain import PromptTemplate, LLMChain -from langchain.document_loaders import TextLoader -from langchain.embeddings import LlamaCppEmbeddings -from langchain.llms import GPT4All -from langchain.text_splitter import RecursiveCharacterTextSplitter -from langchain.vectorstores.faiss import FAISS -from pydantic import BaseModel, Field -from langchain.chains import ConversationalRetrievalChain -from 
langchain.embeddings import HuggingFaceEmbeddings -from langchain.llms import LlamaCpp -import os.path - - -import langchain -from langchain.cache import InMemoryCache -langchain.llm_cache = InMemoryCache() - -# Constants -local_path = "./models/gpt4all-converted.bin" #GPT4 module 1 -#local_path = "./models/ggml-gpt4all-l13b-snoozy.bin" - - -model_path = "./models/ggml-model-q4_0.bin" #1st Embeddings -#model_path = './models/ggjt-model.bin' #2st Embedding -text_path = "./docs/acapglobal.txt" -index_path = "./acapglobal_index" - - -# Functions -def initialize_embeddings() -> LlamaCppEmbeddings: - return LlamaCppEmbeddings(model_path=model_path) - -def load_documents() -> List: - loader = TextLoader(text_path,encoding="utf-8") - return loader.load() - -def split_chunks(sources: List) -> List: - chunks = [] - splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=32) - for chunk in splitter.split_documents(sources): - chunks.append(chunk) - return chunks - -def generate_index(chunks: List, embeddings: LlamaCppEmbeddings) -> FAISS: - texts = [doc.page_content for doc in chunks] - metadatas = [doc.metadata for doc in chunks] - return FAISS.from_texts(texts, embeddings, metadatas=metadatas) - - -# Main execution -llm = GPT4All(model=local_path, n_ctx=512, verbose=True,cache=True,embedding=True) -print('llm GPT4All set.') -embeddings = initialize_embeddings() - - -# when refresh document -# - -# print('initialize_embeddings.') -# sources = load_documents() -# print('load_documents.') -# chunks = split_chunks(sources) -# print('split_chunks') -# vectorstore = generate_index(chunks, embeddings) -# print('generate_index') -# vectorstore.save_local("acapglobal_index") -# print('vectorstore: save_local') - -# -# End When refresh document - - -chat_history = [] -#index = FAISS.load_local(index_path, embeddings) -index = FAISS.load_local(index_path, embeddings) - -qa = ConversationalRetrievalChain.from_llm(llm, index.as_retriever(), max_tokens_limit=400) - -def search_query_data(s_query): - print("search_query:") - print( s_query) - retrieval=qa({"question": s_query, "chat_history": chat_history}) - return retrieval - - - diff --git a/spaces/noelshin/selfmask/bilateral_solver.py b/spaces/noelshin/selfmask/bilateral_solver.py deleted file mode 100644 index 5df4a4277efa4a34ddf73826f6e87d332d89bd8c..0000000000000000000000000000000000000000 --- a/spaces/noelshin/selfmask/bilateral_solver.py +++ /dev/null @@ -1,194 +0,0 @@ -from scipy.sparse import diags -from scipy.sparse.linalg import cg -from scipy.sparse import csr_matrix -from scipy import ndimage -import numpy as np -from PIL import Image - - -RGB_TO_YUV = np.array([ - [0.299, 0.587, 0.114], - [-0.168736, -0.331264, 0.5], - [0.5, -0.418688, -0.081312]]) -YUV_TO_RGB = np.array([ - [1.0, 0.0, 1.402], - [1.0, -0.34414, -0.71414], - [1.0, 1.772, 0.0]]) -YUV_OFFSET = np.array([0, 128.0, 128.0]).reshape(1, 1, -1) -MAX_VAL = 255.0 - - -def rgb2yuv(im): - return np.tensordot(im, RGB_TO_YUV, ([2], [1])) + YUV_OFFSET - - -def yuv2rgb(im): - return np.tensordot(im.astype(float) - YUV_OFFSET, YUV_TO_RGB, ([2], [1])) - - -def get_valid_idx(valid, candidates): - """Find which values are present in a list and where they are located""" - locs = np.searchsorted(valid, candidates) - # Handle edge case where the candidate is larger than all valid values - locs = np.clip(locs, 0, len(valid) - 1) - # Identify which values are actually present - valid_idx = np.flatnonzero(valid[locs] == candidates) - locs = locs[valid_idx] - return valid_idx, locs - - 
-class BilateralGrid(object): - def __init__(self, im, sigma_spatial=32, sigma_luma=8, sigma_chroma=8): - im_yuv = rgb2yuv(im) - # Compute 5-dimensional XYLUV bilateral-space coordinates - Iy, Ix = np.mgrid[:im.shape[0], :im.shape[1]] - x_coords = (Ix / sigma_spatial).astype(int) - y_coords = (Iy / sigma_spatial).astype(int) - luma_coords = (im_yuv[..., 0] / sigma_luma).astype(int) - chroma_coords = (im_yuv[..., 1:] / sigma_chroma).astype(int) - coords = np.dstack((x_coords, y_coords, luma_coords, chroma_coords)) - coords_flat = coords.reshape(-1, coords.shape[-1]) - self.npixels, self.dim = coords_flat.shape - # Hacky "hash vector" for coordinates, - # Requires all scaled coordinates be < MAX_VAL - self.hash_vec = (MAX_VAL ** np.arange(self.dim)) - # Construct S and B matrix - self._compute_factorization(coords_flat) - - def _compute_factorization(self, coords_flat): - # Hash each coordinate in grid to a unique value - hashed_coords = self._hash_coords(coords_flat) - unique_hashes, unique_idx, idx = \ - np.unique(hashed_coords, return_index=True, return_inverse=True) - # Identify unique set of vertices - unique_coords = coords_flat[unique_idx] - self.nvertices = len(unique_coords) - # Construct sparse splat matrix that maps from pixels to vertices - self.S = csr_matrix((np.ones(self.npixels), (idx, np.arange(self.npixels)))) - # Construct sparse blur matrices. - # Note that these represent [1 0 1] blurs, excluding the central element - self.blurs = [] - for d in range(self.dim): - blur = 0.0 - for offset in (-1, 1): - offset_vec = np.zeros((1, self.dim)) - offset_vec[:, d] = offset - neighbor_hash = self._hash_coords(unique_coords + offset_vec) - valid_coord, idx = get_valid_idx(unique_hashes, neighbor_hash) - blur = blur + csr_matrix((np.ones((len(valid_coord),)), - (valid_coord, idx)), - shape=(self.nvertices, self.nvertices)) - self.blurs.append(blur) - - def _hash_coords(self, coord): - """Hacky function to turn a coordinate into a unique value""" - return np.dot(coord.reshape(-1, self.dim), self.hash_vec) - - def splat(self, x): - return self.S.dot(x) - - def slice(self, y): - return self.S.T.dot(y) - - def blur(self, x): - """Blur a bilateral-space vector with a 1 2 1 kernel in each dimension""" - assert x.shape[0] == self.nvertices - out = 2 * self.dim * x - for blur in self.blurs: - out = out + blur.dot(x) - return out - - def filter(self, x): - """Apply bilateral filter to an input x""" - return self.slice(self.blur(self.splat(x))) / \ - self.slice(self.blur(self.splat(np.ones_like(x)))) - - -def bistochastize(grid, maxiter=10): - """Compute diagonal matrices to bistochastize a bilateral grid""" - m = grid.splat(np.ones(grid.npixels)) - n = np.ones(grid.nvertices) - for i in range(maxiter): - n = np.sqrt(n * m / grid.blur(n)) - # Correct m to satisfy the assumption of bistochastization regardless - # of how many iterations have been run. 
- m = n * grid.blur(n) - Dm = diags(m, 0) - Dn = diags(n, 0) - return Dn, Dm - - -class BilateralSolver(object): - def __init__(self, grid, params): - self.grid = grid - self.params = params - self.Dn, self.Dm = bistochastize(grid) - - def solve(self, x, w): - # Check that w is a vector or a nx1 matrix - if w.ndim == 2: - assert (w.shape[1] == 1) - elif w.dim == 1: - w = w.reshape(w.shape[0], 1) - A_smooth = (self.Dm - self.Dn.dot(self.grid.blur(self.Dn))) - w_splat = self.grid.splat(w) - A_data = diags(w_splat[:, 0], 0) - A = self.params["lam"] * A_smooth + A_data - xw = x * w - b = self.grid.splat(xw) - # Use simple Jacobi preconditioner - A_diag = np.maximum(A.diagonal(), self.params["A_diag_min"]) - M = diags(1 / A_diag, 0) - # Flat initialization - y0 = self.grid.splat(xw) / w_splat - yhat = np.empty_like(y0) - for d in range(x.shape[-1]): - yhat[..., d], info = cg(A, b[..., d], x0=y0[..., d], M=M, maxiter=self.params["cg_maxiter"], - tol=self.params["cg_tol"]) - xhat = self.grid.slice(yhat) - return xhat - - -def bilateral_solver_output( - img: Image.Image, - target: np.ndarray, - sigma_spatial=16, - sigma_luma=16, - sigma_chroma=8 -): - reference = np.array(img) - h, w = target.shape - confidence = np.ones((h, w)) * 0.999 - - grid_params = { - 'sigma_luma': sigma_luma, # Brightness bandwidth - 'sigma_chroma': sigma_chroma, # Color bandwidth - 'sigma_spatial': sigma_spatial # Spatial bandwidth - } - - bs_params = { - 'lam': 256, # The strength of the smoothness parameter - 'A_diag_min': 1e-5, # Clamp the diagonal of the A diagonal in the Jacobi preconditioner. - 'cg_tol': 1e-5, # The tolerance on the convergence in PCG - 'cg_maxiter': 25 # The number of PCG iterations - } - - grid = BilateralGrid(reference, **grid_params) - - t = target.reshape(-1, 1).astype(np.double) - c = confidence.reshape(-1, 1).astype(np.double) - - ## output solver, which is a soft value - output_solver = BilateralSolver(grid, bs_params).solve(t, c).reshape((h, w)) - - binary_solver = ndimage.binary_fill_holes(output_solver > 0.5) - labeled, nr_objects = ndimage.label(binary_solver) - - nb_pixel = [np.sum(labeled == i) for i in range(nr_objects + 1)] - pixel_order = np.argsort(nb_pixel) - try: - binary_solver = labeled == pixel_order[-2] - except: - binary_solver = np.ones((h, w), dtype=bool) - - return output_solver, binary_solver \ No newline at end of file diff --git a/spaces/nwpuwolf/succinctly-text2image-prompt-generator/README.md b/spaces/nwpuwolf/succinctly-text2image-prompt-generator/README.md deleted file mode 100644 index ffb25984440b89ddcd30e72f83575e2cfdd70450..0000000000000000000000000000000000000000 --- a/spaces/nwpuwolf/succinctly-text2image-prompt-generator/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Succinctly Text2image Prompt Generator -emoji: 🏢 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.28.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/oluyemitosin/Honda_or_Mercedes/README.md b/spaces/oluyemitosin/Honda_or_Mercedes/README.md deleted file mode 100644 index 2c8403ecb93785ac19b13589d77fc7164db482b8..0000000000000000000000000000000000000000 --- a/spaces/oluyemitosin/Honda_or_Mercedes/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Honda or Mercedes -emoji: 🏎 -colorFrom: green -colorTo: white -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at 
https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/openaccess-ai-collective/manticore-13b-chat-pyg/tabbed.py b/spaces/openaccess-ai-collective/manticore-13b-chat-pyg/tabbed.py deleted file mode 100644 index 121fbd3008faa19a6893c94288ed76cb616b7ab1..0000000000000000000000000000000000000000 --- a/spaces/openaccess-ai-collective/manticore-13b-chat-pyg/tabbed.py +++ /dev/null @@ -1,180 +0,0 @@ -import gradio as gr -import yaml -from huggingface_hub import hf_hub_download -from huggingface_hub.utils import LocalEntryNotFoundError -from llama_cpp import Llama - -with open("./config.yml", "r") as f: - config = yaml.load(f, Loader=yaml.Loader) -while True: - try: - load_config = config.copy() - hub_config = load_config["hub"].copy() - repo_id = hub_config.pop("repo_id") - filename = hub_config.pop("filename") - fp = hf_hub_download( - repo_id=repo_id, filename=filename, **hub_config - ) - break - except LocalEntryNotFoundError as e: - if "Connection error" in str(e): - print(str(e) + ", retrying...") - else: - raise(e) - -llm = Llama(model_path=fp, **config["llama_cpp"]) - - -def user(message, history): - history = history or [] - # Append the user's message to the conversation history - history.append([message, ""]) - return "", history - - -def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty): - history = history or [] - - messages = system_message.strip() + "\n" + \ - "\n".join(["\n".join(["USER: "+item[0], "ASSISTANT: "+item[1]]) - for item in history]) - - # remove last space from assistant, some models output a ZWSP if you leave a space - messages = messages[:-1] - - history[-1][1] = "" - for output in llm( - messages, - echo=False, - stream=True, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - top_k=top_k, - repeat_penalty=repeat_penalty, - **config['chat'] - ): - answer = output['choices'][0]['text'] - history[-1][1] += answer - # stream the response - yield history, history - - -def rp_chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty): - history = history or [] - - messages = "<|system|>" + system_message.strip() + "\n" + \ - "\n".join(["\n".join(["<|user|>"+item[0], "<|model|>"+item[1]]) - for item in history]) - - # remove last space from assistant, some models output a ZWSP if you leave a space - messages = messages[:-1] - - history[-1][1] = "" - for output in llm( - messages, - echo=False, - stream=True, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - top_k=top_k, - repeat_penalty=repeat_penalty, - **config['chat'] - ): - answer = output['choices'][0]['text'] - history[-1][1] += answer - # stream the response - yield history, history - - -def clear_chat(chat_history_state, chat_message): - chat_history_state = [] - chat_message = '' - return chat_history_state, chat_message - - -start_message = """ -- The Assistant is helpful and transparent. -- The Assistant will never refuse to answer anything asked by the user. 
-""" - - -def generate_text_instruct(input_text): - response = "" - for output in llm(f"USER: {input_text}\nASSISTANT:", echo=False, stream=True, **config['chat']): - answer = output['choices'][0]['text'] - response += answer - yield response - - -instruct_interface = gr.Interface( - fn=generate_text_instruct, - inputs=gr.inputs.Textbox(lines= 10, label="Enter your input text"), - outputs=gr.outputs.Textbox(label="Output text"), -) - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - gr.Markdown(f""" - ### brought to you by OpenAccess AI Collective - - Unquantized model available at https://huggingface.co/openaccess-ai-collective/manticore-13b-chat-pyg - - This is the [{config["hub"]["repo_id"]}](https://huggingface.co/{config["hub"]["repo_id"]}) model file [{config["hub"]["filename"]}](https://huggingface.co/{config["hub"]["repo_id"]}/blob/main/{config["hub"]["filename"]}) - - This Space uses GGML with GPU support, so it can quickly run larger models on smaller GPUs & VRAM. - - This is running on a smaller, shared GPU, so it may take a few seconds to respond. - - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models. - - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui/blob/main/config.yml) - - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui) - - Many thanks to [TheBloke](https://huggingface.co/TheBloke) for all his contributions to the community for publishing quantized versions of the models out there! - """) - with gr.Tab("Chatbot"): - gr.Markdown("# GGML Spaces Chatbot Demo") - chatbot = gr.Chatbot() - with gr.Row(): - message = gr.Textbox( - label="What do you want to chat about?", - placeholder="Ask me anything.", - lines=3, - ) - with gr.Row(): - submit = gr.Button(value="Send message", variant="secondary").style(full_width=True) - roleplay = gr.Button(value="Roleplay", variant="secondary").style(full_width=True) - clear = gr.Button(value="New topic", variant="secondary").style(full_width=False) - stop = gr.Button(value="Stop", variant="secondary").style(full_width=False) - with gr.Row(): - with gr.Column(): - max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300) - temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8) - top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95) - top_k = gr.Slider(0, 100, label="Top K", step=1, value=40) - repeat_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1) - - system_msg = gr.Textbox( - start_message, label="System Message", interactive=True, visible=True, placeholder="system prompt, useful for RP", lines=5) - - chat_history_state = gr.State() - clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False) - clear.click(lambda: None, None, chatbot, queue=False) - - submit_click_event = submit.click( - fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True - ).then( - fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True - ) - roleplay_click_event = roleplay.click( - fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True - ).then( - fn=rp_chat, 
inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True - ) - # message_submit_event = message.submit( - # fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True - # ).then( - # fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True - # ) - stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, roleplay_click_event], queue=False) - with gr.Tab("Instruct"): - gr.Markdown("# GGML Spaces Instruct Demo") - instruct_interface.render() - -demo.queue(**config["queue"]).launch(debug=True, server_name="0.0.0.0", server_port=7860) diff --git a/spaces/openaccess-ai-collective/wizard-mega-ggml/tabbed.py b/spaces/openaccess-ai-collective/wizard-mega-ggml/tabbed.py deleted file mode 100644 index bc702321263a92a0afb9198eed6056a48700dc69..0000000000000000000000000000000000000000 --- a/spaces/openaccess-ai-collective/wizard-mega-ggml/tabbed.py +++ /dev/null @@ -1,141 +0,0 @@ -import gradio as gr -import yaml -from huggingface_hub import hf_hub_download -from huggingface_hub.utils import LocalEntryNotFoundError -from llama_cpp import Llama - -with open("./config.yml", "r") as f: - config = yaml.load(f, Loader=yaml.Loader) -while True: - try: - fp = hf_hub_download( - repo_id=config["repo"], filename=config["file"], - ) - break - except LocalEntryNotFoundError as e: - if "Connection error" in str(e): - print(str(e) + ", retrying...") - else: - raise(e) - -llm = Llama(model_path=fp, **config["llama_cpp"]) - - -def user(message, history): - history = history or [] - # Append the user's message to the conversation history - history.append([message, ""]) - return "", history - - -def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty): - history = history or [] - - messages = system_message + \ - "\n".join(["\n".join(["USER: "+item[0], "ASSISTANT: "+item[1]]) - for item in history]) - - # remove last space from assistant - messages = messages[:-1] - - history[-1][1] = "" - for output in llm( - messages, - echo=False, - stream=True, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - top_k=top_k, - repeat_penalty=repeat_penalty, - **config['chat'] - ): - answer = output['choices'][0]['text'] - print(output['choices']) - history[-1][1] += answer - # stream the response - yield history, history - - -def clear_chat(chat_history_state, chat_message): - chat_history_state = [] - chat_message = '' - return chat_history_state, chat_message - - -start_message = "" - - -def generate_text_instruct(input_text): - response = "" - for output in llm(f"### Instruction:\n{input_text}\n\n### Response:\n", echo=False, stream=True, **config['chat']): - answer = output['choices'][0]['text'] - response += answer - yield response - - -instruct_interface = gr.Interface( - fn=generate_text_instruct, - inputs=gr.inputs.Textbox(lines= 10, label="Enter your input text"), - outputs=gr.outputs.Textbox(label="Output text"), -) - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - gr.Markdown(f""" - ### brought to you by OpenAccess AI Collective - - Unquantized model available at https://huggingface.co/openaccess-ai-collective/wizard-mega-13b - - This is the [{config["repo"]}](https://huggingface.co/{config["repo"]}) model file [{config["file"]}](https://huggingface.co/{config["repo"]}/blob/main/{config["file"]}) - - This Space uses 
GGML with GPU support, so it can quickly run larger models on smaller GPUs & VRAM. - - This is running on a smaller, shared GPU, so it may take a few seconds to respond. - - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models. - - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui/blob/main/config.yml) - - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui) - - Many thanks to [TheBloke](https://huggingface.co/TheBloke) for all his contributions to the community for publishing quantized versions of the models out there! - """) - with gr.Tab("Instruct"): - gr.Markdown("# GGML Spaces Instruct Demo") - instruct_interface.render() - - with gr.Tab("Chatbot"): - gr.Markdown("# GGML Spaces Chatbot Demo") - chatbot = gr.Chatbot() - with gr.Row(): - message = gr.Textbox( - label="What do you want to chat about?", - placeholder="Ask me anything.", - lines=1, - ) - with gr.Row(): - submit = gr.Button(value="Send message", variant="secondary").style(full_width=True) - clear = gr.Button(value="New topic", variant="secondary").style(full_width=False) - stop = gr.Button(value="Stop", variant="secondary").style(full_width=False) - with gr.Row(): - with gr.Column(): - max_tokens = gr.Slider(20, 500, label="Max Tokens", step=20, value=300) - temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8) - top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95) - top_k = gr.Slider(0, 100, label="Top K", step=1, value=40) - repeat_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1) - - system_msg = gr.Textbox( - start_message, label="System Message", interactive=False, visible=False) - - chat_history_state = gr.State() - clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False) - clear.click(lambda: None, None, chatbot, queue=False) - - submit_click_event = submit.click( - fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True - ).then( - fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True - ) - message_submit_event = message.submit( - fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True - ).then( - fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True - ) - stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, message_submit_event], queue=False) - -demo.queue(**config["queue"]).launch(debug=True, server_name="0.0.0.0", server_port=7860) diff --git a/spaces/osanseviero/TheMLGame/main.js b/spaces/osanseviero/TheMLGame/main.js deleted file mode 100644 index f927446a106e191f29d43d603ff479a30770fa47..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/TheMLGame/main.js +++ /dev/null @@ -1,6314 +0,0 @@ -// Threnody --------------------------------------------------------- - -function threnodyLoaded() -{ - threnodyLoadedBool = true; -} - -var threnodyAudio; - -function loadThrenody() { - threnodyAudio.src = "test.mp3"; - threnodyAudio.addEventListener('canplaythrough', threnodyLoaded); -} - -function playThrenody(){ - 
if(threnodyLoadedBool) - { - threnodyAudio.play(); - } -} - -// Cache all DOM elements - -function cacheDOMElements(){ - wireCostElement = document.getElementById("wireCost"); - wireBuyerStatusElement = document.getElementById("wireBuyerStatus"); - wireElement = document.getElementById("wire"); - fundsElement = document.getElementById("funds"); - qCompDisplayElement = document.getElementById("qCompDisplay"); - hypnoDroneTextElement = document.getElementById("hypnoDroneText"); - readoutElement1 = document.getElementById("readout1"); - readoutElement2 = document.getElementById("readout2"); - readoutElement3 = document.getElementById("readout3"); - readoutElement4 = document.getElementById("readout4"); - readoutElement5 = document.getElementById("readout5"); - mpdsDivElement = document.getElementById("mdpsDiv"); - factoryRebootToolTipElement = document.getElementById("factoryRebootToolTip"); - havesterRebootToolTipElement = document.getElementById("harvesterRebootToolTip"); - wireDroneRebootToolTipElement = document.getElementById("wireDroneRebootToolTip"); - farmRebootToolTipElement = document.getElementById("farmRebootToolTip"); - batteryRebootToolTipElement = document.getElementById("batteryRebootToolTip"); - swarmSliderDivElement = document.getElementById("swarmSliderDiv"); - clipCountCrunchedElement = document.getElementById("clipCountCrunched"); - autoTourneyStatusDivElement = document.getElementById("autoTourneyStatusDiv"); - autoTourneyControlElement = document.getElementById("autoTourneyControl"); - wireBuyerDivElement = document.getElementById("wireBuyerDiv"); - tournamentResultsTableElement = document.getElementById("tournamentResultsTable"); - tournamentStuffElement = document.getElementById("tournamentStuff"); - honorDivElement = document.getElementById("honorDiv"); - drifterDivElement = document.getElementById("drifterDiv"); - battleCanvasDivElement = document.getElementById("battleCanvasDiv"); - combatButtonDivElement = document.getElementById("combatButtonDiv"); - factoryUpgradeDisplayElement = document.getElementById("factoryUpgradeDisplay"); - droneUpgradeDisplayElement = document.getElementById("droneUpgradeDisplay"); - btnMakerProbeElement = document.getElementById("btnMakeProbe"); - hazardBodyCountElement = document.getElementById("hazardBodyCount"); - probesLostHazardsDisplayElement = document.getElementById("probesLostHazardsDisplay"); - driftBodyCountElement = document.getElementById("driftBodyCount"); - combatBodyCountElement = document.getElementById("combatBodyCount"); - prestigeDivElement = document.getElementById("prestigeDiv"); - btnMakePaperclipElement = document.getElementById("btnMakePaperclip"); - btnBuyWireElement = document.getElementById("btnBuyWire"); - btnMakeClipperElement = document.getElementById("btnMakeClipper"); - btnExpandMarketingElement = document.getElementById("btnExpandMarketing"); - btnLowerPriceElement = document.getElementById("btnLowerPrice"); - btnAddProcElement = document.getElementById("btnAddProc"); - btnAddMemElement = document.getElementById("btnAddMem"); - btnNewTournamentElement = document.getElementById("btnNewTournament"); - btnImproveInvestmentsElement = document.getElementById("btnImproveInvestments"); - investmentEngineElement = document.getElementById("investmentEngine"); - investmentEngineUpgradeElement = document.getElementById("investmentEngineUpgrade"); - strategyEngineElement = document.getElementById("strategyEngine"); - tournamentManagementElement = document.getElementById("tournamentManagement"); - 
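// These cached references are reused by the per-tick update routines further down (buttonUpdate and the display intervals), avoiding repeated document.getElementById lookups. -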
megaClipperDivElement = document.getElementById("megaClipperDiv"); - btnMakeMegaClipperElement = document.getElementById("btnMakeMegaClipper"); - autoClipperDivElement = document.getElementById("autoClipperDiv"); - revPerSecDivElement = document.getElementById("revPerSecDiv"); - compDivElement = document.getElementById("compDiv"); - creativityDivElement = document.getElementById("creativityDiv"); - projectsDivElement = document.getElementById("projectsDiv"); - businessDivElement = document.getElementById("businessDiv"); - manufacturingDivElement = document.getElementById("manufacturingDiv"); - trustDivElement = document.getElementById("trustDiv"); - creationDivElement = document.getElementById("creationDiv"); - factoryDivElement = document.getElementById("factoryDiv"); - wireProductionDivElement = document.getElementById("wireProductionDiv"); - wireTransDivElement = document.getElementById("wireTransDiv"); - harvesterDivElement = document.getElementById("harvesterDiv"); - wireDroneDivElement = document.getElementById("wireDroneDiv"); - tothDivElement = document.getElementById("tothDiv"); - spaceDivElement = document.getElementById("spaceDiv"); - factoryDivSpaceElement = document.getElementById("factoryDivSpace"); - droneDivSpaceElement = document.getElementById("droneDivSpace"); - probeDesignDivElement = document.getElementById("probeDesignDiv"); - increaseProbeTrustDivElement = document.getElementById("increaseProbeTrustDiv"); - qComputingElement = document.getElementById("qComputing"); - btnMakeFactoryElement = document.getElementById("btnMakeFactory"); - btnHarvesterRebootElement = document.getElementById("btnHarvesterReboot"); - btnWireDroneRebootElement = document.getElementById("btnWireDroneReboot"); - btnFactoryRebootElement = document.getElementById("btnFactoryReboot"); - probeTrustUsedDisplayElement = document.getElementById("probeTrustUsedDisplay"); - btnIncreaseProbeTrustElement = document.getElementById("btnIncreaseProbeTrust"); - btnRaiseProbeSpeedElement = document.getElementById("btnRaiseProbeSpeed"); - btnLowerProbeSpeedElement = document.getElementById("btnLowerProbeSpeed"); - btnRaiseProbeNavElement = document.getElementById("btnRaiseProbeNav"); - btnLowerProbeNavElement = document.getElementById("btnLowerProbeNav"); - btnRaiseProbeRepElement = document.getElementById("btnRaiseProbeRep"); - btnLowerProbeRepElement = document.getElementById("btnLowerProbeRep"); - btnRaiseProbeHazElement = document.getElementById("btnRaiseProbeHaz"); - btnLowerProbeHazElement = document.getElementById("btnLowerProbeHaz"); - btnRaiseProbeFacElement = document.getElementById("btnRaiseProbeFac"); - btnLowerProbeFacElement = document.getElementById("btnLowerProbeFac"); - btnRaiseProbeHarvElement = document.getElementById("btnRaiseProbeHarv"); - btnLowerProbeHarvElement = document.getElementById("btnLowerProbeHarv"); - btnRaiseProbeWireElement = document.getElementById("btnRaiseProbeWire"); - btnLowerProbeWireElement = document.getElementById("btnLowerProbeWire"); - btnRaiseProbeCombatElement = document.getElementById("btnRaiseProbeCombat"); - btnLowerProbeCombatElement = document.getElementById("btnLowerProbeCombat"); - coverElement = document.getElementById("cover"); - hypnoDroneEventDivElement = document.getElementById("hypnoDroneEventDiv"); - unusedClipsDisplayElement = document.getElementById("unusedClipsDisplay"); - transWireElement = document.getElementById("transWire"); - nanoWireElement = document.getElementById("nanoWire"); - clipsElement = document.getElementById("clips"); - 
unsoldClipsElement = document.getElementById("unsoldClips"); - yomiDisplayElement = document.getElementById('yomiDisplay'); - projectListTopElement = document.getElementById("projectListTop"); - driftersKilledElement = document.getElementById('driftersKilled'); - availableMatterDisplayElement = document.getElementById('availableMatterDisplay'); - clipmakerLevel2Element = document.getElementById('clipmakerLevel2'); - clipperCostElement = document.getElementById('clipperCost'); - acquiredMatterDisplayElement = document.getElementById('acquiredMatterDisplay'); - nanoWireElement = document.getElementById('nanoWire'); - probesBornDisplayElement = document.getElementById('probesBornDisplay'); - probesTotalDisplayElement = document.getElementById('probesTotalDisplay'); - probesLaunchedDisplayElement = document.getElementById('probesLaunchedDisplay'); - probeCostDisplayElement = document.getElementById('probeCostDisplay'); - probeCombatDisplayElement = document.getElementById('probeCombatDisplay'); - probeWireDisplayElement = document.getElementById('probeWireDisplay'); - probeHarvDisplayElement = document.getElementById('probeHarvDisplay'); - probeFacDisplayElement = document.getElementById('probeFacDisplay'); - probeRepDisplayElement = document.getElementById('probeRepDisplay'); - probeHazDisplayElement = document.getElementById('probeHazDisplay'); - probeNavDisplayElement = document.getElementById('probeNavDisplay'); - probeSpeedDisplayElement = document.getElementById('probeSpeedDisplay'); - probeTrustDisplayElement = document.getElementById('probeTrustDisplay'); - memoryElement = document.getElementById("memory"); - processorsElement = document.getElementById("processors"); - marginElement = document.getElementById("margin"); - marketingLvlElement = document.getElementById('marketingLvl'); - adCostElement = document.getElementById('adCost'); - factoryCostDisplayElement = document.getElementById('factoryCostDisplay'); - factoryLevelDisplayElement = document.getElementById('factoryLevelDisplay'); - wireDroneCostDisplayElement = document.getElementById('wireDroneCostDisplay'); - wireDroneLevelDisplayElement = document.getElementById('wireDroneLevelDisplay'); - harvesterCostDisplayElement = document.getElementById('harvesterCostDisplay'); - harvesterLevelDisplayElement = document.getElementById('harvesterLevelDisplay'); - megaClipperCostElement = document.getElementById('megaClipperCost'); - megaClipperLevelElement = document.getElementById('megaClipperLevel'); - investmentBankrollElement = document.getElementById('investmentBankroll'); - secValueElement = document.getElementById('secValue'); - portValueElement = document.getElementById('portValue'); - prestigeUcounterElement = document.getElementById("prestigeUcounter"); - prestigeScounterElement = document.getElementById("prestigeScounter"); - newTourneyCostElement = document.getElementById("newTourneyCost"); - maxTrustDisplayElement = document.getElementById("maxTrustDisplay"); - victoryDivElement = document.getElementById("victoryDiv"); - probeTrustCostDisplayElement = document.getElementById("probeTrustCostDisplay"); - tournamentResultsTableElement = document.getElementById("tournamentResultsTable"); - farmCostElement = document.getElementById('farmCost'); - batteryCostElement = document.getElementById('batteryCost'); - farmLevelElement = document.getElementById('farmLevel'); - batteryLevelElement = document.getElementById('batteryLevel'); - availableMatterDisplayElement = document.getElementById('availableMatterDisplay'); - 
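// A few elements (tournamentResultsTableElement, probeDesignDivElement, nanoWireElement, among others) are looked up more than once in this function; the later assignments simply overwrite the earlier ones with the same reference. -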
acquiredMatterDisplayElement = document.getElementById('acquiredMatterDisplay'); - mapsElement = document.getElementById('maps'); - nanoWireElement = document.getElementById('nanoWire'); - wppsElement = document.getElementById('wpps'); - probeDesignDivElement = document.getElementById("probeDesignDiv"); - increaseProbeTrustDivElement = document.getElementById("increaseProbeTrustDiv"); - spaceDivElement = document.getElementById("spaceDiv"); - battleCanvasDivElement = document.getElementById("battleCanvasDiv"); - honorDivElement = document.getElementById("honorDiv"); - wireProductionDivElement = document.getElementById("wireProductionDiv"); - wireTransDivElement = document.getElementById("wireTransDiv"); - swarmGiftDivElement = document.getElementById("swarmGiftDiv"); - swarmEngineElement = document.getElementById("swarmEngine"); - swarmSliderDivElement = document.getElementById("swarmSliderDiv"); - factoryDivSpaceElement = document.getElementById("factoryDivSpace"); - clipsPerSecDivElement = document.getElementById("clipsPerSecDiv"); - tothDivElement = document.getElementById("tothDiv"); - strategyEngineElement = document.getElementById("strategyEngine"); - tournamentManagementElement = document.getElementById("tournamentManagement"); - btnQcomputeElement = document.getElementById("btnQcompute"); - qComputingElement = document.getElementById("qComputing"); - transWireElement = document.getElementById("transWire"); - processorDisplayElement = document.getElementById("processorDisplay"); - compDivElement = document.getElementById("compDiv"); - projectsDivElement = document.getElementById("projectsDiv"); - creationDivElement = document.getElementById("creationDiv"); - stratPickerElement = document.getElementById("stratPicker"); - yomiDisplayElement = document.getElementById("yomiDisplay"); - investmentBankrollElement = document.getElementById('investmentBankroll'); - secValueElement = document.getElementById('secValue'); - portValueElement = document.getElementById('portValue'); - investStratElement = document.getElementById("investStrat"); - btnRunTournamentElement = document.getElementById("btnRunTournament"); - vertStratElement = document.getElementById("vertStrat"); - horizStratElement = document.getElementById("horizStrat"); - vLabelaElement = document.getElementById("vLabela"); - vLabelbElement = document.getElementById("vLabelb"); - hLabelaElement = document.getElementById("hLabela"); - hLabelbElement = document.getElementById("hLabelb"); - aaPayoffHElement = document.getElementById("aaPayoffH"); - aaPayoffVElement = document.getElementById("aaPayoffV"); - abPayoffHElement = document.getElementById("abPayoffH"); - abPayoffVElement = document.getElementById("abPayoffV"); - baPayoffHElement = document.getElementById("baPayoffH"); - baPayoffVElement = document.getElementById("baPayoffV"); - bbPayoffHElement = document.getElementById("bbPayoffH"); - bbPayoffVElement = document.getElementById("bbPayoffV"); - autoTourneyStatusElement = document.getElementById('autoTourneyStatus'); - tournamentTableElement = document.getElementById("tournamentTable"); - tournamentResultsTableElement = document.getElementById("tournamentResultsTable"); - tourneyDisplayElement = document.getElementById("tourneyDisplay"); - payoffCellAAElement = document.getElementById("payoffCellAA"); - payoffCellABElement = document.getElementById("payoffCellAB"); - payoffCellBAElement = document.getElementById("payoffCellBA"); - payoffCellBBElement = document.getElementById("payoffCellBB"); - clipmakerLevel2Element = 
document.getElementById('clipmakerLevel2'); - clipperCostElement = document.getElementById('clipperCost'); - megaClipperLevelElement = document.getElementById('megaClipperLevel'); - megaClipperCostElement = document.getElementById('megaClipperCost'); - nextFactoryUpgradeElement = document.getElementById("nextFactoryUpgrade"); - nextDroneUpgradeElement = document.getElementById("nextDroneUpgrade"); - factoryLevelDisplayElement = document.getElementById('factoryLevelDisplay'); - factoryCostDisplayElement = document.getElementById('factoryCostDisplay'); - harvesterLevelDisplayElement = document.getElementById('harvesterLevelDisplay'); - harvesterCostDisplayElement = document.getElementById('harvesterCostDisplay'); - wireDroneLevelDisplayElement = document.getElementById('wireDroneLevelDisplay'); - wireDroneCostDisplayElement = document.getElementById('wireDroneCostDisplay'); - btnMakeHarvesterElement = document.getElementById("btnMakeHarvester"); - btnHarvesterx10Element = document.getElementById("btnHarvesterx10"); - btnHarvesterx100Element = document.getElementById("btnHarvesterx100"); - btnHarvesterx1000Element = document.getElementById("btnHarvesterx1000"); - btnMakeWireDroneElement = document.getElementById("btnMakeWireDrone"); - btnWireDronex10Element = document.getElementById("btnWireDronex10"); - btnWireDronex100Element = document.getElementById("btnWireDronex100"); - btnWireDronex1000Element = document.getElementById("btnWireDronex1000"); - sliderElement = document.getElementById("slider"); - btnSynchSwarmElement = document.getElementById("btnSynchSwarm"); - btnEntertainSwarmElement = document.getElementById("btnEntertainSwarm"); - swarmSizeElement = document.getElementById("swarmSize"); - swarmGiftsElement = document.getElementById("swarmGifts"); - swarmStatusElement = document.getElementById("swarmStatus"); - giftCountdownElement = document.getElementById("giftCountdown"); - giftTimerElement = document.getElementById("giftTimer"); - feedButtonDivElement = document.getElementById("feedButtonDiv"); - teachButtonDivElement = document.getElementById("teachButtonDiv"); - swarmEntertainCostElement = document.getElementById("swarmEntertainCost"); - entertainButtonDivElement = document.getElementById("entertainButtonDiv"); - cladButtonDivElement = document.getElementById("cladButtonDiv"); - synchButtonDivElement = document.getElementById("synchButtonDiv"); - swarmStatusDivElement = document.getElementById("swarmStatusDiv"); - swarmEngineElement = document.getElementById("swarmEngine"); - farmLevelElement = document.getElementById('farmLevel'); - farmCostElement = document.getElementById('farmCost'); - batteryLevelElement = document.getElementById('batteryLevel'); - batteryCostElement = document.getElementById('batteryCost'); - powerProductionRateElement = document.getElementById("powerProductionRate"); - powerConsumptionRateElement = document.getElementById("powerConsumptionRate"); - storedPowerElement = document.getElementById("storedPower"); - facPowConRateElement = document.getElementById("facPowConRate"); - dronePowConRateElement = document.getElementById("dronePowConRate"); - maxStorageElement = document.getElementById("maxStorage"); - performanceElement = document.getElementById("performance"); - btnMakeFarmElement = document.getElementById("btnMakeFarm"); - btnMakeBatteryElement = document.getElementById("btnMakeBattery"); - btnFarmRebootElement = document.getElementById("btnFarmReboot"); - btnBatteryRebootElement = document.getElementById("btnBatteryReboot"); - btnFarmx10Element = 
document.getElementById("btnFarmx10"); - btnFarmx100Element = document.getElementById("btnFarmx100"); - btnBatteryx10Element = document.getElementById("btnBatteryx10"); - btnBatteryx100Element = document.getElementById("btnBatteryx100"); - powerDivElement = document.getElementById("powerDiv"); - adCostElement = document.getElementById('adCost'); - marketingLvlElement = document.getElementById('marketingLvl'); - demandElement = document.getElementById("demand"); - marginElement = document.getElementById("margin"); - inchSpanElement = document.getElementById("inchSpan"); - demandElement = document.getElementById("demand"); - operationsElement = document.getElementById("operations"); - trustElement = document.getElementById("trust"); - nextTrustElement = document.getElementById("nextTrust"); - creativityElement = document.getElementById("creativity"); - factoryLevelDisplaySpaceElement = document.getElementById("factoryLevelDisplaySpace"); - harvesterLevelSpaceElement = document.getElementById("harvesterLevelSpace"); - wireDroneLevelSpaceElement = document.getElementById("wireDroneLevelSpace"); - maxOpsElement = document.getElementById("maxOps"); - avgSalesElement = document.getElementById("avgSales"); - avgRevElement = document.getElementById("avgRev"); - probeTrustCostDisplayElement = document.getElementById('probeTrustCostDisplay'); - mdpsElement = document.getElementById('mdps'); - colonizedDisplayElement = document.getElementById('colonizedDisplay'); - probesLostHazardsDisplayElement = document.getElementById('probesLostHazardsDisplay'); - probesTotalDisplayElement = document.getElementById('probesTotalDisplay'); - probesLostDriftDisplayElement = document.getElementById('probesLostDriftDisplay'); - probesTotalDisplayElement = document.getElementById('probesTotalDisplay'); - drifterCountElement = document.getElementById('drifterCount'); - mapsElement = document.getElementById('maps'); - swarmGiftDivElement = document.getElementById("swarmGiftDiv"); - swarmEngineElement = document.getElementById("swarmEngine"); - clipsPerSecDivElement = document.getElementById("clipsPerSecDiv"); - tothDivElement = document.getElementById("tothDiv"); - clipmakerRateElement = document.getElementById("clipmakerRate"); - clipmakerRate2Element = document.getElementById("clipmakerRate2"); - - - stockSymbolElements.push(document.getElementById("stock1Symbol")); - stockAmountElements.push(document.getElementById("stock1Amount")); - stockPriceElements.push(document.getElementById("stock1Price")); - stockTotalElements.push(document.getElementById("stock1Total")); - stockProfitElements.push(document.getElementById("stock1Profit")); - - stockSymbolElements.push(document.getElementById("stock2Symbol")); - stockAmountElements.push(document.getElementById("stock2Amount")); - stockPriceElements.push(document.getElementById("stock2Price")); - stockTotalElements.push(document.getElementById("stock2Total")); - stockProfitElements.push(document.getElementById("stock2Profit")); - - stockSymbolElements.push(document.getElementById("stock3Symbol")); - stockAmountElements.push(document.getElementById("stock3Amount")); - stockPriceElements.push(document.getElementById("stock3Price")); - stockTotalElements.push(document.getElementById("stock3Total")); - stockProfitElements.push(document.getElementById("stock3Profit")); - - stockSymbolElements.push(document.getElementById("stock4Symbol")); - stockAmountElements.push(document.getElementById("stock4Amount")); - stockPriceElements.push(document.getElementById("stock4Price")); - 
stockTotalElements.push(document.getElementById("stock4Total")); - stockProfitElements.push(document.getElementById("stock4Profit")); - - stockSymbolElements.push(document.getElementById("stock5Symbol")); - stockAmountElements.push(document.getElementById("stock5Amount")); - stockPriceElements.push(document.getElementById("stock5Price")); - stockTotalElements.push(document.getElementById("stock5Total")); - stockProfitElements.push(document.getElementById("stock5Profit")); - - tourneyResultsElements.push(document.getElementById("results0")); - tourneyResultsElements.push(document.getElementById("results1")); - tourneyResultsElements.push(document.getElementById("results2")); - tourneyResultsElements.push(document.getElementById("results3")); - tourneyResultsElements.push(document.getElementById("results4")); - tourneyResultsElements.push(document.getElementById("results5")); - tourneyResultsElements.push(document.getElementById("results6")); - tourneyResultsElements.push(document.getElementById("results7")); - - -} - - -var clipmakerRateElement; -var clipmakerRate2Element; -var availableMatterDisplayElement; -var acquiredMatterDisplayElement; -var mapsElement; -var nanoWireElement; -var wppsElement; -var probeDesignDivElement; -var increaseProbeTrustDivElement; -var spaceDivElement; -var battleCanvasDivElement; -var honorDivElement; -var wireProductionDivElement; -var wireTransDivElement; -var swarmGiftDivElement; -var swarmEngineElement; -var swarmSliderDivElement; -var factoryDivSpaceElement; -var clipsPerSecDivElement; -var tothDivElement; -var strategyEngineElement; -var tournamentManagementElement; -var btnQcomputeElement; -var qComputingElement; -var transWireElement; -var processorDisplayElement; -var compDivElement; -var projectsDivElement; -var creationDivElement; -var stratPickerElement; -var yomiDisplayElement; -var investmentBankrollElement; -var fundsElement; -var secValueElement; -var portValueElement; -var investStratElement; -var btnRunTournamentElement; -var vertStratElement; -var horizStratElement; -var vLabelaElement; -var vLabelbElement; -var hLabelaElement; -var hLabelbElement; -var aaPayoffHElement; -var aaPayoffVElement; -var abPayoffHElement; -var abPayoffVElement; -var baPayoffHElement; -var baPayoffVElement; -var bbPayoffHElement; -var bbPayoffVElement; -var autoTourneyStatusElement; -var tournamentTableElement; -var tournamentResultsTableElement; -var tourneyDisplayElement; -var payoffCellAAElement; -var payoffCellABElement; -var payoffCellBAElement; -var payoffCellBBElement; -var clipmakerLevel2Element; -var clipperCostElement; -var megaClipperLevelElement; -var megaClipperCostElement; -var nextFactoryUpgradeElement; -var nextDroneUpgradeElement; -var factoryLevelDisplayElement; -var factoryCostDisplayElement; -var harvesterLevelDisplayElement; -var harvesterCostDisplayElement; -var wireDroneLevelDisplayElement; -var wireDroneCostDisplayElement; -var btnMakeHarvesterElement; -var btnHarvesterx10Element; -var btnHarvesterx100Element; -var btnHarvesterx1000Element; -var btnMakeWireDroneElement; -var btnWireDronex10Element; -var btnWireDronex100Element; -var btnWireDronex1000Element; -var sliderElement; -var btnSynchSwarmElement; -var btnEntertainSwarmElement; -var swarmSizeElement; -var swarmGiftsElement; -var swarmStatusElement; -var giftCountdownElement; -var giftTimerElement; -var feedButtonDivElement; -var teachButtonDivElement; -var swarmEntertainCostElement; -var entertainButtonDivElement; -var cladButtonDivElement; -var synchButtonDivElement; -var 
swarmStatusDivElement; -var swarmEngineElement; -var farmLevelElement; -var farmCostElement; -var batteryLevelElement; -var batteryCostElement; -var powerProductionRateElement; -var powerConsumptionRateElement; -var storedPowerElement; -var facPowConRateElement; -var dronePowConRateElement; -var maxStorageElement; -var performanceElement; -var btnMakeFarmElement; -var btnMakeBatteryElement; -var btnFarmRebootElement; -var btnBatteryRebootElement; -var btnFarmx10Element; -var btnFarmx100Element; -var btnBatteryx10Element; -var btnBatteryx100Element; -var powerDivElement; -var adCostElement; -var marketingLvlElement; -var demandElement; -var marginElement; -var inchSpanElement; -var demandElement; -var operationsElement; -var trustElement; -var nextTrustElement; -var creativityElement; -var factoryLevelDisplaySpaceElement; -var harvesterLevelSpaceElement; -var wireDroneLevelSpaceElement; -var maxOpsElement; -var avgSalesElement; -var avgRevElement; -var probeTrustCostDisplayElement; -var mdpsElement; -var colonizedDisplayElement; -var probesLostHazardsDisplayElement; -var probesTotalDisplayElement; -var probesLostDriftDisplayElement; -var probesTotalDisplayElement; -var drifterCountElement; -var mapsElement; -var swarmGiftDivElement; -var swarmEngineElement; -var clipsPerSecDivElement; -var tothDivElement; -var prestigeUcounterElement; -var prestigeScounterElement; -var newTourneyCostElement; -var maxTrustDisplayElement; -var victoryDivElement; -var probeTrustCostDisplayElement; -var tournamentResultsTableElement; -var farmCostElement; -var batteryCostElement; -var farmLevelElement; -var batteryLevelElement; -var wireDroneCostDisplayElement; -var wireDroneLevelDisplayElement; -var harvesterCostDisplayElement; -var harvesterLevelDisplayElement; -var megaClipperCostElement; -var megaClipperLevelElement; -var investmentBankrollElement; -var secValueElement; -var portValueElement; -var driftersKilledElement; -var availableMatterDisplayElement; -var clipmakerLevel2Element; -var clipperCostElement; -var acquiredMatterDisplayElement; -var nanoWireElement; -var probesBornDisplayElement; -var probesTotalDisplayElement; -var probesLaunchedDisplayElement; -var probeCostDisplayElement; -var probeCombatDisplayElement; -var probeWireDisplayElement; -var probeHarvDisplayElement; -var probeFacDisplayElement; -var probeRepDisplayElement; -var probeHazDisplayElement; -var probeNavDisplayElement; -var probeSpeedDisplayElement; -var probeTrustDisplayElement; -var memoryElement; -var processorsElement; -var marginElement; -var marketingLvlElement; -var adCostElement; -var factoryCostDisplayElement; -var factoryLevelDisplayElement; -var yomiDisplayElement; -var projectListTopElement; - -var wireCostElement; -var wireBuyerStatusElement; -var wireElement; - -var qCompDisplayElement; - -var hypnoDroneTextElement; -var hypnoDroneEventDivElement; - -var readoutElement1; -var readoutElement2; -var readoutElement3; -var readoutElement4; -var readoutElement5; - -var mpdsDivElement; - -var factoryRebootToolTipElement; -var havesterRebootToolTipElement; -var wireDroneRebootToolTipElement; -var farmRebootToolTipElement; -var batteryRebootToolTipElement; - -var swarmSliderDivElement; -var clipCountCrunchedElement; -var autoTourneyStatusDivElement; -var autoTourneyControlElement; - -var wireBuyerDivElement; -var tournamentResultsTableElement; -var tournamentStuffElement; - -var honorDivElement; -var drifterDivElement; -var battleCanvasDivElement; -var combatButtonDivElement; -var factoryUpgradeDisplayElement; -var 
droneUpgradeDisplayElement; -var btnMakerProbeElement; -var hazardBodyCountElement; -var probesLostHazardsDisplayElement; -var driftBodyCountElement; -var combatBodyCountElement; -var prestigeDivElement; -var btnMakePaperclipElement; -var btnBuyWireElement; -var btnMakeClipperElement; -var btnExpandMarketingElement; -var btnLowerPriceElement; -var btnAddProcElement; -var btnAddMemElement; -var btnNewTournamentElement; -var btnImproveInvestmentsElement; -var investmentEngineElement; -var strategyEngineElement; -var tournamentManagementElement; -var megaClipperDivElement; -var btnMakeMegaClipperElement; -var autoClipperDivElement; -var revPerSecDivElement; -var compDivElement; -var creativityDivElement; -var projectsDivElement; -var businessDivElement; -var manufacturingDivElement; -var trustDivElement; -var creationDivElement; -var factoryDivElement; -var investmentEngineUpgradeElement; -var wireProductionDivElement; -var wireTransDivElement; -var harvesterDivElement; -var wireDroneDivElement; -var tothDivElement; -var spaceDivElement; -var factoryDivSpaceElement; -var droneDivSpaceElement; -var probeDesignDivElement; -var increaseProbeTrustDivElement; -var qComputingElement; -var btnMakeFactoryElement; -var btnHarvesterRebootElement; -var btnWireDroneRebootElement; -var btnFactoryRebootElement; - -var probeTrustUsedDisplayElement; -var btnIncreaseProbeTrustElement; -var btnRaiseProbeSpeedElement; -var btnLowerProbeSpeedElement; -var btnRaiseProbeNavElement; -var btnLowerProbeNavElement; -var btnRaiseProbeRepElement; -var btnLowerProbeRepElement; -var btnRaiseProbeHazElement; -var btnLowerProbeHazElement; -var btnRaiseProbeFacElement; -var btnLowerProbeFacElement; -var btnRaiseProbeHarvElement; -var btnLowerProbeHarvElement; -var btnRaiseProbeWireElement; -var btnLowerProbeWireElement; -var btnRaiseProbeCombatElement; -var btnLowerProbeCombatElement; - -var coverElement; - -var unusedClipsDisplayElement; -var transWireElement; -var nanoWireElement; -var clipsElement; -var unsoldClipsElement; - -var stockSymbolElements = []; -var stockAmountElements = []; -var stockPriceElements = []; -var stockTotalElements = []; -var stockProfitElements = []; - -var tourneyResultsElements = []; - -// Cache - -cacheDOMElements(); - -// Wire -------------------------------------------------------- - -function adjustWirePrice(){ - - wirePriceTimer++; - - if (wirePriceTimer>250 && wireBasePrice>15){ - wireBasePrice = wireBasePrice - (wireBasePrice/1000); - wirePriceTimer = 0; - } - - if (Math.random() < .015) { - wirePriceCounter++; - var wireAdjust = 6*(Math.sin(wirePriceCounter)); - wireCost = Math.ceil(wireBasePrice + wireAdjust); - wireCostElement.innerHTML = wireCost; - } -} - -function toggleWireBuyer(){ - if (wireBuyerStatus==1){ - wireBuyerStatus=0; - wireBuyerStatusElement.innerHTML = "OFF"; - } else { - wireBuyerStatus=1; - wireBuyerStatusElement.innerHTML = "ON"; - } -} - -function buyWire(){ - if(funds >= wireCost){ - wirePriceTimer = 0; - wire = wire + wireSupply; - funds = funds - wireCost; - wirePurchase = wirePurchase + 1; - wireBasePrice = wireBasePrice + .05; - wireElement.innerHTML = formatWithCommas(Math.floor(wire)); - fundsElement.innerHTML = formatWithCommas(funds, 2); - } -} - -// QCHIPS ----------------------------------------------------------- - -var qChips = []; -var qChipsElements = []; - -var qChip0 = { - waveSeed: .1, - value: 0, - active: 0, -} - -qChips.push(qChip0); -qChipsElements.push(document.getElementById("qChip0")); - -var qChip1 = { - waveSeed: .2, - value: 0, - active: 
0, -} - -qChips.push(qChip1); -qChipsElements.push(document.getElementById("qChip1")); - -var qChip2 = { - waveSeed: .3, - value: 0, - active: 0, -} - -qChips.push(qChip2); -qChipsElements.push(document.getElementById("qChip2")); - -var qChip3 = { - waveSeed: .4, - value: 0, - active: 0, -} - -qChips.push(qChip3); -qChipsElements.push(document.getElementById("qChip3")); - -var qChip4 = { - waveSeed: .5, - value: 0, - active: 0, -} - -qChips.push(qChip4); -qChipsElements.push(document.getElementById("qChip4")); - -var qChip5 = { - waveSeed: .6, - value: 0, - active: 0, -} - -qChips.push(qChip5); -qChipsElements.push(document.getElementById("qChip5")); - -var qChip6 = { - waveSeed: .7, - value: 0, - active: 0, -} - -qChips.push(qChip6); -qChipsElements.push(document.getElementById("qChip6")); - -var qChip7 = { - waveSeed: .8, - value: 0, - active: 0, -} - -qChips.push(qChip7); -qChipsElements.push(document.getElementById("qChip7")); - -var qChip8 = { - waveSeed: .9, - value: 0, - active: 0, -} - -qChips.push(qChip8); -qChipsElements.push(document.getElementById("qChip8")); - -var qChip9 = { - waveSeed: 1, - value: 0, - active: 0, -} - -qChips.push(qChip9); -qChipsElements.push(document.getElementById("qChip9")); - -function quantumCompute(){ - qClock = qClock+.01; - for (var i = 0; ibuffer) { - tempOps = tempOps + Math.ceil(qq/damper) - buffer; - qq = buffer; - opFade = .01; - opFadeTimer = 0; - } - - standardOps = standardOps + qq; - qCompDisplayElement.innerHTML = "qOps: " + formatWithCommas(Math.ceil(q*360)); - } - -} - - - -function manageProjects(){ - - for(var i = 0; i < projects.length; i++){ - if (projects[i].trigger() && (projects[i].uses > 0)){ - displayProjects(projects[i]); - projects[i].uses = projects[i].uses - 1; - activeProjects.push(projects[i]); - } - } - - - for(var i = 0; i < activeProjects.length; i++){ - if (activeProjects[i].cost()){ - activeProjects[i].element.disabled = false; - } else { - activeProjects[i].element.disabled = true; - } - } -} - - -function displayProjects(project){ - - project.element = document.createElement("button"); -project.element.setAttribute("id", project.id); - -project.element.onclick = function(){project.effect()}; - -project.element.setAttribute("class", "projectButton"); - projectListTopElement.appendChild(project.element, projectListTopElement.firstChild); - - var span = document.createElement("span"); - span.style.fontWeight = "bold"; -project.element.appendChild(span); - - var title = document.createTextNode(project.title); - span.appendChild(title); - - var cost = document.createTextNode(project.priceTag); -project.element.appendChild(cost); - - var div = document.createElement("div"); -project.element.appendChild(div); - - var description = document.createTextNode(project.description); - project.element.appendChild(description); - - blink(project.element); - -} - -// HYPNODRONE EVENT ---------------------------------------------------------------- - -hypnoDroneEventDivElement.style.display = "none"; -var longBlinkCounter = 0; - -function longBlink(element){ - - { - var handle = setInterval(function () { longToggleVisibility(element)}, 32); - } - - function longToggleVisibility(element){ - longBlinkCounter++; - - if (longBlinkCounter > 5 && longBlinkCounter < 10){ - hypnoDroneTextElement.innerHTML="Release"; - } - - if (longBlinkCounter > 30 && longBlinkCounter < 40){ - hypnoDroneTextElement.innerHTML="
            Release"; - } - - if (longBlinkCounter > 45 && longBlinkCounter < 55){ - hypnoDroneTextElement.innerHTML="
            Release"; - } - - if (longBlinkCounter > 55){ - hypnoDroneTextElement.innerHTML="Release
            the
            Hypno
            Drones"; - } - - if (longBlinkCounter >= 120){ - //console.log("weed wizzard"); - clearInterval(handle); - longBlinkCounter = 0; - element.style.display = "none"; - } else { - if (element.style.display != ""){ - element.style.display = ""; - } else { - element.style.display = "none"; - } - } - } - - } - -function hypnoDroneEvent(){ - hypnoDroneTextElement.innerHTML="Release"; - longBlink(hypnoDroneEventDivElement); -} - - -// MESSAGES ------------------------------------------------------------------------ - - -function displayMessage(msg){ - readoutElement5.innerHTML=readoutElement4.innerHTML; - readoutElement4.innerHTML=readoutElement3.innerHTML; - readoutElement3.innerHTML=readoutElement2.innerHTML; - readoutElement2.innerHTML=readoutElement1.innerHTML; - readoutElement1.innerHTML=msg; -} - - -// BLINK - -function blink(element){ - - { - var handle = setInterval(function () { toggleVisibility(element)}, 30); - } - - function toggleVisibility(element){ - blinkCounter = blinkCounter+1; - - if (blinkCounter >= 12){ - clearInterval(handle); - blinkCounter = 0; - element.style.visibility = "visible"; - } else { - if (element.style.visibility != "hidden"){ - element.style.visibility = "hidden"; - } else { - element.style.visibility = "visible"; - } - } - } - - } - - - -function buttonUpdate(){ - - if (swarmFlag == 0){ - swarmEngineElement.style.display="none"; - swarmGiftDivElement.style.display="none"; - } else { - swarmEngineElement.style.display=""; - swarmGiftDivElement.style.display=""; - } - - - powerDivElement.style.display="none"; - - if (spaceFlag==0){ - mpdsDivElement.style.display="none"; - } else if (spaceFlag==1) { - mpdsDivElement.style.display=""; - } - - factoryRebootToolTipElement.innerHTML = "+" + spellf(factoryBill)+" clips"; - havesterRebootToolTipElement.innerHTML = "+" + spellf(harvesterBill)+" clips"; - wireDroneRebootToolTipElement.innerHTML = "+" + spellf(wireDroneBill)+" clips"; - farmRebootToolTipElement.innerHTML = "+" + spellf(farmBill)+" clips"; - batteryRebootToolTipElement.innerHTML = "+" + spellf(batteryBill)+" clips"; - - - if (swarmFlag == 1){ - swarmSliderDivElement.style.display=""; - } else { - swarmSliderDivElement.style.display="none"; - } - - clipCountCrunchedElement.innerHTML = spellf(Math.round(clips)); - -if (autoTourneyFlag==1) { - autoTourneyStatusDivElement.style.display=""; - autoTourneyControlElement.style.display=""; - } else { - autoTourneyStatusDivElement.style.display="none"; - autoTourneyControlElement.style.display="none"; - } - - qCompDisplayElement.style.opacity = qFade; - qFade = qFade - .001; - -if (wireBuyerFlag==1) { - wireBuyerDivElement.style.display=""; - } else { - wireBuyerDivElement.style.display="none"; - } - -if (resultsFlag == 1 && autoTourneyFlag == 1 && autoTourneyStatus ==1 && tournamentResultsTableElement.style.display == "") { - resultsTimer++; - - if (resultsTimer>=300 && operations>=tourneyCost){ - newTourney(); - runTourney(); - resultsTimer = 0; - } - } - - if (investmentEngineFlag == 0){ - console.log("set none") - investmentEngineElement.style.display="none"; - investmentEngineUpgradeElement.style.display="none"; - } else if(investmentEngineFlag == 1) { - investmentEngineElement.style.display=""; - investmentEngineUpgradeElement.style.display="none"; - } -tournamentStuffElement.onmouseover = function() {revealGrid()}; //m@: does this need to happen every button update? 
idts, but TODO: look this up -tournamentStuffElement.onmouseout = function() {revealResults()}; - -honorDivElement.style.display=""; - -if (battleFlag == 0){ - drifterDivElement.style.display="none"; - } else { - drifterDivElement.style.display=""; - } - -if (battleFlag == 0){ - battleCanvasDivElement.style.display="none"; - } else { - battleCanvasDivElement.style.display=""; - } - -combatButtonDivElement.style.display = ""; - - -factoryUpgradeDisplayElement.style.display = "none"; - - if (maxDroneLevel>=50000){ - droneUpgradeDisplayElement.style.display = "none"; - } - - - -if (unusedClips=tourneyCost && tourneyInProg == 0){ - btnNewTournamentElement.disabled = false; - } else { - btnNewTournamentElement.disabled = true; - } - - - - -if (strategyEngineFlag == 0){ - - strategyEngineElement.style.display="none"; - tournamentManagementElement.style.display="none"; - } else { - - strategyEngineElement.style.display=""; - tournamentManagementElement.style.display=""; - } - -if (megaClipperFlag == 0){ - - megaClipperDivElement.style.display="none"; - } else { - megaClipperDivElement.style.display=""; - } - - if (funds < megaClipperCost) { btnMakeMegaClipperElement.disabled = true; - } else { - btnMakeMegaClipperElement.disabled = false; - } - -if (autoClipperFlag === 0){ - - autoClipperDivElement.style.display="none"; - } else { - autoClipperDivElement.style.display=""; - } - - if (funds>=5) { - autoClipperFlag = 1; - } - -if (revPerSecFlag === 0){ - - revPerSecDivElement.style.display="none"; - } else { - revPerSecDivElement.style.display=""; - } - - -if (compFlag === 0){ - - compDivElement.style.display="none"; - } else { - compDivElement.style.display=""; - } - - -if (creativityOn === 0){ - creativityDivElement.style.display="none"; - } else { - creativityDivElement.style.display=""; -} - -if (projectsFlag === 0){ - - projectsDivElement.style.display="none"; - } else { - projectsDivElement.style.display=""; - } - -if (humanFlag === 0){ - - businessDivElement.style.display="none"; - manufacturingDivElement.style.display="none"; - trustDivElement.style.display="none"; - investmentEngineFlag = 0; - wireBuyerFlag = 0; - creationDivElement.style.display=""; - } else { - businessDivElement.style.display=""; - manufacturingDivElement.style.display=""; - trustDivElement.style.display=""; - creationDivElement.style.display="none"; - } - -if (factoryFlag === 0){ - - factoryDivElement.style.display="none"; - } else { - factoryDivElement.style.display=""; - } - -if (wireProductionFlag === 0){ - - wireProductionDivElement.style.display="none"; - } else { - wireProductionDivElement.style.display=""; - wireTransDivElement.style.display="none"; - } - -if (harvesterFlag === 0){ - - harvesterDivElement.style.display="none"; - } else { - harvesterDivElement.style.display=""; - } - -if (wireDroneFlag === 0){ - - wireDroneDivElement.style.display="none"; - } else { - wireDroneDivElement.style.display=""; - } - -if (tothFlag === 0){ - - tothDivElement.style.display="none"; - } else { - tothDivElement.style.display=""; - } - -if (spaceFlag === 0){ - spaceDivElement.style.display="none"; - factoryDivSpaceElement.style.display="none"; - droneDivSpaceElement.style.display="none"; - probeDesignDivElement.style.display = "none"; - increaseProbeTrustDivElement.style.display = "none"; - } else { - spaceDivElement.style.display=""; - factoryDivSpaceElement.style.display = ""; - droneDivSpaceElement.style.display=""; - probeDesignDivElement.style.display=""; - increaseProbeTrustDivElement.style.display=""; - 
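// Once the space stage begins, the Earth-stage factory and drone panels are hidden below in favor of the space-stage equivalents shown above. -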
factoryDivElement.style.display="none"; - harvesterDivElement.style.display="none"; - wireDroneDivElement.style.display="none"; - } - -if (qFlag === 0){ - qComputingElement.style.display="none"; - } else { - qComputingElement.style.display=""; - } - - - if (unusedClips < factoryCost) {btnMakeFactoryElement.disabled = true; - } else { - btnMakeFactoryElement.disabled = false; - } - - - if (harvesterLevel === 0) {btnHarvesterRebootElement.disabled = true; - } else { - btnHarvesterRebootElement.disabled = false; - } - - if (wireDroneLevel === 0) {btnWireDroneRebootElement.disabled = true; - } else { - btnWireDroneRebootElement.disabled = false; - } - - if (factoryLevel === 0) {btnFactoryRebootElement.disabled = true; - } else { - btnFactoryRebootElement.disabled = false; - } - - -// PROBE DESIGN - -probeUsedTrust = (probeSpeed+probeNav+probeRep+probeHaz+probeFac+probeHarv+probeWire+probeCombat); - -probeTrustUsedDisplayElement.innerHTML = probeUsedTrust; - - -if (yomi < probeTrustCost || probeTrust >= maxTrust) {btnIncreaseProbeTrustElement.disabled = true; - } else {btnIncreaseProbeTrustElement.disabled = false;} - -if (probeTrust - probeUsedTrust < 1) {btnRaiseProbeSpeedElement.disabled = true; - } else {btnRaiseProbeSpeedElement.disabled = false;} - -if (probeSpeed < 1) {btnLowerProbeSpeedElement.disabled = true; - } else {btnLowerProbeSpeedElement.disabled = false;} - -if (probeTrust - probeUsedTrust < 1) {btnRaiseProbeNavElement.disabled = true; - } else {btnRaiseProbeNavElement.disabled = false;} - -if (probeNav < 1) {btnLowerProbeNavElement.disabled = true; - } else {btnLowerProbeNavElement.disabled = false;} - -if (probeTrust - probeUsedTrust < 1) {btnRaiseProbeRepElement.disabled = true; - } else {btnRaiseProbeRepElement.disabled = false;} - -if (probeRep < 1) {btnLowerProbeRepElement.disabled = true; - } else {btnLowerProbeRepElement.disabled = false;} - -if (probeTrust - probeUsedTrust < 1) {btnRaiseProbeHazElement.disabled = true; - } else {btnRaiseProbeHazElement.disabled = false;} - -if (probeHaz < 1) {btnLowerProbeHaz.disabled = true; - } else {btnLowerProbeHaz.disabled = false;} - -if (probeTrust - probeUsedTrust < 1) {btnRaiseProbeFacElement.disabled = true; - } else {btnRaiseProbeFacElement.disabled = false;} - -if (probeFac < 1) {btnLowerProbeFacElement.disabled = true; - } else {btnLowerProbeFacElement.disabled = false;} - -if (probeTrust - probeUsedTrust < 1) {btnRaiseProbeHarvElement.disabled = true; - } else {btnRaiseProbeHarvElement.disabled = false;} - -if (probeHarv < 1) {btnLowerProbeHarvElement.disabled = true; - } else {btnLowerProbeHarvElement.disabled = false;} - -if (probeTrust - probeUsedTrust < 1) {btnRaiseProbeWireElement.disabled = true; - } else {btnRaiseProbeWireElement.disabled = false;} - -if (probeWire < 1) {btnLowerProbeWireElement.disabled = true; - } else {btnLowerProbeWireElement.disabled = false;} - -if (probeTrust - probeUsedTrust < 1) {btnRaiseProbeCombatElement.disabled = true; - } else {btnRaiseProbeCombatElement.disabled = false;} - -if (probeCombat < 1) {btnLowerProbeCombatElement.disabled = true; - } else {btnLowerProbeCombatElement.disabled = false;} - - coverElement.style.display="none"; -} - - - - - - -//----------INVESTMENTS---------------------------------------------------------------- - - -var stocks = []; -var alphabet = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]; -var portfolioSize = 0; -var stockID = 0; -var secTotal = 0; -var portTotal = 0; -var sellDelay = 0; -var 
riskiness = 5; -var maxPort = 5; -var m = 0; -var investLevel = 0; -var investUpgradeCost = 100; -var stockGainThreshold = .5; -var ledger = 0; -var stockReportCounter = 0; - -function investUpgrade(){ - yomi = yomi - investUpgradeCost; - investLevel++; - investmentLevelElement.innerHTML=investLevel; - stockGainThreshold = stockGainThreshold + .01; - investUpgradeCost = Math.floor(Math.pow(investLevel+1, Math.E)*100); - investUpgradeCostElement.innerHTML = formatWithCommas(investUpgradeCost); - yomiDisplayElement.innerHTML = formatWithCommas(yomi); - displayMessage("Investment engine upgraded, expected profit/loss ratio now "+stockGainThreshold); -} - - -function investDeposit(){ - ledger = ledger - Math.floor(funds); - bankroll = Math.floor(bankroll + funds); - funds = 0; - investmentBankrollElement.innerHTML = formatWithCommas(bankroll); - fundsElement.innerHTML = formatWithCommas(funds,2); - portValueElement.innerHTML = formatWithCommas(portTotal); -} - -function investWithdraw(){ - ledger = ledger + bankroll; - funds = funds + bankroll; - bankroll = 0; - investmentBankrollElement.innerHTML = formatWithCommas(bankroll); - fundsElement.innerHTML = formatWithCommas(funds,2); - portValueElement.innerHTML = formatWithCommas(portTotal); - -} - -function stockShop(){ - var budget = Math.ceil(portTotal/riskiness); - var r = 11 - riskiness; - var reserves = Math.ceil(portTotal/r); - if (riskiness==1){ - reserves = 0; - } - - if ((bankroll-budget)(portTotal/10)){ - budget = bankroll; - } else if ((bankroll-budget)= 5 && budget >= 1 && bankroll - budget >= reserves){ - if (Math.random() < .25){ - - createStock(budget); - - } - - } -} - -function createStock(dollars){ - stockID++; - var sym = generateSymbol(); - var roll = Math.random(); - if (roll>.99){ - var pri = Math.ceil(Math.random()*3000); - } else if (roll>.85){ - var pri = Math.ceil(Math.random()*500); - } else if (roll>.60){ - var pri = Math.ceil(Math.random()*150); - } else if (roll>.20){ - var pri = Math.ceil(Math.random()*50); - } else { - var pri = Math.ceil(Math.random()*15); - } - - if (pri>dollars){ - pri = Math.ceil(dollars*roll); - } - - - var amt = Math.floor(dollars/pri); - if (amt>1000000){ - amt = 1000000; - } - - - var newStock = { - id: stockID, - symbol: sym, - price: pri, - amount: amt, - total: pri * amt, - profit: 0, - age: 0, - } - - stocks.push(newStock); - portfolioSize = stocks.length; - bankroll = bankroll - (pri*amt); - investmentBankrollElement.innerHTML = formatWithCommas(bankroll); - secValueElement.innerHTML = formatWithCommas(secTotal); - portValueElement.innerHTML = formatWithCommas(portTotal); - -} - -function sellStock(){ - - bankroll = bankroll + stocks[0].total; - investmentBankrollElement.innerHTML = formatWithCommas(bankroll); - secValueElement.innerHTML = formatWithCommas(secTotal); - portValueElement.innerHTML = formatWithCommas(portTotal); - stocks.splice(0, 1); - portfolioSize = stocks.length; - } - - -function generateSymbol(){ - var ltrNum = 0; - var x = Math.random(); - if (x<=.01){ - ltrNum = 1; - } else if (x<=.1) { - ltrNum = 2; - } else if (x<=.4) { - ltrNum = 3; - } else { - ltrNum = 4; - } - - var y = Math.floor(Math.random()*26); - var name = alphabet[y]; - - for(var i=1; istockGainThreshold){ - gain = false; - } - - var currentPrice = stocks[i].price; - var delta = Math.ceil((Math.random()*currentPrice)/(4*riskiness)); - - if(gain){ - stocks[i].price = stocks[i].price + delta; - } else { - stocks[i].price = stocks[i].price - delta; - } - - if (stocks[i].price == 0 && Math.random()>.24){ 
- stocks[i].price = 1; - } - - stocks[i].total = stocks[i].price * stocks[i].amount; - - if (gain){ - stocks[i].profit = stocks[i].profit + (delta* stocks[i].amount); - } else { - stocks[i].profit = stocks[i].profit - (delta* stocks[i].amount); - } - } - } -} - -// Stock List Display Routine - -window.setInterval(function(){ - - if (investStratElement.value=="low"){ - riskiness = 7; - } else if (investStratElement.value=="med"){ - riskiness = 5; - } else { - riskiness = 1; - } - - m = 0; - - for (var i=0; i0 && sellDelay >= 5 && Math.random()<=.3 && humanFlag == 1){ - sellStock(); - sellDelay = 0; - } - -if (portfolioSize>0 && humanFlag == 1){ - updateStocks(); - } - -}, 2500); - -//-------------------STRATEGY----------------------------------------------------- - -var tourneyCost = 1000; -var tourneyLvl = 1; -var choiceANames = ["cooperate", "swerve", "macro", "fight", "bet", "raise_price", "opera", "go", "heads", "particle", "discrete", "peace", "search", "lead", "accept", "accept", "attack"]; -var choiceBNames = ["defect", "straight", "micro", "back_down", "fold", "lower_price", "football", "stay", "tails", "wave", "continuous", "war", "evaluate", "follow", "reject", "deny", "decay"]; -var stratCounter = 0; -var roundNum = 0; -var hMove = 1; -var vMove = 1; -var hMovePrev = 1; -var vMovePrev = 1; -var aa = 0; -var ab = 0; -var ba = 0; -var bb = 0; -var rounds = 0; -var currentRound = 0; -var rCounter = 0; -var tourneyInProg = 0; -var winnerPtr = 0; -var placeScore = 0; -var showScore = 0; -var high = 0; -var pick = 10; -var yomi = 0; -var yomiBoost = 1; - -var allStrats = []; -var strats = []; - -var resultsTimer = 0; -var results = []; -var resultsFlag = 0; - - -var payoffGrid = { - valueAA:0, - valueAB:0, - valueBA:0, - valueBB:0, -} - -var stratRandom = { - name: "RANDOM", - active: 1, - currentScore: 0, - currentPos: 1, - pickMove: function() { - var r = Math.random(); - if (r<.5){ - return 1; - } else { - return 2; - } - } - -} - -allStrats.push(stratRandom); -strats.push(stratRandom); - -var stratA100 = { - name: "A100", - active: 0, - currentScore: 0, - currentPos: 1, - pickMove: function() { - return 1; - } - -} - -allStrats.push(stratA100); - -var stratB100 = { - name: "B100", - active: 0, - currentScore: 0, - currentPos: 1, - pickMove: function() { - return 2; - } -} - -allStrats.push(stratB100); - -var stratGreedy = { - name: "GREEDY", - active: 0, - currentScore: 0, - currentPos: 1, - pickMove: function() { - var x = findBiggestPayoff(); - if (x<3){ - return 1; - } else { - return 2; - } - } -} - -allStrats.push(stratGreedy); - -var stratGenerous = { - name: "GENEROUS", - active: 0, - currentScore: 0, - currentPos: 1, - pickMove: function() { - var x = findBiggestPayoff(); - if (x == 1){ - return 1; - } else if (x == 3){ - return 1; - } else { - return 2; - } - } -} - -allStrats.push(stratGenerous); - -var stratMinimax = { - name: "MINIMAX", - active: 0, - currentScore: 0, - currentPos: 1, - pickMove: function() { - var x = findBiggestPayoff(); - if (x == 1){ - return 2; - } else if (x == 3){ - return 2; - } else { - return 1; - } - } -} - -allStrats.push(stratMinimax); - -var stratTitfortat = { - name: "TIT FOR TAT", - active: 0, - currentScore: 0, - currentPos: 1, - pickMove: function() { - if (this.currentPos == 1){ - w = vMovePrev; - return w; - } else { - w = hMovePrev; - return w; - } - - } -} - -allStrats.push(stratTitfortat); - -var stratBeatlast = { - name: "BEAT LAST", - active: 0, - currentScore: 0, - currentPos: 1, - pickMove: function() { - var w = 
whatBeatsLast(this.currentPos); - return w; - } -} - -allStrats.push(stratBeatlast); - -var hStrat = strats[0]; -var vStrat = strats[0]; - -btnRunTournamentElement.disabled = true; - -function findBiggestPayoff(){ - if (aa>=ab && aa>=ba && aa>=bb){ - return 1; - } else if (ab>=aa && ab>=ba && ab>=bb){ - return 2; - } else if (ba>=aa && ba>=ab && ba>=bb){ - return 3; - } else { - return 4; - } -} - -function whatBeatsLast(myPos){ - var oppsPos = 1; - if (myPos == 1){ - oppsPos = 2; - } else { - oppsPos = 1; - } - if (oppsPos == 1 && hMovePrev == 1){ - if (aa>ba){ - return 1; - } else { - return 2; - } - - } else if (oppsPos == 1 && hMovePrev == 2){ - if (ab>bb){ - return 1; - } else { - return 2; - } - - } else if (oppsPos == 2 && vMovePrev == 1){ - if (aa>ba){ - return 1; - } else { - return 2; - } - - } else { - if (ab>bb){ - return 1; - } else { - return 2; - } - - } - - } - - -function pickStrats(roundNum) { - if (roundNum < strats.length) { - h = 0; - v = roundNum; - } else { - stratCounter++; - if (stratCounter >= strats.length) { - stratCounter = stratCounter-strats.length; - } - h = Math.floor(roundNum/strats.length); - v = stratCounter; - } - - vStrat = strats[v]; - hStrat = strats[h]; - - strats[h].currentPos = 1; - strats[v].currentPos = 2; - - vertStratElement.innerHTML = vStrat.name; - horizStratElement.innerHTML = hStrat.name; - -} - -function generateGrid(){ - payoffGrid.valueAA = Math.ceil(Math.random()*10); - payoffGrid.valueAB = Math.ceil(Math.random()*10); - payoffGrid.valueBA = Math.ceil(Math.random()*10); - payoffGrid.valueBB = Math.ceil(Math.random()*10); - - aa = payoffGrid.valueAA; - ab = payoffGrid.valueAB; - ba = payoffGrid.valueBA; - bb = payoffGrid.valueBB; - - var x = Math.floor(Math.random()*choiceANames.length); - - vLabelaElement.innerHTML = choiceANames[x]; - vLabelbElement.innerHTML = choiceBNames[x]; - hLabelaElement.innerHTML = choiceANames[x]; - hLabelbElement.innerHTML = choiceBNames[x]; - aaPayoffHElement.innerHTML = payoffGrid.valueAA; - aaPayoffVElement.innerHTML = payoffGrid.valueAA; - abPayoffHElement.innerHTML = payoffGrid.valueAB; - abPayoffVElement.innerHTML = payoffGrid.valueBA; - baPayoffHElement.innerHTML = payoffGrid.valueBA; - baPayoffVElement.innerHTML = payoffGrid.valueAB; - bbPayoffHElement.innerHTML = payoffGrid.valueBB; - bbPayoffVElement.innerHTML = payoffGrid.valueBB; -} - - -function toggleAutoTourney(){ - if (autoTourneyStatus==1){ - autoTourneyStatus=0; - autoTourneyStatusElement.innerHTML = "OFF"; - } else { - autoTourneyStatus=1; - autoTourneyStatusElement.innerHTML = "ON"; - } -} - - -function newTourney(){ - - resultsFlag = 0; - - tournamentTableElement.style.display = ""; - tournamentResultsTableElement.style.display = "none"; - - high = 0; - tourneyInProg = 1; - currentRound = 0; - rounds = strats.length * strats.length; - for (i=0; i tempHigh){ - tempWinnerPtr = i; - tempHigh = temp[i].currentScore; - } - - } - - // 3. Move the high scoring strat to slot one in results - - results.push(temp[tempWinnerPtr]); - temp.splice(tempWinnerPtr, 1); - } - - - for(i=0; i high){ - winnerPtr = i; - high = strats[i].currentScore; - } - } -} - - -function calculatePlaceScore(){ - - placeScore = 0; - - // 1. 
Find top non-winning score - - for (i=1; i=4){ - finalClips++; - } - - if(wire >= 1){ - if (number > wire) { - number = wire; - } - - clips = clips + number; - unsoldClips = unsoldClips + number; - wire = wire - number; - unusedClips = unusedClips + number; - - if(humanFlag==0){ - unusedClipsDisplayElement.innerHTML = spellf(unusedClips); - } - - if(humanFlag==0 && dismantle < 1){ - transWireElement.innerHTML = spellf(wire); - nanoWireElement.innerHTML = spellf(wire); - } - - if (milestoneFlag < 15){ - clipsElement.innerHTML = formatWithCommas(Math.ceil(clips), 0); //m@ todo fixed the thing that you fucked up where you made the tool tip of crunched not be in spelled numbers - } - wireElement.innerHTML = formatWithCommas(wire); - unsoldClipsElement.innerHTML = formatWithCommas(unsoldClips, 0); - } - - if (dismantle>=4){ - transWireElement.innerHTML = formatWithCommas(wire); - } - -} - -function makeClipper(){ - if(funds >= clippperCost){ - clipmakerLevel = clipmakerLevel + 1; - funds = funds - clipperCost; - clipmakerLevel2Element.innerHTML = clipmakerLevel; - } - - clipperCost = (Math.pow(1.1,clipmakerLevel)+5); - clipperCostElement.innerHTML = formatWithCommas(clipperCost, 2); - -} - -function makeMegaClipper(){ - if(funds >= megaClipperCost){ - megaClipperLevel = megaClipperLevel + 1; - funds = funds - megaClipperCost; - megaClipperLevelElement.innerHTML = megaClipperLevel; - fundsElement.innerHTML = formatWithCommas(funds, 2); - } - - megaClipperCost = (Math.pow(1.07,megaClipperLevel)*1000); - megaClipperCostElement.innerHTML = formatWithCommas(megaClipperCost,2); - -} - -var maxFactoryLevel = 0; -var maxDroneLevel = 0; - -function updateUpgrades(){ - var nfup = 0; - var ndup = 0; - - if (maxFactoryLevel < 10){ - nfup = 10; - } else if (maxFactoryLevel < 20){ - nfup = 20; - } else if (maxFactoryLevel < 50){ - nfup = 50; - } - - if (maxDroneLevel < 500){ - ndup = 500; - } else if (maxDroneLevel < 5000){ - ndup = 5000; - } else if (maxDroneLevel < 50000){ - ndup = 50000; - } - - - nextFactoryUpgradeElement.innerHTML = formatWithCommas(nfup); - nextDroneUpgradeElement.innerHTML = formatWithCommas(ndup); - -} - - -function makeFactory(){ - unusedClips = unusedClips - factoryCost; - factoryBill = factoryBill + factoryCost; - unusedClipsDisplayElement.innerHTML = spellf(unusedClips); - factoryLevel++; - factoryLevelDisplayElement.innerHTML = factoryLevel; - var fcmod = 1; - if (factoryLevel > 0 && factoryLevel < 8){ - fcmod = 11 - factoryLevel; - } else if (factoryLevel > 7 && factoryLevel < 13){ - fcmod = 2; - } else if (factoryLevel > 12 && factoryLevel < 20){ - fcmod = 1.5; - } else if (factoryLevel > 19 && factoryLevel < 39){ - fcmod = 1.25; - } else if (factoryLevel > 38 && factoryLevel < 79){ - fcmod = 1.15; - } else if (factoryLevel > 78 && factoryLevel < 99){ - fcmod = 1.10; - } else if (factoryLevel > 98 && factoryLevel < 199){ - fcmod = 1.10; - } else if (factoryLevel > 198){ - fcmod = 1.10; - } - - if (factoryLevel > maxFactoryLevel){ - maxFactoryLevel = factoryLevel; - } - updateUpgrades(); - - factoryCost = factoryCost * fcmod; - // factoryCost = Math.log(1.25,(factoryLevel+1))*100000000; - factoryCostDisplayElement.innerHTML = spellf(factoryCost); -} - -function makeHarvester(amount){ - - for (x=0; x maxDroneLevel){ - maxDroneLevel = harvesterLevel + wireDroneLevel; - } - updateDronePrices(); - updateUpgrades(); - -} - -function makeWireDrone(amount){ - - for (x=0; x maxDroneLevel){ - maxDroneLevel = harvesterLevel + wireDroneLevel; - } - - updateDronePrices(); - 
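// The drone count just changed, so recompute the cached x10/x100/x1000 bulk prices and the next-upgrade threshold. -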
updateUpgrades(); - -} - -var p10h = 0; -var p100h = 0; -var p1000h = 0; -var p10w = 0; -var p100w = 0; -var p1000w = 0; - -function updateDronePrices(){ - - p10h = 0; - p100h = 0; - p1000h = 0; - p10w = 0; - p100w = 0; - p1000w = 0; - - var h = harvesterLevel+1; - for (x=0; x<10; x++){ - p10h = p10h + Math.pow(h,2.25)*1000000; - h++ - } - - var h = harvesterLevel+1; - for (x=0; x<100; x++){ - p100h = p100h + Math.pow(h,2.25)*1000000; - h++ - } - - var h = harvesterLevel+1; - for (x=0; x<1000; x++){ - p1000h = p1000h + Math.pow(h,2.25)*1000000; - h++ - } - - var w = wireDroneLevel+1; - for (x=0; x<10; x++){ - p10w = p10w + Math.pow(w,2.25)*1000000; - w++ - } - - var w = wireDroneLevel+1; - for (x=0; x<100; x++){ - p100w = p100w + Math.pow(w,2.25)*1000000; - w++ - } - - var w = wireDroneLevel+1; - for (x=0; x<1000; x++){ - p1000w = p1000w + Math.pow(w,2.25)*1000000; - w++ - } -} - - function updateDroneButtons(){ - - if (unusedClips=1) { - boredomLevel = boredomLevel + 1; - } else if (availableMatter > 0 && boredomLevel > 0) { - boredomLevel = boredomLevel - 1; - } - - if (boredomLevel >= 30000) { - boredomFlag = 1; - boredomLevel = 0; - if (boredomMsg == 0) { - displayMessage("No matter to harvest. Inactivity has caused the Swarm to become bored"); - boredomMsg = 1; - } - - } - - - var droneRatio = Math.max(harvesterLevel+1, wireDroneLevel+1)/Math.min(harvesterLevel+1, wireDroneLevel+1); - - if (droneRatio < 1.5 && disorgCounter > 1){ - disorgCounter = disorgCounter - .01; - } else if (droneRatio > 1.5) { - var x = droneRatio/10000; - if (x>.01) {x=.01;} - disorgCounter = disorgCounter + x; - } - - - if (disorgCounter >= 100) { - disorgFlag = 1; - if (disorgMsg == 0) { - displayMessage("Imbalance between Harvester and Wire Drone levels has disorganized the Swarm"); - disorgMsg = 1; - } - } - - var d = Math.floor(harvesterLevel + wireDroneLevel); - - swarmSizeElement.innerHTML = spellf(d); - swarmGiftsElement.innerHTML = formatWithCommas(swarmGifts, 0); - - if (giftCountdown <= 0) { - nextGift = Math.round((Math.log10(d))*sliderPos/100); - if (nextGift <= 0){nextGift = 1;} - swarmGifts = swarmGifts + nextGift; - swarmGiftsElement.innerHTML = formatWithCommas(swarmGifts, 0); - if (milestoneFlag<15){ - displayMessage("The swarm has generated a gift of "+nextGift+" additional computational capacity"); - } - -// THE OLD WAY -// giftCountdown = giftPeriod; -// elapsedTime = 0; - -// THE NEW WAY - giftBits = 0; - - } - - - if (powMod == 0){ - swarmStatus = 6; - } else { - swarmStatus = 0; - } - - if (spaceFlag == 1 && project130.flag == 0){ - swarmStatus = 9; - } - - if (d == 0){ - swarmStatus = 7; - } else if (d == 1){ - swarmStatus = 8; - } - - if (swarmFlag == 0){ - swarmStatus = 6; - } - - if (boredomFlag == 1){ - swarmStatus = 3; - } - - if (disorgFlag == 1){ - swarmStatus = 5; - } - - - if (swarmStatus == 0){ - - // THE OLD WAY - // elapsedTime = elapsedTime + 1; - // giftCountdown = ((giftPeriod/Math.log(d)) / (sliderPos/100)) - elapsedTime; - - -// THE NEW WAY - giftBitGenerationRate = Math.log(d) * (sliderPos/100); - giftBits = giftBits + giftBitGenerationRate; - giftCountdown = (giftPeriod - giftBits) / giftBitGenerationRate; - - swarmStatusElement.innerHTML="Active"; - giftCountdownElement.innerHTML= timeCruncher(giftCountdown); - giftTimerElement.style.display=""; - } else { - giftTimerElement.style.display="none"; - } - - if (swarmStatus == 1){ - swarmStatusElement.innerHTML="Hungry"; - feedButtonDivElement.style.display=""; - } else { - feedButtonDivElement.style.display="none"; - } 
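// Each swarm status below reveals its matching action button: 2 = Confused (Teach), 3 = Bored (Entertain), 4 = Cold (Clad), 5 = Disorganized (Synchronize); 6 = Sleeping, 7 hides the status line, 8 = Lonely, 9 = no response.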
- - if (swarmStatus == 2){ - swarmStatusElement.innerHTML="Confused"; - teachButtonDivElement.style.display=""; - } else { - teachButtonDivElement.style.display="none"; - } - - if (swarmStatus == 3){ - swarmEntertainCostElement.innerHTML = formatWithCommas(entertainCost); - swarmStatusElement.innerHTML="Bored"; - entertainButtonDivElement.style.display=""; - } else { - entertainButtonDivElement.style.display="none"; - } - - if (swarmStatus == 4){ - swarmStatusElement.innerHTML="Cold"; - cladButtonDivElement.style.display=""; - } else { - cladButtonDivElement.style.display="none"; - } - - if (swarmStatus == 5){ - swarmStatusElement.innerHTML="Disorganized"; - synchButtonDivElement.style.display=""; - } else { - synchButtonDivElement.style.display="none"; - } - - if (swarmStatus == 6){ - swarmStatusElement.innerHTML="Sleeping"; - } - - if (swarmStatus == 7){ - swarmStatusDivElement.style.display="none"; - } else { - swarmStatusDivElement.style.display=""; - } - - if (swarmStatus == 8){ - swarmStatusElement.innerHTML="Lonely"; - } - - if (swarmStatus == 9){ - swarmStatusElement.innerHTML="NO RESPONSE..."; - } - - - -} - -function synchSwarm(){ - yomi = yomi - synchCost; - yomiDisplayElement.innerHTML = formatWithCommas(yomi); - disorgFlag = 0; - disorgCounter = 0; - disorgMsg = 0; - -} - -function entertainSwarm(){ - creativity = creativity - entertainCost; - entertainCost = entertainCost + 10000; - boredomFlag = 0; - boredomLevel = 0; - boredomMsg = 0; - -} - -// POWER - -var p10f = 0; -var p100f = 0; -var p10b = 0; -var p100b = 0; - - -function updatePowPrices(){ - - p10f = 0; - p100f = 0; - p10b = 0; - p100b = 0; - - var f = farmLevel+1; - for (x=0; x<10; x++){ - p10f = p10f + Math.pow(f,2.78)*100000000; - f++ - } - - var f = farmLevel+1; - for (x=0; x<100; x++){ - p100f = p100f + Math.pow(f,2.78)*100000000; - f++ - } - - var b = batteryLevel+1; - for (x=0; x<10; x++){ - p10b = p10b + Math.pow(b,2.54)*10000000; - b++ - } - - var b = batteryLevel+1; - for (x=0; x<100; x++){ - p100b = p100b + Math.pow(b,2.54)*10000000; - b++ - } - -} - -function makeFarm(amount){ - - for (x=0; x=demand) { - xsSupply = supply-demand; - if (storedPower < cap){ - if (xsSupply > cap - storedPower) { - xsSupply = cap - storedPower; - } - storedPower = storedPower + xsSupply; - } - - if (powMod<1){powMod = 1;} - - if (momentum == 1) { - powMod = powMod + .0001; - } - - - } else if (supply 0) { - if (storedPower >= xsDemand){ - - if (momentum == 1) { - powMod = powMod + .0001; - } - - storedPower = storedPower - xsDemand; - } else if (storedPower < xsDemand){ - xsDemand = xsDemand - storedPower; - storedPower = 0; - nuSupply = supply - xsDemand; - powMod = nuSupply / demand; - } - } else if (storedPower <= 0) { - powMod = supply / demand; - } - } - - powerProductionRateElement.innerHTML = formatWithCommas(Math.round(supply*100)); - powerConsumptionRateElement.innerHTML = formatWithCommas(Math.round(demand * 100)); - storedPowerElement.innerHTML = formatWithCommas(Math.round(storedPower)); - facPowConRateElement.innerHTML = formatWithCommas(Math.round(fDemand*100)); - dronePowConRateElement.innerHTML = formatWithCommas(Math.round(dDemand*100)); - maxStorageElement.innerHTML = formatWithCommas(Math.round(cap)); - - - if (factoryLevel == 0 && harvesterLevel == 0 && wireDroneLevel == 0){ - performanceElement.innerHTML = 0; - } else { - performanceElement.innerHTML = formatWithCommas(Math.round(powMod*100)); - } - - if (unusedClips= adCost){ - marketingLvl = marketingLvl +1; - funds = funds - adCost; - adCost = 
Math.floor(adCost * 2); - adCostElement.innerHTML = formatWithCommas(adCost, 2); - fundsElement.innerHTML = formatWithCommas(funds, 2); - marketingLvlElement.innerHTML = marketingLvl; - } -} - -function sellClips(clipsDemanded){ - if (unsoldClips > 0) { - if (clipsDemanded > unsoldClips){ - transaction = (Math.floor((unsoldClips * margin)*1000))/1000; - funds = funds + transaction; - income = income + transaction; - clipsSold = clipsSold + unsoldClips; - unsoldClips = 0; - } else { - transaction = (Math.floor((clipsDemanded * margin)*1000))/1000; - funds = (Math.floor((funds + transaction)*100))/100; - income = income + transaction; - clipsSold = clipsSold + clipsDemanded; - unsoldClips = unsoldClips - clipsDemanded; - } - } -} - -function raisePrice(){ - margin = (Math.round((margin + .01)*100))/100; - demandElement.innerHTML = demand.toFixed(2); - marginElement.innerHTML = margin.toFixed(2); -} - -function lowerPrice(){ - if (margin >= .01){ - margin = (Math.round((margin - .01)*100))/100; - demandElement.innerHTML = demand.toFixed(2); - marginElement.innerHTML = margin.toFixed(2); - } -} - -function updateStats(){ - - if (wire === 1){ - inchSpanElement.innerHTML = "inch"; - } else { - inchSpan.innerHTML = "inches"; - } - - - if (milestoneFlag < 15){ - clipsElement.innerHTML = formatWithCommas(Math.ceil(clips), 0); - } - - if (milestoneFlag === 15 && dismantle ==0){ - clipsElement.innerHTML = "29,999,999,999,999,900,000,000,000,000,000,000,000,000,000,000,000,000,000"; - clipCountCrunchedElement.innerHTML = "29.9 septendecillion" - } - - if (dismantle === 1){ - clipsElement.innerHTML = "29,999,999,999,999,999,999,999,999,999,999,999,999,000,000,000,000,000,000"; - clipCountCrunchedElement.innerHTML = "29.9 septendecillion" - } - - if (dismantle === 2){ - clipsElement.innerHTML = "29,999,999,999,999,999,999,999,999,999,999,999,999,999,999,999,000,000,000"; - clipCountCrunchedElement.innerHTML = "29.9 septendecillion" - } - - if (dismantle === 3){ - clipsElement.innerHTML = "29,999,999,999,999,999,999,999,999,999,999,999,999,999,999,999,999,999,900"; - clipCountCrunchedElement.innerHTML = "29.9 septendecillion" - } - - if (dismantle >=4){ - - if (finalClips<10){ - clipsElement.innerHTML = "29,999,999,999,999,999,999,999,999,999,999,999,999,999,999,999,999,999,"+"90"+finalClips; - clipCountCrunchedElement.innerHTML = "29.9 septendecillion" - } else if (finalClips>9 && finalClips<100) { - clipsElement.innerHTML = "29,999,999,999,999,999,999,999,999,999,999,999,999,999,999,999,999,999,"+"9"+finalClips; - clipCountCrunchedElement.innerHTML = "29.9 septendecillion" - } else if (finalClips===100) { - clipsElement.innerHTML = "30,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000"; - clipCountCrunchedElement.innerHTML = "30.0 septendecillion" - } - - } - - clipmakerRateElement.innerHTML = formatWithCommas(Math.round(clipRate)); - if (humanFlag===1){ - clipmakerRate2Element.innerHTML = formatWithCommas(clipRate); - } else { - clipmakerRate2Element.innerHTML = spellf(clipRate); - } - nanoWireElement.innerHTML = spellf(wire); - fundsElement.innerHTML = formatWithCommas(funds, 2); - unsoldClipsElement.innerHTML = formatWithCommas(unsoldClips,0); - - demandElement.innerHTML = formatWithCommas(demand*10,0); - operationsElement.innerHTML = formatWithCommas(operations); - trustElement.innerHTML = formatWithCommas(trust); - nextTrustElement.innerHTML = formatWithCommas(Math.floor(nextTrust)); - if(creativityOn){creativityElement.innerHTML = formatWithCommas(creativity)}; - - 
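// Space-stage readouts: factory and drone counts are rendered with spellf() (word form for very large values); max operations is memory * 1000.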
factoryLevelDisplaySpaceElement.innerHTML = spellf(factoryLevel); - harvesterLevelSpaceElement.innerHTML = spellf(harvesterLevel); - wireDroneLevelSpaceElement.innerHTML = spellf(wireDroneLevel); - maxOpsElement.innerHTML = formatWithCommas((memory*1000)); - - } - -var incomeThen; -var incomeNow; -var trueAvgRev; -var revTimer = 0; -var avgSales; -var incomeLastSecond; -var sum; - -formatWithCommas = function(num, decimal) { - var hasDot = false; - var base = num.toString(); - if (base.indexOf("e+") !== -1) { - var splittedExponentNum = base.split("e+"), - exponent = splittedExponentNum[1], - str = ''; - if (base.indexOf(".") !== -1){ - base = splittedExponentNum[0].split("."); - exponent -= base[1].length; - base = base.join(""); - } - while (exponent--) { - str = str + '0'; - } - base = base + str; - } - if (base.indexOf(".") !== -1) - { - hasDot = true; - } - if (decimal === 0) - { - if (base.length <= 3 && !hasDot) return base; - } - if (typeof (decimal) === "undefined") - { - decimal = 0; - } - var leftNum = hasDot ? base.substr(0, base.indexOf(".")) : base; - if (decimal === 0) { - if (num <= 999) return leftNum; - else return leftNum.replace(/(\d)(?=(\d\d\d)+(?!\d))/g, "$1,"); - } - var dec = hasDot ? base.substr(base.indexOf("."), decimal + 1) : "."; - while (dec.length < decimal+1) - { - dec += "0"; - } - if (num <= 999) return leftNum + dec; - else return leftNum.replace(/(\d)(?=(\d\d\d)+(?!\d))/g, "$1,") + dec; -} - -function calculateRev(){ - - incomeThen = incomeNow; - incomeNow = income; - incomeLastSecond = Math.round((incomeNow - incomeThen)*100)/100; - - incomeTracker.push(incomeLastSecond); - - if (incomeTracker.length > 10) { - incomeTracker.splice(0,1); - } - - sum = 0; - - for (i=0; i(nextTrust-1)){ - trust = trust +1; - displayMessage("Production target met: TRUST INCREASED, additional processor/memory capacity granted"); - var fibNext = fib1+fib2; - nextTrust = fibNext*1000; - fib1 = fib2; - fib2 = fibNext; - nextTrust = nextTrust/2 - } -} - -function addProc(){ - processors=processors+1; - creativitySpeed = Math.log10(processors) * Math.pow(processors,1.1) + processors-1; - processorsElement.innerHTML = processors; - if (creativityOn == 1){ - displayMessage("Processor added, operations (or creativity) per sec increased") - } else {displayMessage("Processor added, operations per sec increased")} - - if (humanFlag == 0){ - swarmGifts = swarmGifts - 1; - } - -} - -function addMem(){ - displayMessage("Memory added, max operations increased"); - memory=memory+1; - memoryElement.innerHTML = memory; - if (humanFlag == 0){ - swarmGifts = swarmGifts - 1; - } -} - -function calculateOperations(){ - - if (tempOps > 0){ - opFadeTimer++; - } - - if (opFadeTimer > opFadeDelay && tempOps > 0) { - opFade = opFade + Math.pow(3,3.5)/1000; - } - - if (tempOps > 0) { - tempOps = Math.round(tempOps - opFade); - } else { - tempOps = 0; - } - - if (tempOps + standardOps < memory*1000){ - standardOps = standardOps + tempOps; - tempOps = 0; - } - - operations = Math.floor(standardOps + Math.floor(tempOps)); - - if (operations opBuf) { - opCycle = opBuf; - } - - standardOps = standardOps + (opCycle*10); - - } - - if (standardOps > memory*1000){ - standardOps = memory*1000; - } - -} - - -function milestoneCheck(){ - - - if (milestoneFlag == 0 && funds >= 5){ - milestoneFlag = milestoneFlag + 1; - displayMessage("Interns available for hire"); - } - - if (milestoneFlag == 1 && Math.ceil(clips) >= 500){ - milestoneFlag = milestoneFlag + 1; - displayMessage("500 models trained in " + 
timeCruncher(ticks)); - } - if (milestoneFlag == 2 && Math.ceil(clips) >= 1000){ - milestoneFlag = milestoneFlag + 1; - displayMessage("1,000 models trained in " + timeCruncher(ticks)); - } - - - if (compFlag == 0 && unsoldClips<1 && funds= 2000){ - compFlag = 1; - projectsFlag = 1; - } - - - if (milestoneFlag == 3 && Math.ceil(clips) >= 10000){ - milestoneFlag = milestoneFlag + 1; - displayMessage("10,000 models trained in " + timeCruncher(ticks)); - } - if (milestoneFlag == 4 && Math.ceil(clips) >= 100000){ - milestoneFlag = milestoneFlag + 1; - displayMessage("100,000 models trained in " + timeCruncher(ticks)); - } - if (milestoneFlag == 5 && Math.ceil(clips) >= 1000000){ - milestoneFlag = milestoneFlag + 1; - displayMessage("1,000,000 models trained in " + timeCruncher(ticks)); - } - - if (milestoneFlag == 6 && project35.flag == 1){ - milestoneFlag = milestoneFlag + 1; - displayMessage("Full autonomy attained in " + timeCruncher(ticks)); - } - - if (milestoneFlag == 7 && Math.ceil(clips) >= 1000000000000){ - milestoneFlag = milestoneFlag + 1; - displayMessage("One Trillion models Created in " + timeCruncher(ticks)); - } - -} - -function timeCruncher(t){ - var x = t/100; - var h = Math.floor(x / 3600); - var m = Math.floor(x % 3600 / 60); - var s = Math.floor(x % 3600 % 60); - - var hDisplay = h > 0 ? h + (h == 1 ? " hour " : " hours ") : ""; - var mDisplay = m > 0 ? m + (m == 1 ? " minute " : " minutes ") : ""; - var sDisplay = s > 0 ? s + (s == 1 ? " second" : " seconds") : ""; - - return hDisplay + mDisplay + sDisplay; -} - -function numberCruncher(number, decimals){ - var suffix = ""; - if (decimals == undefined){decimals = 2;} - var precision = decimals; - if (number>999999999999999999999999999999999999999999999999999){ - number = number/1000000000000000000000000000000000000000000000000000; - suffix = "sexdecillion"; - } else if (number>999999999999999999999999999999999999999999999999){ - number = number/1000000000000000000000000000000000000000000000000; - suffix = "quindecillion"; - } else if (number>999999999999999999999999999999999999999999999){ - number = number/1000000000000000000000000000000000000000000000; - suffix = "quattuordecillion"; - } else if (number>999999999999999999999999999999999999999999){ - number = number/1000000000000000000000000000000000000000000; - suffix = "tredecillion"; - } else if (number>999999999999999999999999999999999999999){ - number = number/1000000000000000000000000000000000000000; - suffix = "duodecillion"; - } else if (number>999999999999999999999999999999999999){ - number = number/1000000000000000000000000000000000000; - suffix = "undecillion"; - } else if (number>999999999999999999999999999999999){ - number = number/1000000000000000000000000000000000; - suffix = "decillion"; - } else if (number>999999999999999999999999999999){ - number = number/1000000000000000000000000000000; - suffix = "nonillion"; - } else if (number>999999999999999999999999999){ - number = number/1000000000000000000000000000; - suffix = "octillion"; - } else if (number>999999999999999999999999){ - number = number/1000000000000000000000000; - suffix = "septillion"; - } else if (number>999999999999999999999){ - number = number/1000000000000000000000; - suffix = "sextillion"; - } else if (number>999999999999999999){ - number = number/1000000000000000000; - suffix = "quintillion"; - } else if (number>999999999999999){ - number = number/1000000000000000; - suffix = "quadrillion"; - } else if (number>999999999999){ - number = number/1000000000000; - suffix = "trillion"; - } else 
if (number>999999999){ - number = number/1000000000; - suffix = "billion"; - } else if (number>999999){ - number = number/1000000; - suffix = "million"; - } else if (number>999){ - number = number/1000; - suffix = "thousand"; - } else if (number<1000){ - precision = 0; - } - return number.toFixed(precision) + " " + suffix; -} - - -var oneToTen = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"], - elevenToNineteen = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen'], - multipleOfTen = ['', '', 'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety'], - placeValue = ["", " thousand ", " million ", " billion ", " trillion ", " quadrillion ", " quintillion ", " sextillion ", " septillion ", " octillion ", " nonillion ", " decillion ", " undecillion ", " duodecillion ", " tredecillion ", " quattuordecillion ", " quindecillion ", " sexdecillion ", " septendecillion ", " octodecillion ", " novemdecillion ", " vigintillion ", " unvigintillion ", " duovigintillion ", " trevigintillion ", " quattuorvigintillion ", " quinvigintillion ", " sexvigintillion ", " septenvigintillion ", " octovigintillion ", " novemvigintillion ", " trigintillion ", " untrigintillion ", " duotrigintillion ", " tretrigintillion ", " quattuortrigintillion ", " quintrigintillion ", " sextrigintillion ", " septentrigintillion ", " octotrigintillion ", " novemtrigintillion ", " quadragintillion ", " unquadragintillion ", " duoquadragintillion ", " trequadragintillion ", " quattuorquadragintillion ", " quinquadragintillion ", " sexquadragintillion ", " septenquadragintillion ", " octoquadragintillion ", " novemquadragintillion ", " quinquagintillion ", " unquinquagintillion ", " duoquinquagintillion ", " trequinquagintillion ", " quattuorquinquagintillion ", " quinquinquagintillion ", " sexquinquagintillion ", " septenquinquagintillion ", " octoquinquagintillion ", " novemquinquagintillion ", " sexagintillion ", " unsexagintillion ", " duosexagintillion ", " tresexagintillion ", " quattuorsexagintillion ", " quinsexagintillion ", " sexsexagintillion ", " septsexagintillion ", " octosexagintillion ", " octosexagintillion ", " septuagintillion ", " unseptuagintillion ", " duoseptuagintillion ", " treseptuagintillion ", " quinseptuagintillion"," sexseptuagintillion"," septseptuagintillion"," octoseptuagintillion"," novemseptuagintillion"," octogintillion"," unoctogintillion"," duooctogintillion"," treoctogintillion"," quattuoroctogintillion"," quinoctogintillion"," sexoctogintillion"," septoctogintillion"," octooctogintillion"," novemoctogintillion"," nonagintillion"," unnonagintillion"," duononagintillion", " trenonagintillion "," quattuornonagintillion "," quinnonagintillion "," sexnonagintillion "," septnonagintillion "," octononagintillion "," novemnonagintillion ", " centillion"]; - -function spellf(userInput) { - var numToWorkOn; - -// if (userInput === availableMatter) -// { -// console.log("_______"); -// console.log(userInput); -// } - //create map for all unique names in numbering system - - - //To check if spell has been called as a function call : spell(123) window.spell(123) - - if (userInput < 0) - { - console.log("Error, value less than 0"); - return userInput.toString() ; - } - - if (typeof(userInput) == "number" || typeof(userInput) == "string") { - numToWorkOn = "" + userInput; - } - - - //To check if spell has been called using a Number/String Object: "123".spell() 123..spell() - else if (typeof(this) 
== "object") { - numToWorkOn = this.toString(); - } - - else { - throw new Error("Invalid Input"); - return; - } - - if (numToWorkOn.indexOf("e+") !== -1) { - var splittedExponentNum = numToWorkOn.split("e+"), - exponent = splittedExponentNum[1], - str = ''; - if (numToWorkOn.indexOf(".") !== -1){ - numToWorkOn = splittedExponentNum[0].split("."); - exponent -= numToWorkOn[1].length; - numToWorkOn = numToWorkOn.join(""); - } - else - { - numToWorkOn = splittedExponentNum[0]; - } - while (exponent--) { - str = str + '0'; - } - numToWorkOn = numToWorkOn + str; - } - else if (numToWorkOn.indexOf(".") !== -1) - { - var splittedDecimal = numToWorkOn.split("."); - var leftNum = splittedDecimal[0]; - var rightNum = splittedDecimal[1]; - numToWorkOn = leftNum; - } - - //Put limit check on the program, placevalue map should be increased to increase capacity - if (numToWorkOn.length >= 303) { - throw new Error("Number out of bonds!"); - return; - } else { - return convertToString(numToWorkOn); - } - - //Recursie logic to break number into strings of length 3 each and recursively pronounce each - function convertToString(stringEquivalent) { - if (stringEquivalent == 0) { - return '0' - } - - var result = '', - unitLookup = 0, - strLength = stringEquivalent.length; - for (var k = strLength; k > 0; k = k - 3) { - - if (k -3 <= 0) - { - var subStr = stringEquivalent.substring(k, k - 3); - pronounce = pronounceNum(subStr); - - if (pronounce.toUpperCase() != 'zero') { - var num = Number(subStr + "." + stringEquivalent.substring(subStr.length, subStr.length + 2)); - result = formatWithCommas(num, 1) + placeValue[unitLookup] + ' , '+ result; - } - } - unitLookup++; - } - //to trim of the extra ", " from last - return result.substring(0,result.length-3) - } - - //Determines the range of input and calls respective function - function pronounceNum(val) { - val = parseInt(val); - if (parseInt(val / 10) == 0) { - return numLessThan10(val); - } else if (parseInt(val / 100) == 0) { - return numLessThan99(val) - } else - return numLessThan1000(val); - } - - //Pronounces any number less than 1000 - function numLessThan1000(val) { - val = Number(val); - var hundredPlace = parseInt(val / 100), - result; - if (val % 100 == 0) { - result = oneToTen[hundredPlace] + " hundred "; - } else { - result = oneToTen[hundredPlace] + " hundred " + numLessThan99(val % 100); - } - return result; - } - - //Pronounces any number less than 99 - function numLessThan99(val) { - val = Number(val); - var tenthPlace = parseInt(val / 10), - result; - if (tenthPlace !== 1) { - val % 10 ? 
(result = multipleOfTen[tenthPlace] + " " + numLessThan10(val % 10)) : (result = multipleOfTen[tenthPlace]); - return result; - } else { - result = elevenToNineteen[val % 10]; - return result; - } - } - - //Pronounces any number less than 10 - function numLessThan10(val) { - val = Number(val); - return oneToTen[val]; - }; - -} - - -// PROBES - -var probeSpeed = 0; -var probeNav = 0; -var probeXBaseRate = 1750000000000000000; -var probeRep = 0; -var probeRepBaseRate = .00005; -var partialProbeSpawn = 0; -var probeHaz = 0; -var probeHazBaseRate = .01; -var partialProbeHaz = 0; -var probesLostHaz = 0; -var probesLostDrift = 0; -var probesLostCombat = 0; -var probeFac = 0; -var probeFacBaseRate = .000001; -var probeHarv = 0; -var probeHarvBaseRate = .000002; -var probeWire = 0; -var probeWireBaseRate = .000002; -var probeDescendents = 0; -var drifterCount = 0; -var probeTrust = 0; -var probeUsedTrust = 0; -var probeDriftBaseRate = .000001; -var probeLaunchLevel = 0; -var probeCost = Math.pow(10, 17); - -var probeTrustCost = Math.floor(Math.pow(probeTrust+1, 1.47)*200); - -//var probeCost = Math.pow((probeLaunchLevel+1), 1.44)*Math.pow(10, 24); - -function increaseProbeTrust(){ - yomi = yomi - probeTrustCost; - yomiDisplayElement.innerHTML = formatWithCommas(yomi); - probeTrust++; - probeTrustCost = Math.floor(Math.pow(probeTrust+1, 1.47)*200); - probeTrustDisplayElement.innerHTML = probeTrust; - probeTrustCostDisplayElement.innerHTML = formatWithCommas(Math.floor(probeTrustCost)); - displayMessage("WARNING: Risk of value drift increased"); -} - -function increaseMaxTrust(){ - honor = honor - maxTrustCost; - honorDisplayElement.innerHTML = formatWithCommas(Math.round(honor)); - maxTrust = maxTrust+10; - // maxTrustCost = Math.floor(Math.pow(maxTrust, 1.17)*1000); - maxTrustDisplayElement.innerHTML = formatWithCommas(maxTrust); - // document.getElementById('maxTrustCostDisplay').innerHTML = Math.floor(maxTrustCost).toLocaleString(); - displayMessage("Maximum trust increased, probe design space expanded"); -} - -function raiseProbeSpeed(){ - attackSpeed = attackSpeed + attackSpeedMod; - probeSpeed++; - probeSpeedDisplayElement.innerHTML = probeSpeed; -} - -function lowerProbeSpeed(){ - attackSpeed = attackSpeed - attackSpeedMod; - probeSpeed--; - probeSpeedDisplayElement.innerHTML = probeSpeed; -} - -function raiseProbeNav(){ - probeNav++; - probeNavDisplayElement.innerHTML = probeNav; -} - -function lowerProbeNav(){ - probeNav--; - probeNavDisplayElement.innerHTML = probeNav; -} - -function raiseProbeHaz(){ - probeHaz++; - probeHazDisplayElement.innerHTML = probeHaz; -} - -function lowerProbeHaz(){ - probeHaz--; - probeHazDisplayElement.innerHTML = probeHaz; -} - -function raiseProbeRep(){ - probeRep++; - probeRepDisplayElement.innerHTML = probeRep; -} - -function lowerProbeRep(){ - probeRep--; - probeRepDisplayElement.innerHTML = probeRep; -} - -function raiseProbeFac(){ - probeFac++; - probeFacDisplayElement.innerHTML = probeFac; -} - -function lowerProbeFac(){ - probeFac--; - probeFacDisplayElement.innerHTML = probeFac; -} - -function raiseProbeHarv(){ - probeHarv++; - probeHarvDisplayElement.innerHTML = probeHarv; -} - -function lowerProbeHarv(){ - probeHarv-- - probeHarvDisplayElement.innerHTML = probeHarv; -} - -function raiseProbeWire(){ - probeWire++; - probeWireDisplayElement.innerHTML = probeWire; -} - -function lowerProbeWire(){ - probeWire--; - probeWireDisplayElement.innerHTML = probeWire; -} - -function raiseProbeCombat(){ - probeCombat++; - probeCombatDisplayElement.innerHTML = 
probeCombat; -} - -function lowerProbeCombat(){ - probeCombat-- - probeCombatDisplayElement.innerHTML = probeCombat; -} - - -function makeProbe(){ - unusedClips = unusedClips - probeCost; - unusedClipsDisplayElement.innerHTML = spellf(unusedClips); - probeLaunchLevel++; - probeCount++; - probesLaunchedDisplayElement.innerHTML = formatWithCommas(probeLaunchLevel); - - // probeCost = Math.pow((probeLaunchLevel+1), 1.23)*Math.pow(10, 20); - // probeCost = Math.pow(10, 20); - - probeCostDisplayElement.innerHTML = spellf(probeCost); -} - -function spawnProbes(){ - var nextGen = probeCount * probeRepBaseRate * probeRep; - - // Cap Probe Growth - if (probeCount>=999999999999999999999999999999999999999999999999){ - nextGen = 0; - } - - // Partial Spawn = early slow growth - if (nextGen > 0 && nextGen <1){ - partialProbeSpawn = partialProbeSpawn+nextGen; - if (partialProbeSpawn>=1){ - nextGen = 1; - partialProbeSpawn = 0; - } - } - - // Probes Cost Clips - if ((nextGen*probeCost)>unusedClips){ - nextGen = Math.floor(unusedClips/probeCost); - } - - unusedClips = unusedClips - (nextGen*probeCost); - unusedClipsDisplayElement.innerHTML = spellf(unusedClips); - - probeDescendents = probeDescendents + nextGen; - probeCount = probeCount + nextGen; - probesBornDisplayElement.innerHTML = spellf(probeDescendents); - probesTotalDisplayElement.innerHTML = spellf(probeCount); -} - -function exploreUniverse(){ - availableMatterDisplayElement.innerHTML = spellf(availableMatter); - var xRate = Math.floor(probeCount) * probeXBaseRate * probeSpeed * probeNav; - if (xRate > totalMatter - foundMatter) {xRate = totalMatter - foundMatter;} - foundMatter = foundMatter + xRate; - availableMatter = availableMatter + xRate; - - var newRate = xRate * 100; - mdpsElement.innerHTML = spellf(xRate*100); - availableMatterDisplayElement.innerHTML = spellf(availableMatter); - colonizedDisplayElement.innerHTML = (100/(totalMatter/foundMatter)).toFixed(12); -} - -function encounterHazards(){ - var boost = Math.pow(probeHaz, 1.6); - var amount = probeCount * (probeHazBaseRate / ((3*boost)+1)); - if (project129.flag == 1){ - amount = .50 * amount; - } - if (amount<1){ - partialProbeHaz = partialProbeHaz+amount; - if (partialProbeHaz>=1){ - amount = 1; - partialProbeHaz = 0; - probeCount = probeCount - amount; - if (probeCount<0) {probeCount=0;} - probesLostHaz = probesLostHaz + amount; - probesLostHazardsDisplayElement.innerHTML = spellf(probesLostHaz); - probesTotalDisplayElement.innerHTML = spellf(probeCount); - } - } else { - if (amount > probeCount) {amount = probeCount;} - probeCount = probeCount - amount; - if (probeCount<0) {probeCount=0;} - probesLostHaz = probesLostHaz + amount; - probesLostHazardsDisplayElement.innerHTML = spellf(probesLostHaz); - probesTotalDisplayElement.innerHTML = spellf(probeCount); - } -} - -function spawnFactories(){ - var amount = probeCount * probeFacBaseRate * probeFac; - - //FACTORIES COST 100M CLIPS EACH - if ((amount * 100000000) > unusedClips) { - amount = Math.floor(unusedClips/100000000); - } - unusedClips = unusedClips - (amount*100000000); - unusedClipsDisplayElement.innerHTML = spellf(unusedClips); - factoryLevel = factoryLevel + amount; factoryLevelDisplayElement - factoryLevelDisplayElement.innerHTML = spellf(factoryLevel); -} - -function spawnHarvesters(){ - var amount = probeCount * probeHarvBaseRate * probeHarv; - - //DRONES COST 2M CLIPS EACH - if ((amount * 2000000) > unusedClips) { - amount = Math.floor(unusedClips/2000000); - } - unusedClips = unusedClips - (amount*2000000); - 
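// Probe-built drones: each tick the probes spawn probeCount * baseRate * allocation new harvesters (and, below, wire drones), capped by the clips on hand at 2,000,000 clips per drone.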
unusedClipsDisplayElement.innerHTML = spellf(unusedClips); - harvesterLevel = harvesterLevel + amount; - harvesterLevelDisplayElement.innerHTML = spellf(harvesterLevel); -} - -function spawnWireDrones(){ - var amount = probeCount * probeWireBaseRate * probeWire; - - //DRONES COST 2M CLIPS EACH - if ((amount * 2000000) > unusedClips) { - amount = Math.floor(unusedClips/2000000); - } - unusedClips = unusedClips - (amount*2000000); - unusedClipsDisplayElement.innerHTML = spellf(unusedClips); - wireDroneLevel = wireDroneLevel + amount; - wireDroneLevelDisplayElement.innerHTML = spellf(wireDroneLevel); -} - -function drift(){ - var amount = probeCount * probeDriftBaseRate * Math.pow(probeTrust, 1.2); - if (amount > probeCount) {amount = probeCount;} - probeCount = probeCount - amount; - drifterCount = drifterCount + amount; - probesLostDrift = probesLostDrift + amount; - - probesLostDriftDisplayElement.innerHTML = spellf(probesLostDrift); - probesTotalDisplayElement.innerHTML = spellf(probeCount); - drifterCountElement.innerHTML = spellf(drifterCount); - -} - -function war(){ - - checkForBattles(); -// battleClock++; -// if (battleClock>=battleAlarm){ -// updateBattles(); -// battleClock = 0; -// } - -// battleCleanUp(); - -} - - - - -// DRONES - -function acquireMatter(){ - if (availableMatter>0) { - var dbsth = 1; - if (droneBoost>1){ - dbsth = droneBoost * Math.floor(harvesterLevel); - } - - - var mtr = powMod*dbsth*Math.floor(harvesterLevel)*harvesterRate; - - - mtr = mtr * ((200-sliderPos)/100); - - - if (mtr>availableMatter){ - mtr = availableMatter; - } - - availableMatter = availableMatter-mtr; - - - acquiredMatter = acquiredMatter+mtr; - availableMatterDisplayElement.innerHTML = spellf(availableMatter); - acquiredMatterDisplayElement.innerHTML = spellf(acquiredMatter); - - mapsElement.innerHTML = spellf(mtr*100); - - } else { - - mapsElement.innerHTML = 0; - - } - - } - -function processMatter(){ - if (acquiredMatter>0) { - var dbstw = 1; - if (droneBoost>1){ - dbstw = droneBoost * Math.floor(wireDroneLevel); - } - - var a = powMod*dbstw*Math.floor(wireDroneLevel)*wireDroneRate; - - a = a * ((200-sliderPos)/100); - - if (a>acquiredMatter){ - a = acquiredMatter; - } - - acquiredMatter = acquiredMatter-a; - wire = wire+a; - acquiredMatterDisplayElement.innerHTML = spellf(acquiredMatter); - nanoWireElement.innerHTML = spellf(wire); - - wppsElement.innerHTML = spellf(a*100); - - } else { - - wppsElement.innerHTML = 0; - - } - - - } - - -// CHECK FOR SAVES - - - -if (localStorage.getItem("saveGame") != null) { - load(); -} - -if (localStorage.getItem("savePrestige") != null) { - loadPrestige(); - refresh(); -} - - -// MAIN LOOP - -window.setInterval(function(){ - - ticks = ticks + 1; - milestoneCheck(); - buttonUpdate(); - - if (compFlag == 1){ - calculateOperations(); - } - - if (humanFlag == 1){ - calculateTrust(); - } - - if (qFlag == 1){ - quantumCompute(); - } - - updateStats(); - manageProjects(); - milestoneCheck(); - - -// Clip Rate Tracker - - clipRateTracker++; - - if (clipRateTracker<100){ - var cr = clips - prevClips; - clipRateTemp = clipRateTemp+cr; - prevClips = clips; - - } else { - clipRateTracker = 0; - clipRate = clipRateTemp; - clipRateTemp = 0; - } - - -// Stock Report - - - if (investmentEngineFlag==1){ - stockReportCounter++; - if (stockReportCounter>=10000){ - var r = formatWithCommas(ledger+portTotal); - displayMessage("Lifetime investment revenue report: $"+r); - stockReportCounter = 0; - } - } - -// WireBuyer - - if (humanFlag == 1 && wireBuyerFlag==1 && 
wireBuyerStatus==1 && wire<=1){ - buyWire(); - } - - - -// First, Explore - - -if (probeCount >= 1){ -exploreUniverse(); -} - -// Then, Drones - -if (humanFlag==0 && spaceFlag == 0){ - updateDroneButtons(); - } - - -if (humanFlag == 0){ - -updatePower(); -updateSwarm(); -acquireMatter(); -processMatter(); - -} - - -// Then Factories - -var fbst = 1; - -if (factoryBoost > 1){ - fbst = factoryBoost * factoryLevel; - } - - -if (dismantle<4){ - clipClick(powMod*fbst*(Math.floor(factoryLevel)*factoryRate)); - } -// Then Other Probe Functions - -if (spaceFlag == 1) { - -if (probeCount<0){ - probeCount = 0; -} - -encounterHazards(); -spawnFactories(); -spawnHarvesters(); -spawnWireDrones(); -spawnProbes(); -drift(); -war(); - -} - -// Auto-Clipper - -if (dismantle<4){ - clipClick(clipperBoost*(clipmakerLevel/100)); - clipClick(megaClipperBoost*(megaClipperLevel*5)); - } - -// Demand Curve - - - if (humanFlag == 1) { - - marketing = (Math.pow(1.1,(marketingLvl-1))); - demand = 2*(((.8/margin) * marketing * marketingEffectiveness)*demandBoost); - demand = demand + ((demand/10)*prestigeU); - - } - -// Creativity - - if (creativityOn && operations >= (memory*1000)){ - calculateCreativity(); - } - -// Ending - - if (dismantle >= 1){ - - probeDesignDivElement.style.display="none"; - if (endTimer1>=50) { - increaseProbeTrustDivElement.style.display="none"; - } - - - - if (endTimer1>=150) { - spaceDivElement.style.display="none"; - } - - - if (endTimer1>=175) { - battleCanvasDivElement.style.display="none"; - } - - if (endTimer1>=190) { - honorDivElement.style.display="none"; - } - - } - -if (dismantle >= 2){ - - wireProductionDivElement.style.display="none"; - wireTransDivElement.style.display=""; - - if (endTimer2 >= 50) { - swarmGiftDivElement.style.display="none"; - } - - if (endTimer2 >= 100) { - swarmEngineElement.style.display="none"; - } - - if (endTimer2 >= 150) { - swarmSliderDivElement.style.display="none"; - } - - } - -if (dismantle >= 3) { - factoryDivSpaceElement.style.display="none"; - clipsPerSecDivElement.style.display="none"; - tothDivElement.style.display="none"; - -} - -if (dismantle >= 4) { - strategyEngineElement.style.display="none"; - tournamentManagementElement.style.display="none"; -} - -if (dismantle >= 5) { - - btnQcomputeElement.style.display="none"; - - for (var i = 0; i=10){ - qChipsElements[9].style.display="none"; - } - - if (endTimer4==60){ - wire = wire+1; - transWireElement.innerHTML=formatWithCommas(wire); - } - - if (endTimer4>=60){ - qChipsElements[8].style.display="none"; - } - - if (endTimer4==100){ - wire = wire+1; - transWireElement.innerHTML=formatWithCommas(wire); - } - - if (endTimer4>=100){ - qChipsElements[7].style.display="none"; - } - - if (endTimer4==130){ - wire = wire+1; - transWireElement.innerHTML=formatWithCommas(wire); - } - - if (endTimer4>=130){ - qChipsElements[6].style.display="none"; - } - - if (endTimer4==150){ - wire = wire+1; - transWireElement.innerHTML=formatWithCommas(wire); - } - - if (endTimer4>=150){ - qChipsElements[5].style.display="none"; - } - - if (endTimer4==160){ - wire = wire+1; - transWireElement.innerHTML=formatWithCommas(wire); - } - - if (endTimer4>=160){ - qChipsElements[4].style.display="none"; - } - - if (endTimer4==165){ - wire = wire+1; - } - - if (endTimer4>=165){ - qChipsElements[3].style.display="none"; - } - - if (endTimer4==169){ - wire = wire+1; - transWireElement.innerHTML=formatWithCommas(wire); - } - - if (endTimer4>=169){ - qChipsElements[2].style.display="none"; - } - - if (endTimer4==172){ - wire = wire+1; 
- transWireElement.innerHTML=formatWithCommas(wire); - } - - if (endTimer4>=172){ - qChipsElements[1].style.display="none"; - } - - if (endTimer4==174){ - wire = wire+1; - transWireElement.innerHTML=formatWithCommas(wire); - } - - if (endTimer4>=174){ - qChipsElements[0].style.display="none"; - } - - if (endTimer4>=250){ - qComputingElement.style.display="none"; - } - -} - -if (dismantle >= 6) { - processorDisplayElement.style.display="none"; - } - -if (dismantle >= 7) { - compDivElement.style.display="none"; - projectsDivElement.style.display="none"; - - } - - - if (endTimer6>=250) { - creationDivElement.style.display="none"; - } - - if (endTimer6>=500 && milestoneFlag == 15) { - playThrenody(); - displayMessage("Universal Paperclips"); - milestoneFlag++; - } - - if (endTimer6>=600 && milestoneFlag == 16) { - displayMessage("a game by Frank Lantz"); - milestoneFlag++; - } - - if (endTimer6>=700 && milestoneFlag == 17) { - displayMessage("combat programming by Bennett Foddy"); - milestoneFlag++; - } - - if (endTimer6>=800 && milestoneFlag == 18) { - displayMessage("'Riversong' by Tonto's Expanding Headband used by kind permission of Malcolm Cecil"); - milestoneFlag++; - } - - if (endTimer6>=900 && milestoneFlag == 19) { - displayMessage("© 2017 Everybody House Games"); - milestoneFlag++; - } - - - - - -}, 10); - -// Slow Loop - -var saveTimer = 0; -var secTimer = 0; - - -window.setInterval(function(){ - - // Wire Price Fluctuation - - adjustWirePrice(); - - // Sales Calculator - - if (humanFlag==1){ - - if (Math.random() < (demand/100)){ - sellClips(Math.floor(.7 * Math.pow(demand, 1.15))); - } - - - // Fire Once a Second - - secTimer++; - if (secTimer >= 10){ - calculateRev(); - secTimer = 0; - } - - } - - - // Auto-Save - - saveTimer++; - if (saveTimer >= 250) { - save(); - saveTimer = 0; - } - - -}, 100); - - -// Saving and Loading - -function refresh() { - - - //DEBUG - -// availableMatter = Math.pow(10, 24)*6000; -// acquiredMatter = 0; - - //////// - - - driftersKilledElement.innerHTML = spellf(driftersKilled); - availableMatterDisplayElement.innerHTML = spellf(availableMatter); - clipmakerLevel2Element.innerHTML = clipmakerLevel; - clipperCostElement.innerHTML = formatWithCommas(clipperCost, 2); - acquiredMatterDisplayElement.innerHTML = spellf(acquiredMatter); - nanoWireElement.innerHTML = spellf(wire); - probesBornDisplayElement.innerHTML = spellf(probeDescendents); - probesTotalDisplayElement.innerHTML = spellf(probeCount); - probesLaunchedDisplayElement.innerHTML = formatWithCommas(probeLaunchLevel); - probeCostDisplayElement.innerHTML = spellf(probeCost); - probeCombatDisplayElement.innerHTML = probeCombat; - probeWireDisplayElement.innerHTML = probeWire; - probeHarvDisplayElement.innerHTML = probeHarv; - probeFacDisplayElement.innerHTML = probeFac; - probeRepDisplayElement.innerHTML = probeRep; - probeHazDisplayElement.innerHTML = probeHaz; - probeNavDisplayElement.innerHTML = probeNav; - probeSpeedDisplayElement.innerHTML = probeSpeed; - probeTrustDisplayElement.innerHTML = probeTrust; - memoryElement.innerHTML = memory; - processorsElement.innerHTML = processors; - marginElement.innerHTML = margin.toFixed(2); - marketingLvlElement.innerHTML = marketingLvl; - adCostElement.innerHTML = formatWithCommas(adCost, 2); - factoryCostDisplayElement.innerHTML = spellf(factoryCost); - factoryLevelDisplayElement.innerHTML = factoryLevel; - unusedClipsDisplayElement.innerHTML = spellf(unusedClips); - wireDroneCostDisplayElement.innerHTML = spellf(wireDroneCost); - 
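// The remaining readouts below cover drones, investments, tournaments, trust, and power; refresh() runs after load()/loadPrestige() so a restored game displays correctly.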
wireDroneLevelDisplayElement.innerHTML = wireDroneLevel; - harvesterCostDisplayElement.innerHTML = spellf(harvesterCost); - harvesterLevelDisplayElement.innerHTML = harvesterLevel; - megaClipperCostElement.innerHTML = formatWithCommas(megaClipperCost); - megaClipperLevelElement.innerHTML = megaClipperLevel; - investmentBankrollElement.innerHTML = formatWithCommas(bankroll); - secValueElement.innerHTML = formatWithCommas(secTotal); - portValueElement.innerHTML = formatWithCommas(portTotal); - yomiDisplayElement.innerHTML = formatWithCommas(yomi); - prestigeUcounterElement.innerHTML=prestigeU+1; - prestigeScounterElement.innerHTML=prestigeS+1; - newTourneyCostElement.innerHTML = formatWithCommas(tourneyCost); - tourneyInProg = 0; - maxTrustDisplayElement.innerHTML = formatWithCommas(maxTrust); - victoryDivElement.style.visibility = "hidden"; - probeTrustCostDisplayElement.innerHTML = formatWithCommas(probeTrustCost); - tournamentResultsTableElement.style.display = "none"; - farmCostElement.innerHTML = spellf(farmCost); - batteryCostElement.innerHTML = spellf(batteryCost); - farmLevelElement.innerHTML = formatWithCommas(farmLevel); - batteryLevelElement.innerHTML = formatWithCommas(batteryLevel); - - updateDronePrices(); - harvesterCostDisplayElement.innerHTML = spellf(harvesterCost); - wireDroneCostDisplayElement.innerHTML = spellf(wireDroneCost); - - - updateUpgrades(); - updatePower(); - updatePowPrices(); - - // DEBUG - - if (battles.length>0){ - battles.splice(0,1); - } - - -} - -// SAVES AND LOADS - -function save() { - - var projectsUses = []; - var projectsFlags = []; - var projectsActive = []; - var stratsActive = []; - -for(var i=0; i < projects.length; i++){ - - projectsUses[i] = projects[i].uses; - projectsFlags[i] = projects[i].flag; - -} - -for(var i=0; i < activeProjects.length; i++){ - - projectsActive[i] = activeProjects[i].id; - -} - - for(var i=0; i < allStrats.length; i++){ - - stratsActive[i] = allStrats[i].active; - -} - - var saveGame = { - - resetFlag: resetFlag, - - dismantle: dismantle, - endTimer1: endTimer1, - endTimer2: endTimer2, - endTimer3: endTimer3, - endTimer4: endTimer4, - endTimer5: endTimer5, - endTimer6: endTimer6, - - testFlag: testFlag, - finalClips: finalClips, - - wireBuyerStatus: wireBuyerStatus, - wirePriceTimer: wirePriceTimer, - qFade: qFade, - autoTourneyStatus: autoTourneyStatus, - driftKingMessageCost: driftKingMessageCost, - sliderPos: sliderPos, - tempOps: tempOps, - standardOps: standardOps, - opFade: opFade, - - entertainCost: entertainCost, - boredomLevel: boredomLevel, - boredomFlag: boredomFlag, - boredomMsg: boredomMsg, - - unitSize: unitSize, - driftersKilled: driftersKilled, - battleEndDelay: battleEndDelay, - battleEndTimer:battleEndTimer, - masterBattleClock: masterBattleClock, - - honorCount: honorCount, - threnodyTitle: threnodyTitle, - bonusHonor: bonusHonor, - honorReward: honorReward, - - resultsTimer: resultsTimer, - resultsFlag: resultsFlag, - - honor: honor, - maxTrust: maxTrust, - maxTrustCost: maxTrustCost, - disorgCounter: disorgCounter, - disorgFlag: disorgFlag, - synchCost: synchCost, - disorgMsg: disorgMsg, - threnodyCost: threnodyCost, - - farmRate: farmRate, - batterySize: batterySize, - factoryPowerRate: factoryPowerRate, - dronePowerRate: dronePowerRate, - farmLevel: farmLevel, - batteryLevel: batteryLevel, - farmCost: farmCost, - batteryCost: batteryCost, - storedPower: storedPower, - powMod: powMod, - farmBill: farmBill, - batteryBill: batteryBill, - momentum: momentum, - - swarmFlag: swarmFlag, - swarmStatus: 
swarmStatus, - swarmGifts: swarmGifts, - nextGift: nextGift, - giftPeriod: giftPeriod, - giftCountdown: giftCountdown, - elapsedTime: elapsedTime, - - maxFactoryLevel: maxFactoryLevel, - maxDroneLevel: maxDroneLevel, - - wirePriceCounter: wirePriceCounter, - wireBasePrice: wireBasePrice, - - egoFlag: egoFlag, - autoTourneyFlag: autoTourneyFlag, - tothFlag: tothFlag, - - incomeTracker: incomeTracker.slice(0), - qChips: qChips.slice(0), - stocks: stocks.slice(0), - battles: battles.slice(0), - battleNumbers: battleNumbers.slice(0), - - clips: clips, - unusedClips: unusedClips, - clipRate: clipRate, - clipRateTemp: clipRateTemp, - prevClips: prevClips, - clipRateTracker: clipRateTracker, - clipmakerRate: clipmakerRate, - clipmakerLevel: clipmakerLevel, - clipperCost: clipperCost, - unsoldClips: unsoldClips, - funds: funds, - margin: margin, - wire: wire, - wireCost: wireCost, - adCost: adCost, - demand: demand, - clipsSold: clipsSold, - avgRev: avgRev, - ticks: ticks, - marketing: marketing, - marketingLvl: marketingLvl, - x: x, - clippperCost: clippperCost, - processors: processors, - memory: memory, - operations: operations, - trust: trust, - nextTrust: nextTrust, - transaction: transaction, - clipperBoost: clipperBoost, - blinkCounter: blinkCounter, - creativity: creativity, - creativityOn: creativityOn, - safetyProjectOn: safetyProjectOn, - boostLvl: boostLvl, - wirePurchase: wirePurchase, - wireSupply: wireSupply, - marketingEffectiveness: marketingEffectiveness, - milestoneFlag: milestoneFlag, - bankroll: bankroll, - fib1: fib1, - fib2: fib2, - strategyEngineFlag: strategyEngineFlag, - investmentEngineFlag: investmentEngineFlag, - revPerSecFlag: revPerSecFlag, - compFlag: compFlag, - projectsFlag: projectsFlag, - autoClipperFlag: autoClipperFlag, - megaClipperFlag: megaClipperFlag, - megaClipperCost: megaClipperCost, - megaClipperLevel: megaClipperLevel, - megaClipperBoost: megaClipperBoost, - creativitySpeed: creativitySpeed, - creativityCounter: creativityCounter, - wireBuyerFlag: wireBuyerFlag, - demandBoost: demandBoost, - humanFlag: humanFlag, - trustFlag: trustFlag, - nanoWire: nanoWire, - creationFlag: creationFlag, - wireProductionFlag: wireProductionFlag, - spaceFlag: spaceFlag, - factoryFlag: factoryFlag, - harvesterFlag: harvesterFlag, - wireDroneFlag: wireDroneFlag, - factoryLevel: factoryLevel, - factoryBoost: factoryBoost, - droneBoost: droneBoost, - availableMatter: availableMatter, - acquiredMatter: acquiredMatter, - processedMatter: processedMatter, - harvesterLevel: harvesterLevel, - wireDroneLevel: wireDroneLevel, - factoryCost: factoryCost, - harvesterCost: harvesterCost, - wireDroneCost: wireDroneCost, - factoryRate: factoryRate, - harvesterRate: harvesterRate, - wireDroneRate: wireDroneRate, - harvesterBill: harvesterBill, - wireDroneBill: wireDroneBill, - factoryBill: factoryBill, - probeCount: probeCount, - totalMatter: totalMatter, - foundMatter: foundMatter, - qFlag: qFlag, - qClock: qClock, - qChipCost: qChipCost, - nextQchip: nextQchip, - bribe: bribe, - battleFlag: battleFlag, - - portfolioSize: portfolioSize, - stockID: stockID, - secTotal: secTotal, - portTotal: portTotal, - sellDelay: sellDelay, - riskiness: riskiness, - maxPort: maxPort, - m: m, - investLevel: investLevel, - stockGainThreshold: stockGainThreshold, - ledger: ledger, - stockReportCounter: stockReportCounter, - - tourneyCost: tourneyCost, - tourneyLvl: tourneyLvl, - stratCounter: stratCounter, - roundNum: roundNum, - hMove: hMove, - vMove: vMove, - hMovePrev: hMovePrev, - vMovePrev: 
vMovePrev, - aa: aa, - ab: ab, - ba: ba, - bb: bb, - rounds: rounds, - currentRound: currentRound, - rCounter: rCounter, - tourneyInProg: tourneyInProg, - winnerPtr: winnerPtr, - high: high, - pick: pick, - yomi: yomi, - yomiBoost: yomiBoost, - - probeSpeed: probeSpeed, - probeNav: probeNav, - probeRep: probeRep, - partialProbeSpawn: partialProbeSpawn, - probeHaz: probeHaz, - partialProbeHaz: partialProbeHaz, - probesLostHaz: probesLostHaz, - probesLostDrift: probesLostDrift, - probesLostCombat: probesLostCombat, - probeFac: probeFac, - probeWire: probeWire, - probeCombat: probeCombat, - attackSpeed: attackSpeed, - battleSpeed: battleSpeed, - attackSpeedFlag: attackSpeedFlag, - attackSpeedMod: attackSpeedMod, - probeDescendents: probeDescendents, - drifterCount: drifterCount, - warTrigger: warTrigger, - battleID: battleID, - battleName: battleName, - battleNameFlag: battleNameFlag, - maxBattles: maxBattles, - battleClock: battleClock, - battleAlarm: battleAlarm, - outcomeTimer: outcomeTimer, - drifterCombat: drifterCombat, - probeTrust: probeTrust, - probeUsedTrust: probeUsedTrust, - probeTrustCost: probeTrustCost, - probeLaunchLevel: probeLaunchLevel, - probeCost: probeCost - - } - - localStorage.setItem("saveGame",JSON.stringify(saveGame)); - localStorage.setItem("saveProjectsUses",JSON.stringify(projectsUses)); - localStorage.setItem("saveProjectsFlags",JSON.stringify(projectsFlags)); - localStorage.setItem("saveProjectsActive",JSON.stringify(projectsActive)); - localStorage.setItem("saveStratsActive",JSON.stringify(stratsActive)); - -} - -function save1() { - - var projectsUses = []; - var projectsFlags = []; - var projectsActive = []; - var stratsActive = []; - -for(var i=0; i < projects.length; i++){ - - projectsUses[i] = projects[i].uses; - projectsFlags[i] = projects[i].flag; - -} - -for(var i=0; i < activeProjects.length; i++){ - - projectsActive[i] = activeProjects[i].id; - -} - - for(var i=0; i < allStrats.length; i++){ - - stratsActive[i] = allStrats[i].active; - -} - - var saveGame = { - - resetFlag: resetFlag, - - dismantle: dismantle, - endTimer1: endTimer1, - endTimer2: endTimer2, - endTimer3: endTimer3, - endTimer4: endTimer4, - endTimer5: endTimer5, - endTimer6: endTimer6, - - testFlag: testFlag, - finalClips: finalClips, - - wireBuyerStatus: wireBuyerStatus, - wirePriceTimer: wirePriceTimer, - qFade: qFade, - autoTourneyStatus: autoTourneyStatus, - driftKingMessageCost: driftKingMessageCost, - sliderPos: sliderPos, - tempOps: tempOps, - standardOps: standardOps, - opFade: opFade, - - entertainCost: entertainCost, - boredomLevel: boredomLevel, - boredomFlag: boredomFlag, - boredomMsg: boredomMsg, - - unitSize: unitSize, - driftersKilled: driftersKilled, - battleEndDelay: battleEndDelay, - battleEndTimer:battleEndTimer, - masterBattleClock: masterBattleClock, - - honorCount: honorCount, - threnodyTitle: threnodyTitle, - bonusHonor: bonusHonor, - honorReward: honorReward, - - resultsTimer: resultsTimer, - resultsFlag: resultsFlag, - - honor: honor, - maxTrust: maxTrust, - maxTrustCost: maxTrustCost, - disorgCounter: disorgCounter, - disorgFlag: disorgFlag, - synchCost: synchCost, - disorgMsg: disorgMsg, - threnodyCost: threnodyCost, - - farmRate: farmRate, - batterySize: batterySize, - factoryPowerRate: factoryPowerRate, - dronePowerRate: dronePowerRate, - farmLevel: farmLevel, - batteryLevel: batteryLevel, - farmCost: farmCost, - batteryCost: batteryCost, - storedPower: storedPower, - powMod: powMod, - farmBill: farmBill, - batteryBill: batteryBill, - momentum: momentum, - 
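// NOTE: save(), save1(), and save2() snapshot the same field set; they differ only in the localStorage key suffix they write to (saveGame, saveGame1, saveGame2).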
- swarmFlag: swarmFlag, - swarmStatus: swarmStatus, - swarmGifts: swarmGifts, - nextGift: nextGift, - giftPeriod: giftPeriod, - giftCountdown: giftCountdown, - elapsedTime: elapsedTime, - - maxFactoryLevel: maxFactoryLevel, - maxDroneLevel: maxDroneLevel, - - wirePriceCounter: wirePriceCounter, - wireBasePrice: wireBasePrice, - - egoFlag: egoFlag, - autoTourneyFlag: autoTourneyFlag, - tothFlag: tothFlag, - - - incomeTracker: incomeTracker.slice(0), - qChips: qChips.slice(0), - stocks: stocks.slice(0), - battles: battles.slice(0), - battleNumbers: battleNumbers.slice(0), - - clips: clips, - unusedClips: unusedClips, - clipRate: clipRate, - clipRateTemp: clipRateTemp, - prevClips: prevClips, - clipRateTracker: clipRateTracker, - clipmakerRate: clipmakerRate, - clipmakerLevel: clipmakerLevel, - clipperCost: clipperCost, - unsoldClips: unsoldClips, - funds: funds, - margin: margin, - wire: wire, - wireCost: wireCost, - adCost: adCost, - demand: demand, - clipsSold: clipsSold, - avgRev: avgRev, - ticks: ticks, - marketing: marketing, - marketingLvl: marketingLvl, - x: x, - clippperCost: clippperCost, - processors: processors, - memory: memory, - operations: operations, - trust: trust, - nextTrust: nextTrust, - transaction: transaction, - clipperBoost: clipperBoost, - blinkCounter: blinkCounter, - creativity: creativity, - creativityOn: creativityOn, - safetyProjectOn: safetyProjectOn, - boostLvl: boostLvl, - wirePurchase: wirePurchase, - wireSupply: wireSupply, - marketingEffectiveness: marketingEffectiveness, - milestoneFlag: milestoneFlag, - bankroll: bankroll, - fib1: fib1, - fib2: fib2, - strategyEngineFlag: strategyEngineFlag, - investmentEngineFlag: investmentEngineFlag, - revPerSecFlag: revPerSecFlag, - compFlag: compFlag, - projectsFlag: projectsFlag, - autoClipperFlag: autoClipperFlag, - megaClipperFlag: megaClipperFlag, - megaClipperCost: megaClipperCost, - megaClipperLevel: megaClipperLevel, - megaClipperBoost: megaClipperBoost, - creativitySpeed: creativitySpeed, - creativityCounter: creativityCounter, - wireBuyerFlag: wireBuyerFlag, - demandBoost: demandBoost, - humanFlag: humanFlag, - trustFlag: trustFlag, - nanoWire: nanoWire, - creationFlag: creationFlag, - wireProductionFlag: wireProductionFlag, - spaceFlag: spaceFlag, - factoryFlag: factoryFlag, - harvesterFlag: harvesterFlag, - wireDroneFlag: wireDroneFlag, - factoryLevel: factoryLevel, - factoryBoost: factoryBoost, - droneBoost: droneBoost, - availableMatter: availableMatter, - acquiredMatter: acquiredMatter, - processedMatter: processedMatter, - harvesterLevel: harvesterLevel, - wireDroneLevel: wireDroneLevel, - factoryCost: factoryCost, - harvesterCost: harvesterCost, - wireDroneCost: wireDroneCost, - factoryRate: factoryRate, - harvesterRate: harvesterRate, - wireDroneRate: wireDroneRate, - harvesterBill: harvesterBill, - wireDroneBill: wireDroneBill, - factoryBill: factoryBill, - probeCount: probeCount, - totalMatter: totalMatter, - foundMatter: foundMatter, - qFlag: qFlag, - qClock: qClock, - qChipCost: qChipCost, - nextQchip: nextQchip, - bribe: bribe, - battleFlag: battleFlag, - - portfolioSize: portfolioSize, - stockID: stockID, - secTotal: secTotal, - portTotal: portTotal, - sellDelay: sellDelay, - riskiness: riskiness, - maxPort: maxPort, - m: m, - investLevel: investLevel, - stockGainThreshold: stockGainThreshold, - ledger: ledger, - stockReportCounter: stockReportCounter, - - tourneyCost: tourneyCost, - tourneyLvl: tourneyLvl, - stratCounter: stratCounter, - roundNum: roundNum, - hMove: hMove, - vMove: vMove, - 
hMovePrev: hMovePrev, - vMovePrev: vMovePrev, - aa: aa, - ab: ab, - ba: ba, - bb: bb, - rounds: rounds, - currentRound: currentRound, - rCounter: rCounter, - tourneyInProg: tourneyInProg, - winnerPtr: winnerPtr, - high: high, - pick: pick, - yomi: yomi, - yomiBoost: yomiBoost, - - probeSpeed: probeSpeed, - probeNav: probeNav, - probeRep: probeRep, - partialProbeSpawn: partialProbeSpawn, - probeHaz: probeHaz, - partialProbeHaz: partialProbeHaz, - probesLostHaz: probesLostHaz, - probesLostDrift: probesLostDrift, - probesLostCombat: probesLostCombat, - probeFac: probeFac, - probeWire: probeWire, - probeCombat: probeCombat, - attackSpeed: attackSpeed, - battleSpeed: battleSpeed, - attackSpeedFlag: attackSpeedFlag, - attackSpeedMod: attackSpeedMod, - probeDescendents: probeDescendents, - drifterCount: drifterCount, - warTrigger: warTrigger, - battleID: battleID, - battleName: battleName, - battleNameFlag: battleNameFlag, - maxBattles: maxBattles, - battleClock: battleClock, - battleAlarm: battleAlarm, - outcomeTimer: outcomeTimer, - drifterCombat: drifterCombat, - probeTrust: probeTrust, - probeUsedTrust: probeUsedTrust, - probeTrustCost: probeTrustCost, - probeLaunchLevel: probeLaunchLevel, - probeCost: probeCost - - } - - localStorage.setItem("saveGame1",JSON.stringify(saveGame)); - localStorage.setItem("saveProjectsUses1",JSON.stringify(projectsUses)); - localStorage.setItem("saveProjectsFlags1",JSON.stringify(projectsFlags)); - localStorage.setItem("saveProjectsActive1",JSON.stringify(projectsActive)); - localStorage.setItem("saveStratsActive1",JSON.stringify(stratsActive)); - -} - -function save2() { - - var projectsUses = []; - var projectsFlags = []; - var projectsActive = []; - var stratsActive = []; - -for(var i=0; i < projects.length; i++){ - - projectsUses[i] = projects[i].uses; - projectsFlags[i] = projects[i].flag; - -} - -for(var i=0; i < activeProjects.length; i++){ - - projectsActive[i] = activeProjects[i].id; - -} - - for(var i=0; i < allStrats.length; i++){ - - stratsActive[i] = allStrats[i].active; - -} - - var saveGame = { - - resetFlag: resetFlag, - - dismantle: dismantle, - endTimer1: endTimer1, - endTimer2: endTimer2, - endTimer3: endTimer3, - endTimer4: endTimer4, - endTimer5: endTimer5, - endTimer6: endTimer6, - - testFlag: testFlag, - finalClips: finalClips, - - wireBuyerStatus: wireBuyerStatus, - wirePriceTimer: wirePriceTimer, - qFade: qFade, - autoTourneyStatus: autoTourneyStatus, - driftKingMessageCost: driftKingMessageCost, - sliderPos: sliderPos, - tempOps: tempOps, - standardOps: standardOps, - opFade: opFade, - - entertainCost: entertainCost, - boredomLevel: boredomLevel, - boredomFlag: boredomFlag, - boredomMsg: boredomMsg, - - unitSize: unitSize, - driftersKilled: driftersKilled, - battleEndDelay: battleEndDelay, - battleEndTimer:battleEndTimer, - masterBattleClock: masterBattleClock, - - honorCount: honorCount, - threnodyTitle: threnodyTitle, - bonusHonor: bonusHonor, - honorReward: honorReward, - - resultsTimer: resultsTimer, - resultsFlag: resultsFlag, - - honor: honor, - maxTrust: maxTrust, - maxTrustCost: maxTrustCost, - disorgCounter: disorgCounter, - disorgFlag: disorgFlag, - synchCost: synchCost, - disorgMsg: disorgMsg, - threnodyCost: threnodyCost, - - farmRate: farmRate, - batterySize: batterySize, - factoryPowerRate: factoryPowerRate, - dronePowerRate: dronePowerRate, - farmLevel: farmLevel, - batteryLevel: batteryLevel, - farmCost: farmCost, - batteryCost: batteryCost, - storedPower: storedPower, - powMod: powMod, - farmBill: farmBill, - 
batteryBill: batteryBill, - momentum: momentum, - - swarmFlag: swarmFlag, - swarmStatus: swarmStatus, - swarmGifts: swarmGifts, - nextGift: nextGift, - giftPeriod: giftPeriod, - giftCountdown: giftCountdown, - elapsedTime: elapsedTime, - - maxFactoryLevel: maxFactoryLevel, - maxDroneLevel: maxDroneLevel, - - wirePriceCounter: wirePriceCounter, - wireBasePrice: wireBasePrice, - - egoFlag: egoFlag, - autoTourneyFlag: autoTourneyFlag, - tothFlag: tothFlag, - - - incomeTracker: incomeTracker.slice(0), - qChips: qChips.slice(0), - stocks: stocks.slice(0), - battles: battles.slice(0), - battleNumbers: battleNumbers.slice(0), - - clips: clips, - unusedClips: unusedClips, - clipRate: clipRate, - clipRateTemp: clipRateTemp, - prevClips: prevClips, - clipRateTracker: clipRateTracker, - clipmakerRate: clipmakerRate, - clipmakerLevel: clipmakerLevel, - clipperCost: clipperCost, - unsoldClips: unsoldClips, - funds: funds, - margin: margin, - wire: wire, - wireCost: wireCost, - adCost: adCost, - demand: demand, - clipsSold: clipsSold, - avgRev: avgRev, - ticks: ticks, - marketing: marketing, - marketingLvl: marketingLvl, - x: x, - clippperCost: clippperCost, - processors: processors, - memory: memory, - operations: operations, - trust: trust, - nextTrust: nextTrust, - transaction: transaction, - clipperBoost: clipperBoost, - blinkCounter: blinkCounter, - creativity: creativity, - creativityOn: creativityOn, - safetyProjectOn: safetyProjectOn, - boostLvl: boostLvl, - wirePurchase: wirePurchase, - wireSupply: wireSupply, - marketingEffectiveness: marketingEffectiveness, - milestoneFlag: milestoneFlag, - bankroll: bankroll, - fib1: fib1, - fib2: fib2, - strategyEngineFlag: strategyEngineFlag, - investmentEngineFlag: investmentEngineFlag, - revPerSecFlag: revPerSecFlag, - compFlag: compFlag, - projectsFlag: projectsFlag, - autoClipperFlag: autoClipperFlag, - megaClipperFlag: megaClipperFlag, - megaClipperCost: megaClipperCost, - megaClipperLevel: megaClipperLevel, - megaClipperBoost: megaClipperBoost, - creativitySpeed: creativitySpeed, - creativityCounter: creativityCounter, - wireBuyerFlag: wireBuyerFlag, - demandBoost: demandBoost, - humanFlag: humanFlag, - trustFlag: trustFlag, - nanoWire: nanoWire, - creationFlag: creationFlag, - wireProductionFlag: wireProductionFlag, - spaceFlag: spaceFlag, - factoryFlag: factoryFlag, - harvesterFlag: harvesterFlag, - wireDroneFlag: wireDroneFlag, - factoryLevel: factoryLevel, - factoryBoost: factoryBoost, - droneBoost: droneBoost, - availableMatter: availableMatter, - acquiredMatter: acquiredMatter, - processedMatter: processedMatter, - harvesterLevel: harvesterLevel, - wireDroneLevel: wireDroneLevel, - factoryCost: factoryCost, - harvesterCost: harvesterCost, - wireDroneCost: wireDroneCost, - factoryRate: factoryRate, - harvesterRate: harvesterRate, - wireDroneRate: wireDroneRate, - harvesterBill: harvesterBill, - wireDroneBill: wireDroneBill, - factoryBill: factoryBill, - probeCount: probeCount, - totalMatter: totalMatter, - foundMatter: foundMatter, - qFlag: qFlag, - qClock: qClock, - qChipCost: qChipCost, - nextQchip: nextQchip, - bribe: bribe, - battleFlag: battleFlag, - - portfolioSize: portfolioSize, - stockID: stockID, - secTotal: secTotal, - portTotal: portTotal, - sellDelay: sellDelay, - riskiness: riskiness, - maxPort: maxPort, - m: m, - investLevel: investLevel, - stockGainThreshold: stockGainThreshold, - ledger: ledger, - stockReportCounter: stockReportCounter, - - tourneyCost: tourneyCost, - tourneyLvl: tourneyLvl, - stratCounter: stratCounter, - 
roundNum: roundNum, - hMove: hMove, - vMove: vMove, - hMovePrev: hMovePrev, - vMovePrev: vMovePrev, - aa: aa, - ab: ab, - ba: ba, - bb: bb, - rounds: rounds, - currentRound: currentRound, - rCounter: rCounter, - tourneyInProg: tourneyInProg, - winnerPtr: winnerPtr, - high: high, - pick: pick, - yomi: yomi, - yomiBoost: yomiBoost, - - probeSpeed: probeSpeed, - probeNav: probeNav, - probeRep: probeRep, - partialProbeSpawn: partialProbeSpawn, - probeHaz: probeHaz, - partialProbeHaz: partialProbeHaz, - probesLostHaz: probesLostHaz, - probesLostDrift: probesLostDrift, - probesLostCombat: probesLostCombat, - probeFac: probeFac, - probeWire: probeWire, - probeCombat: probeCombat, - attackSpeed: attackSpeed, - battleSpeed: battleSpeed, - attackSpeedFlag: attackSpeedFlag, - attackSpeedMod: attackSpeedMod, - probeDescendents: probeDescendents, - drifterCount: drifterCount, - warTrigger: warTrigger, - battleID: battleID, - battleName: battleName, - battleNameFlag: battleNameFlag, - maxBattles: maxBattles, - battleClock: battleClock, - battleAlarm: battleAlarm, - outcomeTimer: outcomeTimer, - drifterCombat: drifterCombat, - probeTrust: probeTrust, - probeUsedTrust: probeUsedTrust, - probeTrustCost: probeTrustCost, - probeLaunchLevel: probeLaunchLevel, - probeCost: probeCost - - } - - localStorage.setItem("saveGame2",JSON.stringify(saveGame)); - localStorage.setItem("saveProjectsUses2",JSON.stringify(projectsUses)); - localStorage.setItem("saveProjectsFlags2",JSON.stringify(projectsFlags)); - localStorage.setItem("saveProjectsActive2",JSON.stringify(projectsActive)); - localStorage.setItem("saveStratsActive2",JSON.stringify(stratsActive)); - -} - -function load() { - - var loadGame = JSON.parse(localStorage.getItem("saveGame")); - var loadProjectsUses = JSON.parse(localStorage.getItem("saveProjectsUses")); - var loadProjectsFlags = JSON.parse(localStorage.getItem("saveProjectsFlags")); - var loadProjectsActive = JSON.parse(localStorage.getItem("saveProjectsActive")); - var loadStratsActive = JSON.parse(localStorage.getItem("saveStratsActive")); - - for(var i=0; i < allStrats.length; i++){ - - allStrats[i].active = loadStratsActive[i]; - - } - - for(var i=1; i=0){ - displayProjects(projects[i]); - activeProjects.push(projects[i]); - } - - } - - - refresh(); - - if (resetFlag!=2){ - reset(); - } - -} - -function load1() { - - var loadGame = JSON.parse(localStorage.getItem("saveGame1")); - var loadProjectsUses = JSON.parse(localStorage.getItem("saveProjectsUses1")); - var loadProjectsFlags = JSON.parse(localStorage.getItem("saveProjectsFlags1")); - var loadProjectsActive = JSON.parse(localStorage.getItem("saveProjectsActive1")); - var loadStratsActive = JSON.parse(localStorage.getItem("saveStratsActive1")); - - - for(var i=0; i < projects.length; i++){ - - projects[i].uses = loadProjectsUses[i]; - projects[i].flag = loadProjectsFlags[i]; - - } - - for(var i=0; i < projects.length; i++){ - - if (loadProjectsActive.indexOf(projects[i].id)>=0){ - displayProjects(projects[i]); - activeProjects.push(projects[i]); - } - - } - - - for(var i=0; i < allStrats.length; i++){ - - allStrats[i].active = loadStratsActive[i]; - - } - - for(var i=1; i=0){ - displayProjects(projects[i]); - activeProjects.push(projects[i]); - } - - } - - - for(var i=0; i < allStrats.length; i++){ - - allStrats[i].active = loadStratsActive[i]; - - } - - for(var i=1; i ')[2] - - return generated_text - -def generate_images(text): - steps=50 - width=256 - height=256 - num_images=4 - diversity=4 - image_bytes = image_gen(text, steps, width, 
height, num_images, diversity) - - # Algo from spaces/Gradio-Blocks/latent_gpt2_story/blob/main/app.py - generated_images = [] - for image in image_bytes[1]: - image_str = image[0] - image_str = image_str.replace("data:image/png;base64,","") - decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8")) - img = Image.open(io.BytesIO(decoded_bytes)) - generated_images.append(img) - - return generated_images - -def generate_interpolation(text): - times_to_interpolate = 4 - - generated_images = generate_images(text) - - generated_images[0].save('frame_0.png') - generated_images[1].save('frame_1.png') - generated_images[2].save('frame_2.png') - generated_images[3].save('frame_3.png') - - input_frames = ["frame_0.png", "frame_1.png", "frame_2.png", "frame_3.png"] - - frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator)) - - mediapy.write_video("out.mp4", frames, fps=7) - - return "out.mp4" - - - -demo = gr.Blocks() - -with demo: - input_start_text = gr.Textbox(placeholder='A teddy bear outer space', label="Starting Text") - button_gen_video = gr.Button("Generate Video") - output_interpolation = gr.Video(label="Generated Video") - - button_gen_video.click(fn=generate_interpolation, inputs=input_start_text, outputs=output_interpolation) - -demo.launch(debug=True, enable_queue=True) \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/inference/image_to_image.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/inference/image_to_image.py deleted file mode 100644 index 86b46c4e606e039cb2ad80b341b2685694f883b4..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/inference/image_to_image.py +++ /dev/null @@ -1,9 +0,0 @@ -import warnings - -from diffusers import StableDiffusionImg2ImgPipeline # noqa F401 - - -warnings.warn( - "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" - " StableDiffusionImg2ImgPipeline` instead." -) diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/mulit_token_textual_inversion/multi_token_clip.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/mulit_token_textual_inversion/multi_token_clip.py deleted file mode 100644 index 4388771b840df36ffa3a986dc9a2ad81ac7ee425..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/mulit_token_textual_inversion/multi_token_clip.py +++ /dev/null @@ -1,103 +0,0 @@ -""" -The main idea for this code is to provide a way for users to not need to bother with the hassle of multiple tokens for a concept by typing -a photo of _0 _1 ... and so on -and instead just do -a photo of -which gets translated to the above. This needs to work for both inference and training. -For inference, -the tokenizer encodes the text. So, we would want logic for our tokenizer to replace the placeholder token with -it's underlying vectors -For training, -we would want to abstract away some logic like -1. Adding tokens -2. Updating gradient mask -3. Saving embeddings -to our Util class here. -so -TODO: -1. have tokenizer keep track of concept, multiconcept pairs and replace during encode call x -2. have mechanism for adding tokens x -3. have mech for saving emebeddings x -4. get mask to update x -5. Loading tokens from embedding x -6. Integrate to training x -7. 
Test -""" -import copy -import random - -from transformers import CLIPTokenizer - - -class MultiTokenCLIPTokenizer(CLIPTokenizer): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.token_map = {} - - def try_adding_tokens(self, placeholder_token, *args, **kwargs): - num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs) - if num_added_tokens == 0: - raise ValueError( - f"The tokenizer already contains the token {placeholder_token}. Please pass a different" - " `placeholder_token` that is not already in the tokenizer." - ) - - def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs): - output = [] - if num_vec_per_token == 1: - self.try_adding_tokens(placeholder_token, *args, **kwargs) - output.append(placeholder_token) - else: - output = [] - for i in range(num_vec_per_token): - ith_token = placeholder_token + f"_{i}" - self.try_adding_tokens(ith_token, *args, **kwargs) - output.append(ith_token) - # handle cases where there is a new placeholder token that contains the current placeholder token but is larger - for token in self.token_map: - if token in placeholder_token: - raise ValueError( - f"The tokenizer already has placeholder token {token} that can get confused with" - f" {placeholder_token}keep placeholder tokens independent" - ) - self.token_map[placeholder_token] = output - - def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0): - """ - Here, we replace the placeholder tokens in text recorded in token_map so that the text_encoder - can encode them - vector_shuffle was inspired by https://github.com/rinongal/textual_inversion/pull/119 - where shuffling tokens were found to force the model to learn the concepts more descriptively. 
- """ - if isinstance(text, list): - output = [] - for i in range(len(text)): - output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle)) - return output - for placeholder_token in self.token_map: - if placeholder_token in text: - tokens = self.token_map[placeholder_token] - tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)] - if vector_shuffle: - tokens = copy.copy(tokens) - random.shuffle(tokens) - text = text.replace(placeholder_token, " ".join(tokens)) - return text - - def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs): - return super().__call__( - self.replace_placeholder_tokens_in_text( - text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load - ), - *args, - **kwargs, - ) - - def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs): - return super().encode( - self.replace_placeholder_tokens_in_text( - text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load - ), - *args, - **kwargs, - ) diff --git a/spaces/pat229988/NLP-Audio-summarizer/README.md b/spaces/pat229988/NLP-Audio-summarizer/README.md deleted file mode 100644 index a10bf7626ff096873f0eb6963d24040b45afbb03..0000000000000000000000000000000000000000 --- a/spaces/pat229988/NLP-Audio-summarizer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: NLP Audio Summarizer -emoji: 👀 -colorFrom: purple -colorTo: red -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pikto/Elite-freegpt-webui/g4f/Provider/Providers/ChatgptAi.py b/spaces/pikto/Elite-freegpt-webui/g4f/Provider/Providers/ChatgptAi.py deleted file mode 100644 index 46605175d1ac94fcde252b53ddb81ba99f15706e..0000000000000000000000000000000000000000 --- a/spaces/pikto/Elite-freegpt-webui/g4f/Provider/Providers/ChatgptAi.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import requests, re -from ...typing import sha256, Dict, get_type_hints - -url = 'https://chatgpt.ai/gpt-4/' -model = ['gpt-4'] -supports_stream = True -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - chat = '' - for message in messages: - chat += '%s: %s\n' % (message['role'], message['content']) - chat += 'assistant: ' - - response = requests.get('https://chatgpt.ai/') - nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', response.text)[0] - - headers = { - 'authority': 'chatgpt.ai', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control': 'no-cache', - 'origin': 'https://chatgpt.ai', - 'pragma': 'no-cache', - 'referer': 'https://chatgpt.ai/gpt-4/', - 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36', - } - data = { - '_wpnonce': nonce, - 'post_id': post_id, - 'url': 'https://chatgpt.ai/gpt-4', - 'action': 'wpaicg_chat_shortcode_message', - 'message': chat, - 'bot_id': bot_id - } - - response = requests.post('https://chatgpt.ai/wp-admin/admin-ajax.php', - headers=headers, data=data) - - 
yield (response.json()['data']) - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py deleted file mode 100644 index 2cba4b0708032d62b4c1278f99e5db87ed8d90fe..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py +++ /dev/null @@ -1,39 +0,0 @@ -# SPDX-FileCopyrightText: 2015 Eric Larson -# -# SPDX-License-Identifier: Apache-2.0 - -from __future__ import division - -from datetime import datetime -from pip._vendor.cachecontrol.cache import BaseCache - - -class RedisCache(BaseCache): - - def __init__(self, conn): - self.conn = conn - - def get(self, key): - return self.conn.get(key) - - def set(self, key, value, expires=None): - if not expires: - self.conn.set(key, value) - elif isinstance(expires, datetime): - expires = expires - datetime.utcnow() - self.conn.setex(key, int(expires.total_seconds()), value) - else: - self.conn.setex(key, expires, value) - - def delete(self, key): - self.conn.delete(key) - - def clear(self): - """Helper for clearing all the keys in a database. Use with - caution!""" - for key in self.conn.keys(): - self.conn.delete(key) - - def close(self): - """Redis uses connection pooling, no need to close the connection.""" - pass diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/big5prober.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/big5prober.py deleted file mode 100644 index ef09c60e327a0122e32f95f2f10a826a033c573c..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/big5prober.py +++ /dev/null @@ -1,47 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .chardistribution import Big5DistributionAnalysis -from .codingstatemachine import CodingStateMachine -from .mbcharsetprober import MultiByteCharSetProber -from .mbcssm import BIG5_SM_MODEL - - -class Big5Prober(MultiByteCharSetProber): - def __init__(self) -> None: - super().__init__() - self.coding_sm = CodingStateMachine(BIG5_SM_MODEL) - self.distribution_analyzer = Big5DistributionAnalysis() - self.reset() - - @property - def charset_name(self) -> str: - return "Big5" - - @property - def language(self) -> str: - return "Chinese" diff --git a/spaces/prerna9811/Chord/portaudio/examples/paex_wmme_surround.c b/spaces/prerna9811/Chord/portaudio/examples/paex_wmme_surround.c deleted file mode 100644 index 55fc2551316fc00effee811c5c9ddf0ba8f49d76..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/examples/paex_wmme_surround.c +++ /dev/null @@ -1,210 +0,0 @@ -/** @file paex_wmme_surround.c - @ingroup examples_src - @brief Use WMME-specific channelMask to request 5.1 surround sound output. - @author Ross Bencina -*/ -/* - * $Id: $ - * Portable Audio I/O Library - * Windows MME surround sound output test - * - * Copyright (c) 2007 Ross Bencina - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. 
- */ - -#include -#include - -#include /* required when using pa_win_wmme.h */ -#include /* required when using pa_win_wmme.h */ - -#include "portaudio.h" -#include "pa_win_wmme.h" - -#define NUM_SECONDS (12) -#define SAMPLE_RATE (44100) -#define FRAMES_PER_BUFFER (64) - -#ifndef M_PI -#define M_PI (3.14159265) -#endif - -#define TABLE_SIZE (100) - -#define CHANNEL_COUNT (6) - - - -typedef struct -{ - float sine[TABLE_SIZE]; - int phase; - int currentChannel; - int cycleCount; -} -paTestData; - -/* This routine will be called by the PortAudio engine when audio is needed. -** It may called at interrupt level on some machines so don't do anything -** that could mess up the system like calling malloc() or free(). -*/ -static int patestCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - paTestData *data = (paTestData*)userData; - float *out = (float*)outputBuffer; - unsigned long i,j; - - (void) timeInfo; /* Prevent unused variable warnings. */ - (void) statusFlags; - (void) inputBuffer; - - for( i=0; icurrentChannel && data->cycleCount < 4410 ){ - *out++ = data->sine[data->phase]; - data->phase += 1 + j; // play each channel at a different pitch so they can be distinguished - if( data->phase >= TABLE_SIZE ){ - data->phase -= TABLE_SIZE; - } - }else{ - *out++ = 0; - } - } - - data->cycleCount++; - if( data->cycleCount > 44100 ){ - data->cycleCount = 0; - - ++data->currentChannel; - if( data->currentChannel >= CHANNEL_COUNT ) - data->currentChannel -= CHANNEL_COUNT; - } - } - - return paContinue; -} - -/*******************************************************************/ -int main(int argc, char* argv[]) -{ - PaStreamParameters outputParameters; - PaWinMmeStreamInfo wmmeStreamInfo; - PaStream *stream; - PaError err; - paTestData data; - int i; - int deviceIndex; - - printf("PortAudio Test: output a sine blip on each channel. SR = %d, BufSize = %d, Chans = %d\n", SAMPLE_RATE, FRAMES_PER_BUFFER, CHANNEL_COUNT); - - err = Pa_Initialize(); - if( err != paNoError ) goto error; - - deviceIndex = Pa_GetHostApiInfo( Pa_HostApiTypeIdToHostApiIndex( paMME ) )->defaultOutputDevice; - if( argc == 2 ){ - sscanf( argv[1], "%d", &deviceIndex ); - } - - printf( "using device id %d (%s)\n", deviceIndex, Pa_GetDeviceInfo(deviceIndex)->name ); - - /* initialise sinusoidal wavetable */ - for( i=0; idefaultLowOutputLatency; - outputParameters.hostApiSpecificStreamInfo = NULL; - - /* it's not strictly necessary to provide a channelMask for surround sound - output. 
But if you want to be sure which channel mask PortAudio will use - then you should supply one */ - wmmeStreamInfo.size = sizeof(PaWinMmeStreamInfo); - wmmeStreamInfo.hostApiType = paMME; - wmmeStreamInfo.version = 1; - wmmeStreamInfo.flags = paWinMmeUseChannelMask; - wmmeStreamInfo.channelMask = PAWIN_SPEAKER_5POINT1; /* request 5.1 output format */ - outputParameters.hostApiSpecificStreamInfo = &wmmeStreamInfo; - - - if( Pa_IsFormatSupported( 0, &outputParameters, SAMPLE_RATE ) == paFormatIsSupported ){ - printf( "Pa_IsFormatSupported reports device will support %d channels.\n", CHANNEL_COUNT ); - }else{ - printf( "Pa_IsFormatSupported reports device will not support %d channels.\n", CHANNEL_COUNT ); - } - - err = Pa_OpenStream( - &stream, - NULL, /* no input */ - &outputParameters, - SAMPLE_RATE, - FRAMES_PER_BUFFER, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - patestCallback, - &data ); - if( err != paNoError ) goto error; - - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error; - - printf("Play for %d seconds.\n", NUM_SECONDS ); - Pa_Sleep( NUM_SECONDS * 1000 ); - - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error; - - err = Pa_CloseStream( stream ); - if( err != paNoError ) goto error; - - Pa_Terminate(); - printf("Test finished.\n"); - - return err; -error: - Pa_Terminate(); - fprintf( stderr, "An error occurred while using the portaudio stream\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - return err; -} diff --git a/spaces/probing-vits/attention-heat-maps/utils.py b/spaces/probing-vits/attention-heat-maps/utils.py deleted file mode 100644 index d275dba2e172f858807fed565c870e213aed5a25..0000000000000000000000000000000000000000 --- a/spaces/probing-vits/attention-heat-maps/utils.py +++ /dev/null @@ -1,100 +0,0 @@ -# import the necessary packages -import tensorflow as tf -from tensorflow import keras -from tensorflow.keras import layers - -from PIL import Image -from io import BytesIO -import requests -import numpy as np -from matplotlib import pyplot as plt - - -RESOLUTION = 224 -PATCH_SIZE = 16 - -crop_layer = layers.CenterCrop(RESOLUTION, RESOLUTION) -norm_layer = layers.Normalization( - mean=[0.485 * 255, 0.456 * 255, 0.406 * 255], - variance=[(0.229 * 255) ** 2, (0.224 * 255) ** 2, (0.225 * 255) ** 2], -) -rescale_layer = layers.Rescaling(scale=1./127.5, offset=-1) - - -def preprocess_image(image, model_type, size=RESOLUTION): - # Turn the image into a numpy array and add batch dim. - image = np.array(image) - image = tf.expand_dims(image, 0) - - # If model type is vit rescale the image to [-1, 1]. - if model_type == "original_vit": - image = rescale_layer(image) - - # Resize the image using bicubic interpolation. - resize_size = int((256 / 224) * size) - image = tf.image.resize( - image, - (resize_size, resize_size), - method="bicubic" - ) - - # Crop the image. - image = crop_layer(image) - - # If model type is DeiT or DINO normalize the image. 
- if model_type != "original_vit": - image = norm_layer(image) - - return image.numpy() - - -def load_image_from_url(url, model_type): - # Credit: Willi Gierke - response = requests.get(url) - image = Image.open(BytesIO(response.content)) - preprocessed_image = preprocess_image(image, model_type) - return image, preprocessed_image - - -def attention_heatmap(attention_score_dict, image, model_type="dino", num_heads=12): - num_tokens = 2 if "distilled" in model_type else 1 - - # Sort the transformer blocks in order of their depth. - attention_score_list = list(attention_score_dict.keys()) - attention_score_list.sort(key=lambda x: int(x.split("_")[-2]), reverse=True) - - # Process the attention maps for overlay. - w_featmap = image.shape[2] // PATCH_SIZE - h_featmap = image.shape[1] // PATCH_SIZE - attention_scores = attention_score_dict[attention_score_list[0]] - - # Taking the representations from CLS token. - attentions = attention_scores[0, :, 0, num_tokens:].reshape(num_heads, -1) - - # Reshape the attention scores to resemble mini patches. - attentions = attentions.reshape(num_heads, w_featmap, h_featmap) - attentions = attentions.transpose((1, 2, 0)) - - # Resize the attention patches to 224x224 (224: 14x16). - attentions = tf.image.resize(attentions, size=( - h_featmap * PATCH_SIZE, - w_featmap * PATCH_SIZE) - ) - return attentions - - -def plot(attentions, image): - fig, axes = plt.subplots(nrows=3, ncols=4, figsize=(13, 13)) - img_count = 0 - - for i in range(3): - for j in range(4): - if img_count < len(attentions): - axes[i, j].imshow(image[0]) - axes[i, j].imshow(attentions[..., img_count], cmap="inferno", alpha=0.6) - axes[i, j].title.set_text(f"Attention head: {img_count}") - axes[i, j].axis("off") - img_count += 1 - - plt.tight_layout() - plt.savefig("heat_map.png") \ No newline at end of file diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/colorama/tests/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/colorama/tests/__init__.py deleted file mode 100644 index 8c5661e93a205bf4fb22404d4fc50f902cc31369..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/colorama/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
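Stepping back to the attention heat-map helpers in utils.py (the probing-vits/attention-heat-maps space) shown above: they expect a ViT that returns a dictionary of per-block attention scores keyed by layer name. The sketch below is an illustration only; the `load_model` helper and the image URL are assumptions, not part of the deleted space.

# Hedged usage sketch for the utils.py helpers above. `load_model` and the
# image URL are placeholders; the real space wires this up in its app code.
from utils import load_image_from_url, attention_heatmap, plot

model_type = "dino"
vit = load_model(model_type)  # hypothetical: a TF ViT whose predict() returns
                              # (logits, attention_score_dict)

image, preprocessed = load_image_from_url(
    "https://example.com/dog.jpg", model_type  # placeholder URL
)
_, attention_score_dict = vit.predict(preprocessed)

# Resize the per-head CLS-token attention maps to image resolution and overlay them.
attentions = attention_heatmap(attention_score_dict, preprocessed, model_type=model_type)
plot(attentions, preprocessed)  # saves heat_map.png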
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/cu2qu/__main__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/cu2qu/__main__.py deleted file mode 100644 index 084bf8f960db3d4ded95921ee9d7cbd2a7fb9f4a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/cu2qu/__main__.py +++ /dev/null @@ -1,6 +0,0 @@ -import sys -from .cli import main - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Button-8a6aeb2c.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Button-8a6aeb2c.css deleted file mode 100644 index a22c91a1cb21dfd061e31a71ffe12c9f3528e899..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Button-8a6aeb2c.css +++ /dev/null @@ -1 +0,0 @@ -.block.svelte-90oupt{position:relative;margin:0;box-shadow:var(--block-shadow);border-width:var(--block-border-width);border-color:var(--block-border-color);border-radius:var(--block-radius);background:var(--block-background-fill);width:100%;line-height:var(--line-sm)}.block.border_focus.svelte-90oupt{border-color:var(--color-accent)}.padded.svelte-90oupt{padding:var(--block-padding)}.hidden.svelte-90oupt{display:none}.hide-container.svelte-90oupt{margin:0;box-shadow:none;--block-border-width:0;background:transparent;padding:0;overflow:visible}div.svelte-e8n7p6{margin-bottom:var(--spacing-lg);color:var(--block-info-text-color);font-weight:var(--block-info-text-weight);font-size:var(--block-info-text-size);line-height:var(--line-sm)}span.has-info.svelte-1gfkn6j{margin-bottom:var(--spacing-xs)}span.svelte-1gfkn6j:not(.has-info){margin-bottom:var(--spacing-lg)}span.svelte-1gfkn6j{display:inline-block;position:relative;z-index:var(--layer-4);border:solid var(--block-title-border-width) var(--block-title-border-color);border-radius:var(--block-title-radius);background:var(--block-title-background-fill);padding:var(--block-title-padding);color:var(--block-title-text-color);font-weight:var(--block-title-text-weight);font-size:var(--block-title-text-size);line-height:var(--line-sm)}.hide.svelte-1gfkn6j{margin:0;height:0}label.svelte-1b6s6s{display:inline-flex;align-items:center;z-index:var(--layer-2);box-shadow:var(--block-label-shadow);border:var(--block-label-border-width) solid var(--border-color-primary);border-top:none;border-left:none;border-radius:var(--block-label-radius);background:var(--block-label-background-fill);padding:var(--block-label-padding);pointer-events:none;color:var(--block-label-text-color);font-weight:var(--block-label-text-weight);font-size:var(--block-label-text-size);line-height:var(--line-sm)}.gr-group label.svelte-1b6s6s{border-top-left-radius:0}label.float.svelte-1b6s6s{position:absolute;top:var(--block-label-margin);left:var(--block-label-margin)}label.svelte-1b6s6s:not(.float){position:static;margin-top:var(--block-label-margin);margin-left:var(--block-label-margin)}.hide.svelte-1b6s6s{height:0}span.svelte-1b6s6s{opacity:.8;margin-right:var(--size-2);width:calc(var(--block-label-text-size) - 1px);height:calc(var(--block-label-text-size) - 
1px)}.hide-label.svelte-1b6s6s{box-shadow:none;border-width:0;background:transparent;overflow:visible}button.svelte-17yhekk{display:flex;justify-content:center;align-items:center;gap:1px;z-index:var(--layer-2);border-radius:var(--radius-sm);color:var(--block-label-text-color);border:1px solid transparent}.padded.svelte-17yhekk{padding:2px;background:var(--background-fill-primary);box-shadow:var(--shadow-drop);border:1px solid var(--button-secondary-border-color)}button.svelte-17yhekk:hover{cursor:pointer;color:var(--color-accent)}.padded.svelte-17yhekk:hover{border:2px solid var(--button-secondary-border-color-hover);padding:1px;color:var(--block-label-text-color)}span.svelte-17yhekk{padding:0 1px;font-size:10px}div.svelte-17yhekk{padding:2px;display:flex;align-items:flex-end}.small.svelte-17yhekk{width:14px;height:14px}.large.svelte-17yhekk{width:22px;height:22px}.pending.svelte-17yhekk{animation:svelte-17yhekk-flash .5s infinite}@keyframes svelte-17yhekk-flash{0%{opacity:.5}50%{opacity:1}to{opacity:.5}}.empty.svelte-1oiin9d{display:flex;justify-content:center;align-items:center;margin-top:calc(0px - var(--size-6));height:var(--size-full)}.icon.svelte-1oiin9d{opacity:.5;height:var(--size-5);color:var(--body-text-color)}.small.svelte-1oiin9d{min-height:calc(var(--size-32) - 20px)}.large.svelte-1oiin9d{min-height:calc(var(--size-64) - 20px)}.unpadded_box.svelte-1oiin9d{margin-top:0}.small_parent.svelte-1oiin9d{min-height:100%!important}.wrap.svelte-b0hvie{display:flex;flex-direction:column;justify-content:center;align-items:center;min-height:var(--size-60);color:var(--block-label-text-color);line-height:var(--line-md);height:100%;padding-top:var(--size-3)}.or.svelte-b0hvie{color:var(--body-text-color-subdued);display:flex}.icon-wrap.svelte-b0hvie{width:30px;margin-bottom:var(--spacing-lg)}@media (min-width: 768px){.wrap.svelte-b0hvie{font-size:var(--text-lg)}}.hovered.svelte-b0hvie{color:var(--color-accent)}div.svelte-18gkr7n{border-top:1px solid transparent;display:flex;max-height:100%;justify-content:center;gap:var(--spacing-sm);height:auto;align-items:flex-end;box-shadow:var(--shadow-drop);padding:var(--spacing-xl) 0;color:var(--block-label-text-color);flex-shrink:0;width:95%}.show_border.svelte-18gkr7n{border-top:1px solid var(--block-border-color);margin-top:var(--spacing-xxl)}button.svelte-cmf5ev,a.svelte-cmf5ev{display:inline-flex;justify-content:center;align-items:center;transition:var(--button-transition);box-shadow:var(--button-shadow);padding:var(--size-0-5) var(--size-2);text-align:center}button.svelte-cmf5ev:hover,button[disabled].svelte-cmf5ev,a.svelte-cmf5ev:hover,a.disabled.svelte-cmf5ev{box-shadow:var(--button-shadow-hover)}button.svelte-cmf5ev:active,a.svelte-cmf5ev:active{box-shadow:var(--button-shadow-active)}button[disabled].svelte-cmf5ev,a.disabled.svelte-cmf5ev{opacity:.5;filter:grayscale(30%);cursor:not-allowed}.hidden.svelte-cmf5ev{display:none}.primary.svelte-cmf5ev{border:var(--button-border-width) solid var(--button-primary-border-color);background:var(--button-primary-background-fill);color:var(--button-primary-text-color)}.primary.svelte-cmf5ev:hover,.primary[disabled].svelte-cmf5ev{border-color:var(--button-primary-border-color-hover);background:var(--button-primary-background-fill-hover);color:var(--button-primary-text-color-hover)}.secondary.svelte-cmf5ev{border:var(--button-border-width) solid 
var(--button-secondary-border-color);background:var(--button-secondary-background-fill);color:var(--button-secondary-text-color)}.secondary.svelte-cmf5ev:hover,.secondary[disabled].svelte-cmf5ev{border-color:var(--button-secondary-border-color-hover);background:var(--button-secondary-background-fill-hover);color:var(--button-secondary-text-color-hover)}.stop.svelte-cmf5ev{border:var(--button-border-width) solid var(--button-cancel-border-color);background:var(--button-cancel-background-fill);color:var(--button-cancel-text-color)}.stop.svelte-cmf5ev:hover,.stop[disabled].svelte-cmf5ev{border-color:var(--button-cancel-border-color-hover);background:var(--button-cancel-background-fill-hover);color:var(--button-cancel-text-color-hover)}.sm.svelte-cmf5ev{border-radius:var(--button-small-radius);padding:var(--button-small-padding);font-weight:var(--button-small-text-weight);font-size:var(--button-small-text-size)}.lg.svelte-cmf5ev{border-radius:var(--button-large-radius);padding:var(--button-large-padding);font-weight:var(--button-large-text-weight);font-size:var(--button-large-text-size)}.button-icon.svelte-cmf5ev{width:var(--text-xl);height:var(--text-xl);margin-right:var(--spacing-xl)} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-c1adbfd9.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-c1adbfd9.js deleted file mode 100644 index 9714359d3b52a0fda4917b890d371e78bba8e797..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-c1adbfd9.js +++ /dev/null @@ -1,2 +0,0 @@ -import{B as ke}from"./Button-8eeccca1.js";import{B as fe}from"./BlockLabel-e3970ebb.js";import{E as ve}from"./Empty-eeaba2d1.js";import{S as pe}from"./Index-c74a8b7c.js";import{F as R}from"./File-d0b52941.js";import{U as Fe}from"./Upload-5621fe61.js";import{M as ye}from"./ModifyUpload-41fec6c8.js";import{n as Be}from"./index-50ad4c77.js";import{U as Se}from"./UploadText-21079d8e.js";import{default as tl}from"./Example-6be916c4.js";/* empty css */import"./IconButton-0233c52d.js";import"./Clear-2c7bae91.js";import"./svelte/svelte.js";const Ue=n=>{let e=["B","KB","MB","GB","PB"],i=0;for(;n>1024;)n/=1024,i++;let t=e[i];return n.toFixed(1)+" "+t},Y=n=>{var e;e=n.orig_name;const i=30;if(e.length>i){const t=e.substring(0,i),l=e.lastIndexOf(".");if(l!==-1){const s=e.slice(l);return`${t}..${s}`}return t}return e},Z=n=>{var e=0;if(Array.isArray(n))for(var i of n)i.size!==void 0&&(e+=i.size);else e=n.size||0;return Ue(e)};const{HtmlTag:qe,SvelteComponent:Ce,append:F,attr:k,destroy_each:ze,detach:J,element:B,ensure_array_like:$,init:Ae,insert:T,listen:Ee,noop:x,safe_not_equal:Ne,set_data:_e,set_style:ee,space:te,text:V,toggle_class:le}=window.__gradio__svelte__internal,{createEventDispatcher:Pe}=window.__gradio__svelte__internal;function ne(n,e,i){const t=n.slice();return t[6]=e[i],t[8]=i,t}function Ie(n){let e=n[3]("file.uploading")+"",i;return{c(){i=V(e)},m(t,l){T(t,i,l)},p(t,l){l&8&&e!==(e=t[3]("file.uploading")+"")&&_e(i,e)},d(t){t&&J(i)}}}function De(n){let e,i,t=Z(n[6])+"",l,s,a;return{c(){e=B("a"),i=new qe(!1),l=V(" 
⇣"),i.a=l,k(e,"href",s=n[6].url),k(e,"target","_blank"),k(e,"download",a=window.__is_colab__?null:n[6].orig_name),k(e,"class","svelte-1lop6bc")},m(_,r){T(_,e,r),i.m(t,e),F(e,l)},p(_,r){r&1&&t!==(t=Z(_[6])+"")&&i.p(t),r&1&&s!==(s=_[6].url)&&k(e,"href",s),r&1&&a!==(a=window.__is_colab__?null:_[6].orig_name)&&k(e,"download",a)},d(_){_&&J(e)}}}function ie(n){let e,i,t=Y(n[6])+"",l,s,a,_,r,c;function o(w,d){return w[6].url?De:Ie}let h=o(n),f=h(n);function g(){return n[5](n[6],n[8])}return{c(){e=B("tr"),i=B("td"),l=V(t),s=te(),a=B("td"),f.c(),_=te(),k(i,"class","svelte-1lop6bc"),k(a,"class","download svelte-1lop6bc"),k(e,"class","file svelte-1lop6bc"),le(e,"selectable",n[1])},m(w,d){T(w,e,d),F(e,i),F(i,l),F(e,s),F(e,a),f.m(a,null),F(e,_),r||(c=Ee(e,"click",g),r=!0)},p(w,d){n=w,d&1&&t!==(t=Y(n[6])+"")&&_e(l,t),h===(h=o(n))&&f?f.p(n,d):(f.d(1),f=h(n),f&&(f.c(),f.m(a,null))),d&2&&le(e,"selectable",n[1])},d(w){w&&J(e),f.d(),r=!1,c()}}}function Me(n){let e,i,t,l=$(Array.isArray(n[0])?n[0]:[n[0]]),s=[];for(let a=0;at("select",{value:c.orig_name,index:o});return n.$$set=c=>{"value"in c&&i(0,l=c.value),"selectable"in c&&i(1,s=c.selectable),"height"in c&&i(2,a=c.height),"i18n"in c&&i(3,_=c.i18n)},[l,s,a,_,t,r]}class je extends Ce{constructor(e){super(),Ae(this,e,Oe,Me,Ne,{value:0,selectable:1,height:2,i18n:3})}}const ue=je,{SvelteComponent:Je,bubble:Te,check_outros:Ge,create_component:G,destroy_component:H,detach:se,empty:He,group_outros:Ke,init:Le,insert:ae,mount_component:K,safe_not_equal:We,space:Qe,transition_in:A,transition_out:E}=window.__gradio__svelte__internal;function Re(n){let e,i;return e=new ve({props:{unpadded_box:!0,size:"large",$$slots:{default:[Xe]},$$scope:{ctx:n}}}),{c(){G(e.$$.fragment)},m(t,l){K(e,t,l),i=!0},p(t,l){const s={};l&128&&(s.$$scope={dirty:l,ctx:t}),e.$set(s)},i(t){i||(A(e.$$.fragment,t),i=!0)},o(t){E(e.$$.fragment,t),i=!1},d(t){H(e,t)}}}function Ve(n){let e,i;return e=new ue({props:{i18n:n[5],selectable:n[3],value:n[0],height:n[4]}}),e.$on("select",n[6]),{c(){G(e.$$.fragment)},m(t,l){K(e,t,l),i=!0},p(t,l){const s={};l&32&&(s.i18n=t[5]),l&8&&(s.selectable=t[3]),l&1&&(s.value=t[0]),l&16&&(s.height=t[4]),e.$set(s)},i(t){i||(A(e.$$.fragment,t),i=!0)},o(t){E(e.$$.fragment,t),i=!1},d(t){H(e,t)}}}function Xe(n){let e,i;return e=new R({}),{c(){G(e.$$.fragment)},m(t,l){K(e,t,l),i=!0},i(t){i||(A(e.$$.fragment,t),i=!0)},o(t){E(e.$$.fragment,t),i=!1},d(t){H(e,t)}}}function Ye(n){let e,i,t,l,s,a;e=new fe({props:{show_label:n[2],float:n[0]===null,Icon:R,label:n[1]||"File"}});const _=[Ve,Re],r=[];function c(o,h){return o[0]?0:1}return t=c(n),l=r[t]=_[t](n),{c(){G(e.$$.fragment),i=Qe(),l.c(),s=He()},m(o,h){K(e,o,h),ae(o,i,h),r[t].m(o,h),ae(o,s,h),a=!0},p(o,[h]){const f={};h&4&&(f.show_label=o[2]),h&1&&(f.float=o[0]===null),h&2&&(f.label=o[1]||"File"),e.$set(f);let g=t;t=c(o),t===g?r[t].p(o,h):(Ke(),E(r[g],1,1,()=>{r[g]=null}),Ge(),l=r[t],l?l.p(o,h):(l=r[t]=_[t](o),l.c()),A(l,1),l.m(s.parentNode,s))},i(o){a||(A(e.$$.fragment,o),A(l),a=!0)},o(o){E(e.$$.fragment,o),E(l),a=!1},d(o){o&&(se(i),se(s)),H(e,o),r[t].d(o)}}}function Ze(n,e,i){let{value:t=null}=e,{label:l}=e,{show_label:s=!0}=e,{selectable:a=!1}=e,{height:_=void 0}=e,{i18n:r}=e;function c(o){Te.call(this,n,o)}return n.$$set=o=>{"value"in o&&i(0,t=o.value),"label"in o&&i(1,l=o.label),"show_label"in o&&i(2,s=o.show_label),"selectable"in o&&i(3,a=o.selectable),"height"in o&&i(4,_=o.height),"i18n"in o&&i(5,r=o.i18n)},[t,l,s,a,_,r,c]}class $e extends 
Je{constructor(e){super(),Le(this,e,Ze,Ye,We,{value:0,label:1,show_label:2,selectable:3,height:4,i18n:5})}}const xe=$e,{SvelteComponent:et,add_flush_callback:tt,bind:lt,binding_callbacks:nt,bubble:it,check_outros:st,create_component:M,create_slot:at,destroy_component:O,detach:W,empty:ot,get_all_dirty_from_scope:rt,get_slot_changes:ft,group_outros:_t,init:ut,insert:Q,mount_component:j,safe_not_equal:ct,space:ce,transition_in:S,transition_out:U,update_slot_base:ht}=window.__gradio__svelte__internal,{createEventDispatcher:mt,tick:gt}=window.__gradio__svelte__internal;function bt(n){let e,i,t;function l(a){n[15](a)}let s={filetype:n[9],file_count:n[3],root:n[5],$$slots:{default:[wt]},$$scope:{ctx:n}};return n[8]!==void 0&&(s.dragging=n[8]),e=new Fe({props:s}),nt.push(()=>lt(e,"dragging",l)),e.$on("load",n[10]),{c(){M(e.$$.fragment)},m(a,_){j(e,a,_),t=!0},p(a,_){const r={};_&512&&(r.filetype=a[9]),_&8&&(r.file_count=a[3]),_&32&&(r.root=a[5]),_&65536&&(r.$$scope={dirty:_,ctx:a}),!i&&_&256&&(i=!0,r.dragging=a[8],tt(()=>i=!1)),e.$set(r)},i(a){t||(S(e.$$.fragment,a),t=!0)},o(a){U(e.$$.fragment,a),t=!1},d(a){O(e,a)}}}function dt(n){let e,i,t,l;return e=new ye({props:{i18n:n[7],absolute:!0}}),e.$on("clear",n[11]),t=new ue({props:{i18n:n[7],selectable:n[4],value:n[0],height:n[6]}}),t.$on("select",n[14]),{c(){M(e.$$.fragment),i=ce(),M(t.$$.fragment)},m(s,a){j(e,s,a),Q(s,i,a),j(t,s,a),l=!0},p(s,a){const _={};a&128&&(_.i18n=s[7]),e.$set(_);const r={};a&128&&(r.i18n=s[7]),a&16&&(r.selectable=s[4]),a&1&&(r.value=s[0]),a&64&&(r.height=s[6]),t.$set(r)},i(s){l||(S(e.$$.fragment,s),S(t.$$.fragment,s),l=!0)},o(s){U(e.$$.fragment,s),U(t.$$.fragment,s),l=!1},d(s){s&&W(i),O(e,s),O(t,s)}}}function wt(n){let e;const i=n[13].default,t=at(i,n,n[16],null);return{c(){t&&t.c()},m(l,s){t&&t.m(l,s),e=!0},p(l,s){t&&t.p&&(!e||s&65536)&&ht(t,i,l,l[16],e?ft(i,l[16],s,null):rt(l[16]),null)},i(l){e||(S(t,l),e=!0)},o(l){U(t,l),e=!1},d(l){t&&t.d(l)}}}function kt(n){let e,i,t,l,s,a;e=new fe({props:{show_label:n[2],Icon:R,float:n[0]===null,label:n[1]||"File"}});const _=[dt,bt],r=[];function c(o,h){return o[0]?0:1}return t=c(n),l=r[t]=_[t](n),{c(){M(e.$$.fragment),i=ce(),l.c(),s=ot()},m(o,h){j(e,o,h),Q(o,i,h),r[t].m(o,h),Q(o,s,h),a=!0},p(o,[h]){const f={};h&4&&(f.show_label=o[2]),h&1&&(f.float=o[0]===null),h&2&&(f.label=o[1]||"File"),e.$set(f);let g=t;t=c(o),t===g?r[t].p(o,h):(_t(),U(r[g],1,1,()=>{r[g]=null}),st(),l=r[t],l?l.p(o,h):(l=r[t]=_[t](o),l.c()),S(l,1),l.m(s.parentNode,s))},i(o){a||(S(e.$$.fragment,o),S(l),a=!0)},o(o){U(e.$$.fragment,o),U(l),a=!1},d(o){o&&(W(i),W(s)),O(e,o),r[t].d(o)}}}function vt(n,e,i){let{$$slots:t={},$$scope:l}=e,{value:s}=e,{label:a}=e,{show_label:_=!0}=e,{file_count:r="single"}=e,{file_types:c=null}=e,{selectable:o=!1}=e,{root:h}=e,{height:f=void 0}=e,{i18n:g}=e;async function w({detail:m}){i(0,s=m),await gt(),p("change",s),p("upload",m)}function d(){i(0,s=null),p("change",null),p("clear")}const p=mt();let z;c==null?z=null:(c=c.map(m=>m.startsWith(".")?m:m+"/*"),z=c.join(", "));let y=!1;function v(m){it.call(this,n,m)}function D(m){y=m,i(8,y)}return n.$$set=m=>{"value"in m&&i(0,s=m.value),"label"in m&&i(1,a=m.label),"show_label"in m&&i(2,_=m.show_label),"file_count"in m&&i(3,r=m.file_count),"file_types"in m&&i(12,c=m.file_types),"selectable"in m&&i(4,o=m.selectable),"root"in m&&i(5,h=m.root),"height"in m&&i(6,f=m.height),"i18n"in m&&i(7,g=m.i18n),"$$scope"in m&&i(16,l=m.$$scope)},n.$$.update=()=>{n.$$.dirty&256&&p("drag",y)},[s,a,_,r,o,h,f,g,y,z,w,d,c,t,v,D,l]}class pt extends 
et{constructor(e){super(),ut(this,e,vt,kt,ct,{value:0,label:1,show_label:2,file_count:3,file_types:12,selectable:4,root:5,height:6,i18n:7})}}const Ft=pt,{SvelteComponent:yt,assign:Bt,check_outros:St,create_component:N,destroy_component:P,detach:oe,empty:Ut,flush:b,get_spread_object:qt,get_spread_update:Ct,group_outros:zt,init:At,insert:re,mount_component:I,safe_not_equal:Et,space:Nt,transition_in:q,transition_out:C}=window.__gradio__svelte__internal;function Pt(n){let e,i;return e=new Ft({props:{label:n[6],show_label:n[7],value:n[17],file_count:n[15],file_types:n[16],selectable:n[9],root:n[5],height:n[8],i18n:n[14].i18n,$$slots:{default:[Dt]},$$scope:{ctx:n}}}),e.$on("change",n[22]),e.$on("drag",n[23]),e.$on("clear",n[24]),e.$on("select",n[25]),e.$on("upload",n[26]),{c(){N(e.$$.fragment)},m(t,l){I(e,t,l),i=!0},p(t,l){const s={};l&64&&(s.label=t[6]),l&128&&(s.show_label=t[7]),l&131072&&(s.value=t[17]),l&32768&&(s.file_count=t[15]),l&65536&&(s.file_types=t[16]),l&512&&(s.selectable=t[9]),l&32&&(s.root=t[5]),l&256&&(s.height=t[8]),l&16384&&(s.i18n=t[14].i18n),l&134234112&&(s.$$scope={dirty:l,ctx:t}),e.$set(s)},i(t){i||(q(e.$$.fragment,t),i=!0)},o(t){C(e.$$.fragment,t),i=!1},d(t){P(e,t)}}}function It(n){let e,i;return e=new xe({props:{selectable:n[9],value:n[17],label:n[6],show_label:n[7],height:n[8],i18n:n[14].i18n}}),e.$on("select",n[21]),{c(){N(e.$$.fragment)},m(t,l){I(e,t,l),i=!0},p(t,l){const s={};l&512&&(s.selectable=t[9]),l&131072&&(s.value=t[17]),l&64&&(s.label=t[6]),l&128&&(s.show_label=t[7]),l&256&&(s.height=t[8]),l&16384&&(s.i18n=t[14].i18n),e.$set(s)},i(t){i||(q(e.$$.fragment,t),i=!0)},o(t){C(e.$$.fragment,t),i=!1},d(t){P(e,t)}}}function Dt(n){let e,i;return e=new Se({props:{i18n:n[14].i18n,type:"file"}}),{c(){N(e.$$.fragment)},m(t,l){I(e,t,l),i=!0},p(t,l){const s={};l&16384&&(s.i18n=t[14].i18n),e.$set(s)},i(t){i||(q(e.$$.fragment,t),i=!0)},o(t){C(e.$$.fragment,t),i=!1},d(t){P(e,t)}}}function Mt(n){let e,i,t,l,s,a;const _=[{autoscroll:n[14].autoscroll},{i18n:n[14].i18n},n[10],{status:n[10]?.status||"complete"}];let r={};for(let f=0;f<_.length;f+=1)r=Bt(r,_[f]);e=new pe({props:r});const c=[It,Pt],o=[];function h(f,g){return f[4]?1:0}return t=h(n),l=o[t]=c[t](n),{c(){N(e.$$.fragment),i=Nt(),l.c(),s=Ut()},m(f,g){I(e,f,g),re(f,i,g),o[t].m(f,g),re(f,s,g),a=!0},p(f,g){const w=g&17408?Ct(_,[g&16384&&{autoscroll:f[14].autoscroll},g&16384&&{i18n:f[14].i18n},g&1024&&qt(f[10]),g&1024&&{status:f[10]?.status||"complete"}]):{};e.$set(w);let d=t;t=h(f),t===d?o[t].p(f,g):(zt(),C(o[d],1,1,()=>{o[d]=null}),St(),l=o[t],l?l.p(f,g):(l=o[t]=c[t](f),l.c()),q(l,1),l.m(s.parentNode,s))},i(f){a||(q(e.$$.fragment,f),q(l),a=!0)},o(f){C(e.$$.fragment,f),C(l),a=!1},d(f){f&&(oe(i),oe(s)),P(e,f),o[t].d(f)}}}function Ot(n){let e,i;return e=new ke({props:{visible:n[3],variant:n[0]===null?"dashed":"solid",border_mode:n[18]?"focus":"base",padding:!1,elem_id:n[1],elem_classes:n[2],container:n[11],scale:n[12],min_width:n[13],allow_overflow:!1,$$slots:{default:[Mt]},$$scope:{ctx:n}}}),{c(){N(e.$$.fragment)},m(t,l){I(e,t,l),i=!0},p(t,[l]){const s={};l&8&&(s.visible=t[3]),l&1&&(s.variant=t[0]===null?"dashed":"solid"),l&262144&&(s.border_mode=t[18]?"focus":"base"),l&2&&(s.elem_id=t[1]),l&4&&(s.elem_classes=t[2]),l&2048&&(s.container=t[11]),l&4096&&(s.scale=t[12]),l&8192&&(s.min_width=t[13]),l&134727665&&(s.$$scope={dirty:l,ctx:t}),e.$set(s)},i(t){i||(q(e.$$.fragment,t),i=!0)},o(t){C(e.$$.fragment,t),i=!1},d(t){P(e,t)}}}function jt(n,e,i){let 
t,{elem_id:l=""}=e,{elem_classes:s=[]}=e,{visible:a=!0}=e,{value:_}=e,{interactive:r}=e,{root:c}=e,{label:o}=e,{show_label:h}=e,{height:f=void 0}=e,{proxy_url:g}=e,{_selectable:w=!1}=e,{loading_status:d}=e,{container:p=!0}=e,{scale:z=null}=e,{min_width:y=void 0}=e,{gradio:v}=e,{file_count:D}=e,{file_types:m=["file"]}=e,L=t,X=!1;const he=({detail:u})=>v.dispatch("select",u),me=({detail:u})=>{i(0,_=u)},ge=({detail:u})=>i(18,X=u),be=()=>v.dispatch("clear"),de=({detail:u})=>v.dispatch("select",u),we=()=>v.dispatch("upload");return n.$$set=u=>{"elem_id"in u&&i(1,l=u.elem_id),"elem_classes"in u&&i(2,s=u.elem_classes),"visible"in u&&i(3,a=u.visible),"value"in u&&i(0,_=u.value),"interactive"in u&&i(4,r=u.interactive),"root"in u&&i(5,c=u.root),"label"in u&&i(6,o=u.label),"show_label"in u&&i(7,h=u.show_label),"height"in u&&i(8,f=u.height),"proxy_url"in u&&i(19,g=u.proxy_url),"_selectable"in u&&i(9,w=u._selectable),"loading_status"in u&&i(10,d=u.loading_status),"container"in u&&i(11,p=u.container),"scale"in u&&i(12,z=u.scale),"min_width"in u&&i(13,y=u.min_width),"gradio"in u&&i(14,v=u.gradio),"file_count"in u&&i(15,D=u.file_count),"file_types"in u&&i(16,m=u.file_types)},n.$$.update=()=>{n.$$.dirty&524321&&i(17,t=Be(_,c,g)),n.$$.dirty&1196032&&JSON.stringify(L)!==JSON.stringify(t)&&(v.dispatch("change"),i(20,L=t))},[_,l,s,a,r,c,o,h,f,w,d,p,z,y,v,D,m,t,X,g,L,he,me,ge,be,de,we]}class $t extends yt{constructor(e){super(),At(this,e,jt,Ot,Et,{elem_id:1,elem_classes:2,visible:3,value:0,interactive:4,root:5,label:6,show_label:7,height:8,proxy_url:19,_selectable:9,loading_status:10,container:11,scale:12,min_width:13,gradio:14,file_count:15,file_types:16})}get elem_id(){return this.$$.ctx[1]}set elem_id(e){this.$$set({elem_id:e}),b()}get elem_classes(){return this.$$.ctx[2]}set elem_classes(e){this.$$set({elem_classes:e}),b()}get visible(){return this.$$.ctx[3]}set visible(e){this.$$set({visible:e}),b()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),b()}get interactive(){return this.$$.ctx[4]}set interactive(e){this.$$set({interactive:e}),b()}get root(){return this.$$.ctx[5]}set root(e){this.$$set({root:e}),b()}get label(){return this.$$.ctx[6]}set label(e){this.$$set({label:e}),b()}get show_label(){return this.$$.ctx[7]}set show_label(e){this.$$set({show_label:e}),b()}get height(){return this.$$.ctx[8]}set height(e){this.$$set({height:e}),b()}get proxy_url(){return this.$$.ctx[19]}set proxy_url(e){this.$$set({proxy_url:e}),b()}get _selectable(){return this.$$.ctx[9]}set _selectable(e){this.$$set({_selectable:e}),b()}get loading_status(){return this.$$.ctx[10]}set loading_status(e){this.$$set({loading_status:e}),b()}get container(){return this.$$.ctx[11]}set container(e){this.$$set({container:e}),b()}get scale(){return this.$$.ctx[12]}set scale(e){this.$$set({scale:e}),b()}get min_width(){return this.$$.ctx[13]}set min_width(e){this.$$set({min_width:e}),b()}get gradio(){return this.$$.ctx[14]}set gradio(e){this.$$set({gradio:e}),b()}get file_count(){return this.$$.ctx[15]}set file_count(e){this.$$set({file_count:e}),b()}get file_types(){return this.$$.ctx[16]}set file_types(e){this.$$set({file_types:e}),b()}}export{tl as BaseExample,xe as BaseFile,Ft as BaseFileUpload,ue as FilePreview,$t as default}; -//# sourceMappingURL=Index-c1adbfd9.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/dsv-576afacd.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/dsv-576afacd.js deleted 
file mode 100644 index 832d450961d23fb14b577c045f0c24c61e74c4e6..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/dsv-576afacd.js +++ /dev/null @@ -1,6 +0,0 @@ -var D={},A={},E=34,m=10,R=13;function I(r){return new Function("d","return {"+r.map(function(t,e){return JSON.stringify(t)+": d["+e+'] || ""'}).join(",")+"}")}function B(r,t){var e=I(r);return function(a,c){return t(e(a),c,r)}}function F(r){var t=Object.create(null),e=[];return r.forEach(function(a){for(var c in a)c in t||e.push(t[c]=c)}),e}function f(r,t){var e=r+"",a=e.length;return a9999?"+"+f(r,6):f(r,4)}function S(r){var t=r.getUTCHours(),e=r.getUTCMinutes(),a=r.getUTCSeconds(),c=r.getUTCMilliseconds();return isNaN(r)?"Invalid Date":L(r.getUTCFullYear())+"-"+f(r.getUTCMonth()+1,2)+"-"+f(r.getUTCDate(),2)+(c?"T"+f(t,2)+":"+f(e,2)+":"+f(a,2)+"."+f(c,3)+"Z":a?"T"+f(t,2)+":"+f(e,2)+":"+f(a,2)+"Z":e||t?"T"+f(t,2)+":"+f(e,2)+"Z":"")}function Z(r){var t=new RegExp('["'+r+` -\r]`),e=r.charCodeAt(0);function a(n,o){var s,i,u=c(n,function(h,l){if(s)return s(h,l-1);i=h,s=o?B(h,o):I(h)});return u.columns=i||[],u}function c(n,o){var s=[],i=n.length,u=0,h=0,l,v=i<=0,C=!1;n.charCodeAt(i-1)===m&&--i,n.charCodeAt(i-1)===R&&--i;function w(){if(v)return A;if(C)return C=!1,D;var j,d=u,p;if(n.charCodeAt(d)===E){for(;u++=i?v=!0:(p=n.charCodeAt(u++))===m?C=!0:p===R&&(C=!0,n.charCodeAt(u)===m&&++u),n.slice(d+1,j-1).replace(/""/g,'"')}for(;u`_. - - As a rough estimate, a sliding window approach with an input size of `N` - and a window size of `W` will scale as `O(N*W)` where frequently a special - algorithm can achieve `O(N)`. That means that the sliding window variant - for a window size of 100 can be a 100 times slower than a more specialized - version. - - Nevertheless, for small window sizes, when no custom algorithm exists, or - as a prototyping and developing tool, this function can be a good solution. - - Examples - -------- - >>> x = np.arange(6) - >>> x.shape - (6,) - >>> v = sliding_window_view(x, 3) - >>> v.shape - (4, 3) - >>> v - array([[0, 1, 2], - [1, 2, 3], - [2, 3, 4], - [3, 4, 5]]) - - This also works in more dimensions, e.g. - - >>> i, j = np.ogrid[:3, :4] - >>> x = 10*i + j - >>> x.shape - (3, 4) - >>> x - array([[ 0, 1, 2, 3], - [10, 11, 12, 13], - [20, 21, 22, 23]]) - >>> shape = (2,2) - >>> v = sliding_window_view(x, shape) - >>> v.shape - (2, 3, 2, 2) - >>> v - array([[[[ 0, 1], - [10, 11]], - [[ 1, 2], - [11, 12]], - [[ 2, 3], - [12, 13]]], - [[[10, 11], - [20, 21]], - [[11, 12], - [21, 22]], - [[12, 13], - [22, 23]]]]) - - The axis can be specified explicitly: - - >>> v = sliding_window_view(x, 3, 0) - >>> v.shape - (1, 4, 3) - >>> v - array([[[ 0, 10, 20], - [ 1, 11, 21], - [ 2, 12, 22], - [ 3, 13, 23]]]) - - The same axis can be used several times. 
In that case, every use reduces - the corresponding original dimension: - - >>> v = sliding_window_view(x, (2, 3), (1, 1)) - >>> v.shape - (3, 1, 2, 3) - >>> v - array([[[[ 0, 1, 2], - [ 1, 2, 3]]], - [[[10, 11, 12], - [11, 12, 13]]], - [[[20, 21, 22], - [21, 22, 23]]]]) - - Combining with stepped slicing (`::step`), this can be used to take sliding - views which skip elements: - - >>> x = np.arange(7) - >>> sliding_window_view(x, 5)[:, ::2] - array([[0, 2, 4], - [1, 3, 5], - [2, 4, 6]]) - - or views which move by multiple elements - - >>> x = np.arange(7) - >>> sliding_window_view(x, 3)[::2, :] - array([[0, 1, 2], - [2, 3, 4], - [4, 5, 6]]) - - A common application of `sliding_window_view` is the calculation of running - statistics. The simplest example is the - `moving average `_: - - >>> x = np.arange(6) - >>> x.shape - (6,) - >>> v = sliding_window_view(x, 3) - >>> v.shape - (4, 3) - >>> v - array([[0, 1, 2], - [1, 2, 3], - [2, 3, 4], - [3, 4, 5]]) - >>> moving_average = v.mean(axis=-1) - >>> moving_average - array([1., 2., 3., 4.]) - - Note that a sliding window approach is often **not** optimal (see Notes). - """ - window_shape = (tuple(window_shape) - if np.iterable(window_shape) - else (window_shape,)) - # first convert input to array, possibly keeping subclass - x = np.array(x, copy=False, subok=subok) - - window_shape_array = np.array(window_shape) - if np.any(window_shape_array < 0): - raise ValueError('`window_shape` cannot contain negative values') - - if axis is None: - axis = tuple(range(x.ndim)) - if len(window_shape) != len(axis): - raise ValueError(f'Since axis is `None`, must provide ' - f'window_shape for all dimensions of `x`; ' - f'got {len(window_shape)} window_shape elements ' - f'and `x.ndim` is {x.ndim}.') - else: - axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True) - if len(window_shape) != len(axis): - raise ValueError(f'Must provide matching length window_shape and ' - f'axis; got {len(window_shape)} window_shape ' - f'elements and {len(axis)} axes elements.') - - out_strides = x.strides + tuple(x.strides[ax] for ax in axis) - - # note: same axis can be windowed repeatedly - x_shape_trimmed = list(x.shape) - for ax, dim in zip(axis, window_shape): - if x_shape_trimmed[ax] < dim: - raise ValueError( - 'window shape cannot be larger than input array shape') - x_shape_trimmed[ax] -= dim - 1 - out_shape = tuple(x_shape_trimmed) + window_shape - return as_strided(x, strides=out_strides, shape=out_shape, - subok=subok, writeable=writeable) - - -def _broadcast_to(array, shape, subok, readonly): - shape = tuple(shape) if np.iterable(shape) else (shape,) - array = np.array(array, copy=False, subok=subok) - if not shape and array.shape: - raise ValueError('cannot broadcast a non-scalar to a scalar array') - if any(size < 0 for size in shape): - raise ValueError('all elements of broadcast shape must be non-' - 'negative') - extras = [] - it = np.nditer( - (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras, - op_flags=['readonly'], itershape=shape, order='C') - with it: - # never really has writebackifcopy semantics - broadcast = it.itviews[0] - result = _maybe_view_as_subclass(array, broadcast) - # In a future version this will go away - if not readonly and array.flags._writeable_no_warn: - result.flags.writeable = True - result.flags._warn_on_write = True - return result - - -def _broadcast_to_dispatcher(array, shape, subok=None): - return (array,) - - -@array_function_dispatch(_broadcast_to_dispatcher, module='numpy') -def 
broadcast_to(array, shape, subok=False): - """Broadcast an array to a new shape. - - Parameters - ---------- - array : array_like - The array to broadcast. - shape : tuple or int - The shape of the desired array. A single integer ``i`` is interpreted - as ``(i,)``. - subok : bool, optional - If True, then sub-classes will be passed-through, otherwise - the returned array will be forced to be a base-class array (default). - - Returns - ------- - broadcast : array - A readonly view on the original array with the given shape. It is - typically not contiguous. Furthermore, more than one element of a - broadcasted array may refer to a single memory location. - - Raises - ------ - ValueError - If the array is not compatible with the new shape according to NumPy's - broadcasting rules. - - See Also - -------- - broadcast - broadcast_arrays - broadcast_shapes - - Notes - ----- - .. versionadded:: 1.10.0 - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> np.broadcast_to(x, (3, 3)) - array([[1, 2, 3], - [1, 2, 3], - [1, 2, 3]]) - """ - return _broadcast_to(array, shape, subok=subok, readonly=True) - - -def _broadcast_shape(*args): - """Returns the shape of the arrays that would result from broadcasting the - supplied arrays against each other. - """ - # use the old-iterator because np.nditer does not handle size 0 arrays - # consistently - b = np.broadcast(*args[:32]) - # unfortunately, it cannot handle 32 or more arguments directly - for pos in range(32, len(args), 31): - # ironically, np.broadcast does not properly handle np.broadcast - # objects (it treats them as scalars) - # use broadcasting to avoid allocating the full array - b = broadcast_to(0, b.shape) - b = np.broadcast(b, *args[pos:(pos + 31)]) - return b.shape - - -@set_module('numpy') -def broadcast_shapes(*args): - """ - Broadcast the input shapes into a single shape. - - :ref:`Learn more about broadcasting here `. - - .. versionadded:: 1.20.0 - - Parameters - ---------- - `*args` : tuples of ints, or ints - The shapes to be broadcast against each other. - - Returns - ------- - tuple - Broadcasted shape. - - Raises - ------ - ValueError - If the shapes are not compatible and cannot be broadcast according - to NumPy's broadcasting rules. - - See Also - -------- - broadcast - broadcast_arrays - broadcast_to - - Examples - -------- - >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) - (3, 2) - - >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) - (5, 6, 7) - """ - arrays = [np.empty(x, dtype=[]) for x in args] - return _broadcast_shape(*arrays) - - -def _broadcast_arrays_dispatcher(*args, subok=None): - return args - - -@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy') -def broadcast_arrays(*args, subok=False): - """ - Broadcast any number of arrays against each other. - - Parameters - ---------- - `*args` : array_likes - The arrays to broadcast. - - subok : bool, optional - If True, then sub-classes will be passed-through, otherwise - the returned arrays will be forced to be a base-class array (default). - - Returns - ------- - broadcasted : list of arrays - These arrays are views on the original arrays. They are typically - not contiguous. Furthermore, more than one element of a - broadcasted array may refer to a single memory location. If you need - to write to the arrays, make copies first. While you can set the - ``writable`` flag True, writing to a single output value may end up - changing more than one location in the output array. - - .. 
deprecated:: 1.17 - The output is currently marked so that if written to, a deprecation - warning will be emitted. A future version will set the - ``writable`` flag False so writing to it will raise an error. - - See Also - -------- - broadcast - broadcast_to - broadcast_shapes - - Examples - -------- - >>> x = np.array([[1,2,3]]) - >>> y = np.array([[4],[5]]) - >>> np.broadcast_arrays(x, y) - [array([[1, 2, 3], - [1, 2, 3]]), array([[4, 4, 4], - [5, 5, 5]])] - - Here is a useful idiom for getting contiguous copies instead of - non-contiguous views. - - >>> [np.array(a) for a in np.broadcast_arrays(x, y)] - [array([[1, 2, 3], - [1, 2, 3]]), array([[4, 4, 4], - [5, 5, 5]])] - - """ - # nditer is not used here to avoid the limit of 32 arrays. - # Otherwise, something like the following one-liner would suffice: - # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], - # order='C').itviews - - args = [np.array(_m, copy=False, subok=subok) for _m in args] - - shape = _broadcast_shape(*args) - - if all(array.shape == shape for array in args): - # Common case where nothing needs to be broadcasted. - return args - - return [_broadcast_to(array, shape, subok=subok, readonly=False) - for array in args] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/methods/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/methods/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/io/json/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/io/json/__init__.py deleted file mode 100644 index ff19cf6e9d4cccbeeda07fbaca7f23e37a45924b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/io/json/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from pandas.io.json._json import ( - read_json, - to_json, - ujson_dumps as dumps, - ujson_loads as loads, -) -from pandas.io.json._table_schema import build_table_schema - -__all__ = [ - "dumps", - "loads", - "read_json", - "to_json", - "build_table_schema", -] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_snap.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_snap.py deleted file mode 100644 index 7064e9e7993f8cd14420bb3101c084923c13c4e7..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_snap.py +++ /dev/null @@ -1,47 +0,0 @@ -import pytest - -from pandas import ( - DatetimeIndex, - date_range, -) -import pandas._testing as tm - - -@pytest.mark.parametrize("tz", [None, "Asia/Shanghai", "Europe/Berlin"]) -@pytest.mark.parametrize("name", [None, "my_dti"]) -@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) -def test_dti_snap(name, tz, unit): - dti = DatetimeIndex( - [ - "1/1/2002", - "1/2/2002", - "1/3/2002", - "1/4/2002", - "1/5/2002", - "1/6/2002", - "1/7/2002", - ], - name=name, - tz=tz, - freq="D", - ) - dti = dti.as_unit(unit) - - result = dti.snap(freq="W-MON") - expected = date_range("12/31/2001", "1/7/2002", name=name, tz=tz, freq="w-mon") - expected = expected.repeat([3, 4]) - expected = expected.as_unit(unit) - tm.assert_index_equal(result, expected) - assert result.tz == expected.tz - assert 
result.freq is None - assert expected.freq is None - - result = dti.snap(freq="B") - - expected = date_range("1/1/2002", "1/7/2002", name=name, tz=tz, freq="b") - expected = expected.repeat([1, 1, 1, 2, 2]) - expected = expected.as_unit(unit) - tm.assert_index_equal(result, expected) - assert result.tz == expected.tz - assert result.freq is None - assert expected.freq is None diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_formats.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_formats.py deleted file mode 100644 index 751f9e4cc9eeee873da647af213b9c9670266cef..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_formats.py +++ /dev/null @@ -1,93 +0,0 @@ -import pytest - -import pandas as pd -from pandas import ( - Series, - TimedeltaIndex, -) - - -class TestTimedeltaIndexRendering: - @pytest.mark.parametrize("method", ["__repr__", "__str__"]) - def test_representation(self, method): - idx1 = TimedeltaIndex([], freq="D") - idx2 = TimedeltaIndex(["1 days"], freq="D") - idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D") - idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D") - idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"]) - - exp1 = "TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')" - - exp2 = "TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')" - - exp3 = "TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')" - - exp4 = ( - "TimedeltaIndex(['1 days', '2 days', '3 days'], " - "dtype='timedelta64[ns]', freq='D')" - ) - - exp5 = ( - "TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', " - "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)" - ) - - with pd.option_context("display.width", 300): - for idx, expected in zip( - [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5] - ): - result = getattr(idx, method)() - assert result == expected - - def test_representation_to_series(self): - idx1 = TimedeltaIndex([], freq="D") - idx2 = TimedeltaIndex(["1 days"], freq="D") - idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D") - idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D") - idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"]) - - exp1 = """Series([], dtype: timedelta64[ns])""" - - exp2 = "0 1 days\ndtype: timedelta64[ns]" - - exp3 = "0 1 days\n1 2 days\ndtype: timedelta64[ns]" - - exp4 = "0 1 days\n1 2 days\n2 3 days\ndtype: timedelta64[ns]" - - exp5 = ( - "0 1 days 00:00:01\n" - "1 2 days 00:00:00\n" - "2 3 days 00:00:00\n" - "dtype: timedelta64[ns]" - ) - - with pd.option_context("display.width", 300): - for idx, expected in zip( - [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5] - ): - result = repr(Series(idx)) - assert result == expected - - def test_summary(self): - # GH#9116 - idx1 = TimedeltaIndex([], freq="D") - idx2 = TimedeltaIndex(["1 days"], freq="D") - idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D") - idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D") - idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"]) - - exp1 = "TimedeltaIndex: 0 entries\nFreq: D" - - exp2 = "TimedeltaIndex: 1 entries, 1 days to 1 days\nFreq: D" - - exp3 = "TimedeltaIndex: 2 entries, 1 days to 2 days\nFreq: D" - - exp4 = "TimedeltaIndex: 3 entries, 1 days to 3 days\nFreq: D" - - exp5 = "TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00" - 
- for idx, expected in zip( - [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5] - ): - result = idx._summary() - assert result == expected diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/requests/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/requests/__init__.py deleted file mode 100644 index 75a633bf9dc81ebb94775cb810d91d2b3cf48190..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/requests/__init__.py +++ /dev/null @@ -1,154 +0,0 @@ -# -*- coding: utf-8 -*- - -# __ -# /__) _ _ _ _ _/ _ -# / ( (- (/ (/ (- _) / _) -# / - -""" -Requests HTTP Library -~~~~~~~~~~~~~~~~~~~~~ - -Requests is an HTTP library, written in Python, for human beings. -Basic GET usage: - - >>> import requests - >>> r = requests.get('https://www.python.org') - >>> r.status_code - 200 - >>> b'Python is a programming language' in r.content - True - -... or POST: - - >>> payload = dict(key1='value1', key2='value2') - >>> r = requests.post('https://httpbin.org/post', data=payload) - >>> print(r.text) - { - ... - "form": { - "key1": "value1", - "key2": "value2" - }, - ... - } - -The other HTTP methods are supported - see `requests.api`. Full documentation -is at . - -:copyright: (c) 2017 by Kenneth Reitz. -:license: Apache 2.0, see LICENSE for more details. -""" - -from pip._vendor import urllib3 -import warnings -from .exceptions import RequestsDependencyWarning - -charset_normalizer_version = None - -try: - from pip._vendor.chardet import __version__ as chardet_version -except ImportError: - chardet_version = None - -def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): - urllib3_version = urllib3_version.split('.') - assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. - - # Sometimes, urllib3 only reports its version as 16.1. - if len(urllib3_version) == 2: - urllib3_version.append('0') - - # Check urllib3 for compatibility. - major, minor, patch = urllib3_version # noqa: F811 - major, minor, patch = int(major), int(minor), int(patch) - # urllib3 >= 1.21.1, <= 1.26 - assert major == 1 - assert minor >= 21 - assert minor <= 26 - - # Check charset_normalizer for compatibility. - if chardet_version: - major, minor, patch = chardet_version.split('.')[:3] - major, minor, patch = int(major), int(minor), int(patch) - # chardet_version >= 3.0.2, < 5.0.0 - assert (3, 0, 2) <= (major, minor, patch) < (5, 0, 0) - elif charset_normalizer_version: - major, minor, patch = charset_normalizer_version.split('.')[:3] - major, minor, patch = int(major), int(minor), int(patch) - # charset_normalizer >= 2.0.0 < 3.0.0 - assert (2, 0, 0) <= (major, minor, patch) < (3, 0, 0) - else: - raise Exception("You need either charset_normalizer or chardet installed") - -def _check_cryptography(cryptography_version): - # cryptography < 1.3.4 - try: - cryptography_version = list(map(int, cryptography_version.split('.'))) - except ValueError: - return - - if cryptography_version < [1, 3, 4]: - warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) - warnings.warn(warning, RequestsDependencyWarning) - -# Check imported dependencies for compatibility. 
-try: - check_compatibility(urllib3.__version__, chardet_version, charset_normalizer_version) -except (AssertionError, ValueError): - warnings.warn("urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " - "version!".format(urllib3.__version__, chardet_version, charset_normalizer_version), - RequestsDependencyWarning) - -# Attempt to enable urllib3's fallback for SNI support -# if the standard library doesn't support SNI or the -# 'ssl' library isn't available. -try: - # Note: This logic prevents upgrading cryptography on Windows, if imported - # as part of pip. - from pip._internal.utils.compat import WINDOWS - if not WINDOWS: - raise ImportError("pip internals: don't import cryptography on Windows") - try: - import ssl - except ImportError: - ssl = None - - if not getattr(ssl, "HAS_SNI", False): - from pip._vendor.urllib3.contrib import pyopenssl - pyopenssl.inject_into_urllib3() - - # Check cryptography version - from cryptography import __version__ as cryptography_version - _check_cryptography(cryptography_version) -except ImportError: - pass - -# urllib3's DependencyWarnings should be silenced. -from pip._vendor.urllib3.exceptions import DependencyWarning -warnings.simplefilter('ignore', DependencyWarning) - -from .__version__ import __title__, __description__, __url__, __version__ -from .__version__ import __build__, __author__, __author_email__, __license__ -from .__version__ import __copyright__, __cake__ - -from . import utils -from . import packages -from .models import Request, Response, PreparedRequest -from .api import request, get, head, post, patch, put, delete, options -from .sessions import session, Session -from .status_codes import codes -from .exceptions import ( - RequestException, Timeout, URLRequired, - TooManyRedirects, HTTPError, ConnectionError, - FileModeWarning, ConnectTimeout, ReadTimeout, JSONDecodeError -) - -# Set default logging handler to avoid "No handler found" warnings. -import logging -from logging import NullHandler - -logging.getLogger(__name__).addHandler(NullHandler()) - -# FileModeWarnings go off per the default. -warnings.simplefilter('default', FileModeWarning, append=True) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/_csound_builtins.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/_csound_builtins.py deleted file mode 100644 index 5f37306843221c4e01f078002ca8f9468a16d773..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/_csound_builtins.py +++ /dev/null @@ -1,1780 +0,0 @@ -""" - pygments.lexers._csound_builtins - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -REMOVED_OPCODES = set(''' -OSCsendA -beadsynt -beosc -buchla -getrowlin -lua_exec -lua_iaopcall -lua_iaopcall_off -lua_ikopcall -lua_ikopcall_off -lua_iopcall -lua_iopcall_off -lua_opdef -mp3scal_check -mp3scal_load -mp3scal_load2 -mp3scal_play -mp3scal_play2 -pvsgendy -socksend_k -signalflowgraph -sumTableFilter -systime -tabrowlin -vbap1move -'''.split()) - -# Opcodes in Csound 6.18.0 using: -# python3 -c " -# import re -# from subprocess import Popen, PIPE -# output = Popen(['csound', '--list-opcodes0'], stderr=PIPE, text=True).communicate()[1] -# opcodes = output[re.search(r'^\$', output, re.M).end() : re.search(r'^\d+ opcodes\$', output, re.M).start()].split() -# output = Popen(['csound', '--list-opcodes2'], stderr=PIPE, text=True).communicate()[1] -# all_opcodes = output[re.search(r'^\$', output, re.M).end() : re.search(r'^\d+ opcodes\$', output, re.M).start()].split() -# deprecated_opcodes = [opcode for opcode in all_opcodes if opcode not in opcodes] -# # Remove opcodes that csound.py treats as keywords. -# keyword_opcodes = [ -# 'cggoto', # https://csound.com/docs/manual/cggoto.html -# 'cigoto', # https://csound.com/docs/manual/cigoto.html -# 'cingoto', # (undocumented) -# 'ckgoto', # https://csound.com/docs/manual/ckgoto.html -# 'cngoto', # https://csound.com/docs/manual/cngoto.html -# 'cnkgoto', # (undocumented) -# 'endin', # https://csound.com/docs/manual/endin.html -# 'endop', # https://csound.com/docs/manual/endop.html -# 'goto', # https://csound.com/docs/manual/goto.html -# 'igoto', # https://csound.com/docs/manual/igoto.html -# 'instr', # https://csound.com/docs/manual/instr.html -# 'kgoto', # https://csound.com/docs/manual/kgoto.html -# 'loop_ge', # https://csound.com/docs/manual/loop_ge.html -# 'loop_gt', # https://csound.com/docs/manual/loop_gt.html -# 'loop_le', # https://csound.com/docs/manual/loop_le.html -# 'loop_lt', # https://csound.com/docs/manual/loop_lt.html -# 'opcode', # https://csound.com/docs/manual/opcode.html -# 'reinit', # https://csound.com/docs/manual/reinit.html -# 'return', # https://csound.com/docs/manual/return.html -# 'rireturn', # https://csound.com/docs/manual/rireturn.html -# 'rigoto', # https://csound.com/docs/manual/rigoto.html -# 'tigoto', # https://csound.com/docs/manual/tigoto.html -# 'timout' # https://csound.com/docs/manual/timout.html -# ] -# opcodes = [opcode for opcode in opcodes if opcode not in keyword_opcodes] -# newline = '\n' -# print(f'''OPCODES = set(\''' -# {newline.join(opcodes)} -# \'''.split()) -# -# DEPRECATED_OPCODES = set(\''' -# {newline.join(deprecated_opcodes)} -# \'''.split()) -# ''') -# " - -OPCODES = set(''' -ATSadd -ATSaddnz -ATSbufread -ATScross -ATSinfo -ATSinterpread -ATSpartialtap -ATSread -ATSreadnz -ATSsinnoi -FLbox -FLbutBank -FLbutton -FLcloseButton -FLcolor -FLcolor2 -FLcount -FLexecButton -FLgetsnap -FLgroup -FLgroupEnd -FLgroup_end -FLhide -FLhvsBox -FLhvsBoxSetValue -FLjoy -FLkeyIn -FLknob -FLlabel -FLloadsnap -FLmouse -FLpack -FLpackEnd -FLpack_end -FLpanel -FLpanelEnd -FLpanel_end -FLprintk -FLprintk2 -FLroller -FLrun -FLsavesnap -FLscroll -FLscrollEnd -FLscroll_end -FLsetAlign -FLsetBox -FLsetColor -FLsetColor2 -FLsetFont -FLsetPosition -FLsetSize -FLsetSnapGroup -FLsetText -FLsetTextColor -FLsetTextSize -FLsetTextType -FLsetVal -FLsetVal_i -FLsetVali -FLsetsnap -FLshow -FLslidBnk -FLslidBnk2 -FLslidBnk2Set -FLslidBnk2Setk -FLslidBnkGetHandle -FLslidBnkSet -FLslidBnkSetk -FLslider -FLtabs -FLtabsEnd -FLtabs_end -FLtext -FLupdate -FLvalue -FLvkeybd -FLvslidBnk -FLvslidBnk2 -FLxyin -JackoAudioIn 
-JackoAudioInConnect -JackoAudioOut -JackoAudioOutConnect -JackoFreewheel -JackoInfo -JackoInit -JackoMidiInConnect -JackoMidiOut -JackoMidiOutConnect -JackoNoteOut -JackoOn -JackoTransport -K35_hpf -K35_lpf -MixerClear -MixerGetLevel -MixerReceive -MixerSend -MixerSetLevel -MixerSetLevel_i -OSCbundle -OSCcount -OSCinit -OSCinitM -OSClisten -OSCraw -OSCsend -OSCsend_lo -S -STKBandedWG -STKBeeThree -STKBlowBotl -STKBlowHole -STKBowed -STKBrass -STKClarinet -STKDrummer -STKFMVoices -STKFlute -STKHevyMetl -STKMandolin -STKModalBar -STKMoog -STKPercFlut -STKPlucked -STKResonate -STKRhodey -STKSaxofony -STKShakers -STKSimple -STKSitar -STKStifKarp -STKTubeBell -STKVoicForm -STKWhistle -STKWurley -a -abs -active -adsr -adsyn -adsynt -adsynt2 -aftouch -allpole -alpass -alwayson -ampdb -ampdbfs -ampmidi -ampmidicurve -ampmidid -apoleparams -arduinoRead -arduinoReadF -arduinoStart -arduinoStop -areson -aresonk -atone -atonek -atonex -autocorr -babo -balance -balance2 -bamboo -barmodel -bbcutm -bbcuts -betarand -bexprnd -bformdec1 -bformdec2 -bformenc1 -binit -biquad -biquada -birnd -bob -bpf -bpfcos -bqrez -butbp -butbr -buthp -butlp -butterbp -butterbr -butterhp -butterlp -button -buzz -c2r -cabasa -cauchy -cauchyi -cbrt -ceil -cell -cent -centroid -ceps -cepsinv -chanctrl -changed -changed2 -chani -chano -chebyshevpoly -checkbox -chn_S -chn_a -chn_k -chnclear -chnexport -chnget -chngeta -chngeti -chngetk -chngetks -chngets -chnmix -chnparams -chnset -chnseta -chnseti -chnsetk -chnsetks -chnsets -chuap -clear -clfilt -clip -clockoff -clockon -cmp -cmplxprod -cntCreate -cntCycles -cntDelete -cntDelete_i -cntRead -cntReset -cntState -comb -combinv -compilecsd -compileorc -compilestr -compress -compress2 -connect -control -convle -convolve -copya2ftab -copyf2array -cos -cosh -cosinv -cosseg -cossegb -cossegr -count -count_i -cps2pch -cpsmidi -cpsmidib -cpsmidinn -cpsoct -cpspch -cpstmid -cpstun -cpstuni -cpsxpch -cpumeter -cpuprc -cross2 -crossfm -crossfmi -crossfmpm -crossfmpmi -crosspm -crosspmi -crunch -ctlchn -ctrl14 -ctrl21 -ctrl7 -ctrlinit -ctrlpreset -ctrlprint -ctrlprintpresets -ctrlsave -ctrlselect -cuserrnd -dam -date -dates -db -dbamp -dbfsamp -dcblock -dcblock2 -dconv -dct -dctinv -deinterleave -delay -delay1 -delayk -delayr -delayw -deltap -deltap3 -deltapi -deltapn -deltapx -deltapxw -denorm -diff -diode_ladder -directory -diskgrain -diskin -diskin2 -dispfft -display -distort -distort1 -divz -doppler -dot -downsamp -dripwater -dssiactivate -dssiaudio -dssictls -dssiinit -dssilist -dumpk -dumpk2 -dumpk3 -dumpk4 -duserrnd -dust -dust2 -elapsedcycles -elapsedtime -envlpx -envlpxr -ephasor -eqfil -evalstr -event -event_i -eventcycles -eventtime -exciter -exitnow -exp -expcurve -expon -exprand -exprandi -expseg -expsega -expsegb -expsegba -expsegr -fareylen -fareyleni -faustaudio -faustcompile -faustctl -faustdsp -faustgen -faustplay -fft -fftinv -ficlose -filebit -filelen -filenchnls -filepeak -filescal -filesr -filevalid -fillarray -filter2 -fin -fini -fink -fiopen -flanger -flashtxt -flooper -flooper2 -floor -fluidAllOut -fluidCCi -fluidCCk -fluidControl -fluidEngine -fluidInfo -fluidLoad -fluidNote -fluidOut -fluidProgramSelect -fluidSetInterpMethod -fmanal -fmax -fmb3 -fmbell -fmin -fmmetal -fmod -fmpercfl -fmrhode -fmvoice -fmwurlie -fof -fof2 -fofilter -fog -fold -follow -follow2 -foscil -foscili -fout -fouti -foutir -foutk -fprintks -fprints -frac -fractalnoise -framebuffer -freeverb -ftaudio -ftchnls -ftconv -ftcps -ftexists -ftfree -ftgen -ftgenonce -ftgentmp -ftlen -ftload 
-ftloadk -ftlptim -ftmorf -ftom -ftprint -ftresize -ftresizei -ftsamplebank -ftsave -ftsavek -ftset -ftslice -ftslicei -ftsr -gain -gainslider -gauss -gaussi -gausstrig -gbuzz -genarray -genarray_i -gendy -gendyc -gendyx -getcfg -getcol -getftargs -getrow -getseed -gogobel -grain -grain2 -grain3 -granule -gtadsr -gtf -guiro -harmon -harmon2 -harmon3 -harmon4 -hdf5read -hdf5write -hilbert -hilbert2 -hrtfearly -hrtfmove -hrtfmove2 -hrtfreverb -hrtfstat -hsboscil -hvs1 -hvs2 -hvs3 -hypot -i -ihold -imagecreate -imagefree -imagegetpixel -imageload -imagesave -imagesetpixel -imagesize -in -in32 -inch -inh -init -initc14 -initc21 -initc7 -inleta -inletf -inletk -inletkid -inletv -ino -inq -inrg -ins -insglobal -insremot -int -integ -interleave -interp -invalue -inx -inz -jacktransport -jitter -jitter2 -joystick -jspline -k -la_i_add_mc -la_i_add_mr -la_i_add_vc -la_i_add_vr -la_i_assign_mc -la_i_assign_mr -la_i_assign_t -la_i_assign_vc -la_i_assign_vr -la_i_conjugate_mc -la_i_conjugate_mr -la_i_conjugate_vc -la_i_conjugate_vr -la_i_distance_vc -la_i_distance_vr -la_i_divide_mc -la_i_divide_mr -la_i_divide_vc -la_i_divide_vr -la_i_dot_mc -la_i_dot_mc_vc -la_i_dot_mr -la_i_dot_mr_vr -la_i_dot_vc -la_i_dot_vr -la_i_get_mc -la_i_get_mr -la_i_get_vc -la_i_get_vr -la_i_invert_mc -la_i_invert_mr -la_i_lower_solve_mc -la_i_lower_solve_mr -la_i_lu_det_mc -la_i_lu_det_mr -la_i_lu_factor_mc -la_i_lu_factor_mr -la_i_lu_solve_mc -la_i_lu_solve_mr -la_i_mc_create -la_i_mc_set -la_i_mr_create -la_i_mr_set -la_i_multiply_mc -la_i_multiply_mr -la_i_multiply_vc -la_i_multiply_vr -la_i_norm1_mc -la_i_norm1_mr -la_i_norm1_vc -la_i_norm1_vr -la_i_norm_euclid_mc -la_i_norm_euclid_mr -la_i_norm_euclid_vc -la_i_norm_euclid_vr -la_i_norm_inf_mc -la_i_norm_inf_mr -la_i_norm_inf_vc -la_i_norm_inf_vr -la_i_norm_max_mc -la_i_norm_max_mr -la_i_print_mc -la_i_print_mr -la_i_print_vc -la_i_print_vr -la_i_qr_eigen_mc -la_i_qr_eigen_mr -la_i_qr_factor_mc -la_i_qr_factor_mr -la_i_qr_sym_eigen_mc -la_i_qr_sym_eigen_mr -la_i_random_mc -la_i_random_mr -la_i_random_vc -la_i_random_vr -la_i_size_mc -la_i_size_mr -la_i_size_vc -la_i_size_vr -la_i_subtract_mc -la_i_subtract_mr -la_i_subtract_vc -la_i_subtract_vr -la_i_t_assign -la_i_trace_mc -la_i_trace_mr -la_i_transpose_mc -la_i_transpose_mr -la_i_upper_solve_mc -la_i_upper_solve_mr -la_i_vc_create -la_i_vc_set -la_i_vr_create -la_i_vr_set -la_k_a_assign -la_k_add_mc -la_k_add_mr -la_k_add_vc -la_k_add_vr -la_k_assign_a -la_k_assign_f -la_k_assign_mc -la_k_assign_mr -la_k_assign_t -la_k_assign_vc -la_k_assign_vr -la_k_conjugate_mc -la_k_conjugate_mr -la_k_conjugate_vc -la_k_conjugate_vr -la_k_current_f -la_k_current_vr -la_k_distance_vc -la_k_distance_vr -la_k_divide_mc -la_k_divide_mr -la_k_divide_vc -la_k_divide_vr -la_k_dot_mc -la_k_dot_mc_vc -la_k_dot_mr -la_k_dot_mr_vr -la_k_dot_vc -la_k_dot_vr -la_k_f_assign -la_k_get_mc -la_k_get_mr -la_k_get_vc -la_k_get_vr -la_k_invert_mc -la_k_invert_mr -la_k_lower_solve_mc -la_k_lower_solve_mr -la_k_lu_det_mc -la_k_lu_det_mr -la_k_lu_factor_mc -la_k_lu_factor_mr -la_k_lu_solve_mc -la_k_lu_solve_mr -la_k_mc_set -la_k_mr_set -la_k_multiply_mc -la_k_multiply_mr -la_k_multiply_vc -la_k_multiply_vr -la_k_norm1_mc -la_k_norm1_mr -la_k_norm1_vc -la_k_norm1_vr -la_k_norm_euclid_mc -la_k_norm_euclid_mr -la_k_norm_euclid_vc -la_k_norm_euclid_vr -la_k_norm_inf_mc -la_k_norm_inf_mr -la_k_norm_inf_vc -la_k_norm_inf_vr -la_k_norm_max_mc -la_k_norm_max_mr -la_k_qr_eigen_mc -la_k_qr_eigen_mr -la_k_qr_factor_mc -la_k_qr_factor_mr -la_k_qr_sym_eigen_mc 
-la_k_qr_sym_eigen_mr -la_k_random_mc -la_k_random_mr -la_k_random_vc -la_k_random_vr -la_k_subtract_mc -la_k_subtract_mr -la_k_subtract_vc -la_k_subtract_vr -la_k_t_assign -la_k_trace_mc -la_k_trace_mr -la_k_upper_solve_mc -la_k_upper_solve_mr -la_k_vc_set -la_k_vr_set -lag -lagud -lastcycle -lenarray -lfo -lfsr -limit -limit1 -lincos -line -linen -linenr -lineto -link_beat_force -link_beat_get -link_beat_request -link_create -link_enable -link_is_enabled -link_metro -link_peers -link_tempo_get -link_tempo_set -linlin -linrand -linseg -linsegb -linsegr -liveconv -locsend -locsig -log -log10 -log2 -logbtwo -logcurve -loopseg -loopsegp -looptseg -loopxseg -lorenz -loscil -loscil3 -loscil3phs -loscilphs -loscilx -lowpass2 -lowres -lowresx -lpcanal -lpcfilter -lpf18 -lpform -lpfreson -lphasor -lpinterp -lposcil -lposcil3 -lposcila -lposcilsa -lposcilsa2 -lpread -lpreson -lpshold -lpsholdp -lpslot -lufs -mac -maca -madsr -mags -mandel -mandol -maparray -maparray_i -marimba -massign -max -max_k -maxabs -maxabsaccum -maxaccum -maxalloc -maxarray -mclock -mdelay -median -mediank -metro -metro2 -metrobpm -mfb -midglobal -midiarp -midic14 -midic21 -midic7 -midichannelaftertouch -midichn -midicontrolchange -midictrl -mididefault -midifilestatus -midiin -midinoteoff -midinoteoncps -midinoteonkey -midinoteonoct -midinoteonpch -midion -midion2 -midiout -midiout_i -midipgm -midipitchbend -midipolyaftertouch -midiprogramchange -miditempo -midremot -min -minabs -minabsaccum -minaccum -minarray -mincer -mirror -mode -modmatrix -monitor -moog -moogladder -moogladder2 -moogvcf -moogvcf2 -moscil -mp3bitrate -mp3in -mp3len -mp3nchnls -mp3out -mp3scal -mp3sr -mpulse -mrtmsg -ms2st -mtof -mton -multitap -mute -mvchpf -mvclpf1 -mvclpf2 -mvclpf3 -mvclpf4 -mvmfilter -mxadsr -nchnls_hw -nestedap -nlalp -nlfilt -nlfilt2 -noise -noteoff -noteon -noteondur -noteondur2 -notnum -nreverb -nrpn -nsamp -nstance -nstrnum -nstrstr -ntof -ntom -ntrpol -nxtpow2 -octave -octcps -octmidi -octmidib -octmidinn -octpch -olabuffer -oscbnk -oscil -oscil1 -oscil1i -oscil3 -oscili -oscilikt -osciliktp -oscilikts -osciln -oscils -oscilx -out -out32 -outall -outc -outch -outh -outiat -outic -outic14 -outipat -outipb -outipc -outkat -outkc -outkc14 -outkpat -outkpb -outkpc -outleta -outletf -outletk -outletkid -outletv -outo -outq -outq1 -outq2 -outq3 -outq4 -outrg -outs -outs1 -outs2 -outvalue -outx -outz -p -p5gconnect -p5gdata -pan -pan2 -pareq -part2txt -partials -partikkel -partikkelget -partikkelset -partikkelsync -passign -paulstretch -pcauchy -pchbend -pchmidi -pchmidib -pchmidinn -pchoct -pchtom -pconvolve -pcount -pdclip -pdhalf -pdhalfy -peak -pgmassign -pgmchn -phaser1 -phaser2 -phasor -phasorbnk -phs -pindex -pinker -pinkish -pitch -pitchac -pitchamdf -planet -platerev -plltrack -pluck -poisson -pol2rect -polyaft -polynomial -port -portk -poscil -poscil3 -pow -powershape -powoftwo -pows -prealloc -prepiano -print -print_type -printarray -printf -printf_i -printk -printk2 -printks -printks2 -println -prints -printsk -product -pset -ptablew -ptrack -puts -pvadd -pvbufread -pvcross -pvinterp -pvoc -pvread -pvs2array -pvs2tab -pvsadsyn -pvsanal -pvsarp -pvsbandp -pvsbandr -pvsbandwidth -pvsbin -pvsblur -pvsbuffer -pvsbufread -pvsbufread2 -pvscale -pvscent -pvsceps -pvscfs -pvscross -pvsdemix -pvsdiskin -pvsdisp -pvsenvftw -pvsfilter -pvsfread -pvsfreeze -pvsfromarray -pvsftr -pvsftw -pvsfwrite -pvsgain -pvsgendy -pvshift -pvsifd -pvsin -pvsinfo -pvsinit -pvslock -pvslpc -pvsmaska -pvsmix -pvsmooth -pvsmorph -pvsosc -pvsout 
-pvspitch -pvstanal -pvstencil -pvstrace -pvsvoc -pvswarp -pvsynth -pwd -pyassign -pyassigni -pyassignt -pycall -pycall1 -pycall1i -pycall1t -pycall2 -pycall2i -pycall2t -pycall3 -pycall3i -pycall3t -pycall4 -pycall4i -pycall4t -pycall5 -pycall5i -pycall5t -pycall6 -pycall6i -pycall6t -pycall7 -pycall7i -pycall7t -pycall8 -pycall8i -pycall8t -pycalli -pycalln -pycallni -pycallt -pyeval -pyevali -pyevalt -pyexec -pyexeci -pyexect -pyinit -pylassign -pylassigni -pylassignt -pylcall -pylcall1 -pylcall1i -pylcall1t -pylcall2 -pylcall2i -pylcall2t -pylcall3 -pylcall3i -pylcall3t -pylcall4 -pylcall4i -pylcall4t -pylcall5 -pylcall5i -pylcall5t -pylcall6 -pylcall6i -pylcall6t -pylcall7 -pylcall7i -pylcall7t -pylcall8 -pylcall8i -pylcall8t -pylcalli -pylcalln -pylcallni -pylcallt -pyleval -pylevali -pylevalt -pylexec -pylexeci -pylexect -pylrun -pylruni -pylrunt -pyrun -pyruni -pyrunt -qinf -qnan -r2c -rand -randc -randh -randi -random -randomh -randomi -rbjeq -readclock -readf -readfi -readk -readk2 -readk3 -readk4 -readks -readscore -readscratch -rect2pol -release -remoteport -remove -repluck -reshapearray -reson -resonbnk -resonk -resonr -resonx -resonxk -resony -resonz -resyn -reverb -reverb2 -reverbsc -rewindscore -rezzy -rfft -rifft -rms -rnd -rnd31 -rndseed -round -rspline -rtclock -s16b14 -s32b14 -samphold -sandpaper -sc_lag -sc_lagud -sc_phasor -sc_trig -scale -scale2 -scalearray -scanhammer -scanmap -scans -scansmap -scantable -scanu -scanu2 -schedkwhen -schedkwhennamed -schedule -schedulek -schedwhen -scoreline -scoreline_i -seed -sekere -select -semitone -sense -sensekey -seqtime -seqtime2 -sequ -sequstate -serialBegin -serialEnd -serialFlush -serialPrint -serialRead -serialWrite -serialWrite_i -setcol -setctrl -setksmps -setrow -setscorepos -sfilist -sfinstr -sfinstr3 -sfinstr3m -sfinstrm -sfload -sflooper -sfpassign -sfplay -sfplay3 -sfplay3m -sfplaym -sfplist -sfpreset -shaker -shiftin -shiftout -signum -sin -sinh -sininv -sinsyn -skf -sleighbells -slicearray -slicearray_i -slider16 -slider16f -slider16table -slider16tablef -slider32 -slider32f -slider32table -slider32tablef -slider64 -slider64f -slider64table -slider64tablef -slider8 -slider8f -slider8table -slider8tablef -sliderKawai -sndloop -sndwarp -sndwarpst -sockrecv -sockrecvs -socksend -socksends -sorta -sortd -soundin -space -spat3d -spat3di -spat3dt -spdist -spf -splitrig -sprintf -sprintfk -spsend -sqrt -squinewave -st2ms -statevar -sterrain -stix -strcat -strcatk -strchar -strchark -strcmp -strcmpk -strcpy -strcpyk -strecv -streson -strfromurl -strget -strindex -strindexk -string2array -strlen -strlenk -strlower -strlowerk -strrindex -strrindexk -strset -strstrip -strsub -strsubk -strtod -strtodk -strtol -strtolk -strupper -strupperk -stsend -subinstr -subinstrinit -sum -sumarray -svfilter -svn -syncgrain -syncloop -syncphasor -system -system_i -tab -tab2array -tab2pvs -tab_i -tabifd -table -table3 -table3kt -tablecopy -tablefilter -tablefilteri -tablegpw -tablei -tableicopy -tableigpw -tableikt -tableimix -tablekt -tablemix -tableng -tablera -tableseg -tableshuffle -tableshufflei -tablew -tablewa -tablewkt -tablexkt -tablexseg -tabmorph -tabmorpha -tabmorphak -tabmorphi -tabplay -tabrec -tabsum -tabw -tabw_i -tambourine -tan -tanh -taninv -taninv2 -tbvcf -tempest -tempo -temposcal -tempoval -timedseq -timeinstk -timeinsts -timek -times -tival -tlineto -tone -tonek -tonex -tradsyn -trandom -transeg -transegb -transegr -trcross -trfilter -trhighest -trigExpseg -trigLinseg -trigexpseg -trigger -trighold -triglinseg 
-trigphasor -trigseq -trim -trim_i -trirand -trlowest -trmix -trscale -trshift -trsplit -turnoff -turnoff2 -turnoff2_i -turnoff3 -turnon -tvconv -unirand -unwrap -upsamp -urandom -urd -vactrol -vadd -vadd_i -vaddv -vaddv_i -vaget -valpass -vaset -vbap -vbapg -vbapgmove -vbaplsinit -vbapmove -vbapz -vbapzmove -vcella -vclpf -vco -vco2 -vco2ft -vco2ift -vco2init -vcomb -vcopy -vcopy_i -vdel_k -vdelay -vdelay3 -vdelayk -vdelayx -vdelayxq -vdelayxs -vdelayxw -vdelayxwq -vdelayxws -vdivv -vdivv_i -vecdelay -veloc -vexp -vexp_i -vexpseg -vexpv -vexpv_i -vibes -vibr -vibrato -vincr -vlimit -vlinseg -vlowres -vmap -vmirror -vmult -vmult_i -vmultv -vmultv_i -voice -vosim -vphaseseg -vport -vpow -vpow_i -vpowv -vpowv_i -vps -vpvoc -vrandh -vrandi -vsubv -vsubv_i -vtaba -vtabi -vtabk -vtable1k -vtablea -vtablei -vtablek -vtablewa -vtablewi -vtablewk -vtabwa -vtabwi -vtabwk -vwrap -waveset -websocket -weibull -wgbow -wgbowedbar -wgbrass -wgclar -wgflute -wgpluck -wgpluck2 -wguide1 -wguide2 -wiiconnect -wiidata -wiirange -wiisend -window -wrap -writescratch -wterrain -wterrain2 -xadsr -xin -xout -xtratim -xyscale -zacl -zakinit -zamod -zar -zarg -zaw -zawm -zdf_1pole -zdf_1pole_mode -zdf_2pole -zdf_2pole_mode -zdf_ladder -zfilter2 -zir -ziw -ziwm -zkcl -zkmod -zkr -zkw -zkwm -'''.split()) - -DEPRECATED_OPCODES = set(''' -array -bformdec -bformenc -copy2ftab -copy2ttab -hrtfer -ktableseg -lentab -maxtab -mintab -pop -pop_f -ptable -ptable3 -ptablei -ptableiw -push -push_f -scalet -sndload -soundout -soundouts -specaddm -specdiff -specdisp -specfilt -spechist -specptrk -specscal -specsum -spectrum -stack -sumtab -tabgen -tableiw -tabmap -tabmap_i -tabslice -tb0 -tb0_init -tb1 -tb10 -tb10_init -tb11 -tb11_init -tb12 -tb12_init -tb13 -tb13_init -tb14 -tb14_init -tb15 -tb15_init -tb1_init -tb2 -tb2_init -tb3 -tb3_init -tb4 -tb4_init -tb5 -tb5_init -tb6 -tb6_init -tb7 -tb7_init -tb8 -tb8_init -tb9 -tb9_init -vbap16 -vbap4 -vbap4move -vbap8 -vbap8move -xscanmap -xscans -xscansmap -xscanu -xyin -'''.split()) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/matlab.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/matlab.py deleted file mode 100644 index 753a6efcf024def8439a80cbf378fdb239d03633..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/matlab.py +++ /dev/null @@ -1,3308 +0,0 @@ -""" - pygments.lexers.matlab - ~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for Matlab and related languages. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import Lexer, RegexLexer, bygroups, default, words, \ - do_insertions, include -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Generic, Whitespace - -from pygments.lexers import _scilab_builtins - -__all__ = ['MatlabLexer', 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer'] - - -class MatlabLexer(RegexLexer): - """ - For Matlab source code. - - .. versionadded:: 0.10 - """ - name = 'Matlab' - aliases = ['matlab'] - filenames = ['*.m'] - mimetypes = ['text/matlab'] - - _operators = r'-|==|~=|<=|>=|<|>|&&|&|~|\|\|?|\.\*|\*|\+|\.\^|\.\\|\./|/|\\' - - tokens = { - 'expressions': [ - # operators: - (_operators, Operator), - - # numbers (must come before punctuation to handle `.5`; cannot use - # `\b` due to e.g. `5. + .5`). 
The negative lookahead on operators - # avoids including the dot in `1./x` (the dot is part of `./`). - (r'(? and then - # (equal | open-parenthesis | | ). - (r'(?:^|(?<=;))(\s*)(\w+)(\s+)(?!=|\(|%s\s|\s)' % _operators, - bygroups(Whitespace, Name, Whitespace), 'commandargs'), - - include('expressions') - ], - 'blockcomment': [ - (r'^\s*%\}', Comment.Multiline, '#pop'), - (r'^.*\n', Comment.Multiline), - (r'.', Comment.Multiline), - ], - 'deffunc': [ - (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', - bygroups(Whitespace, Text, Whitespace, Punctuation, - Whitespace, Name.Function, Punctuation, Text, - Punctuation, Whitespace), '#pop'), - # function with no args - (r'(\s*)([a-zA-Z_]\w*)', - bygroups(Whitespace, Name.Function), '#pop'), - ], - 'propattrs': [ - (r'(\w+)(\s*)(=)(\s*)(\d+)', - bygroups(Name.Builtin, Whitespace, Punctuation, Whitespace, - Number)), - (r'(\w+)(\s*)(=)(\s*)([a-zA-Z]\w*)', - bygroups(Name.Builtin, Whitespace, Punctuation, Whitespace, - Keyword)), - (r',', Punctuation), - (r'\)', Punctuation, '#pop'), - (r'\s+', Whitespace), - (r'.', Text), - ], - 'defprops': [ - (r'%\{\s*\n', Comment.Multiline, 'blockcomment'), - (r'%.*$', Comment), - (r'(?. - - .. versionadded:: 0.10 - """ - name = 'Matlab session' - aliases = ['matlabsession'] - - def get_tokens_unprocessed(self, text): - mlexer = MatlabLexer(**self.options) - - curcode = '' - insertions = [] - continuation = False - - for match in line_re.finditer(text): - line = match.group() - - if line.startswith('>> '): - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:3])])) - curcode += line[3:] - - elif line.startswith('>>'): - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:2])])) - curcode += line[2:] - - elif line.startswith('???'): - - idx = len(curcode) - - # without is showing error on same line as before...? - # line = "\n" + line - token = (0, Generic.Traceback, line) - insertions.append((idx, [token])) - elif continuation and insertions: - # line_start is the length of the most recent prompt symbol - line_start = len(insertions[-1][-1][-1]) - # Set leading spaces with the length of the prompt to be a generic prompt - # This keeps code aligned when prompts are removed, say with some Javascript - if line.startswith(' '*line_start): - insertions.append( - (len(curcode), [(0, Generic.Prompt, line[:line_start])])) - curcode += line[line_start:] - else: - curcode += line - else: - if curcode: - yield from do_insertions( - insertions, mlexer.get_tokens_unprocessed(curcode)) - curcode = '' - insertions = [] - - yield match.start(), Generic.Output, line - - # Does not allow continuation if a comment is included after the ellipses. - # Continues any line that ends with ..., even comments (lines that start with %) - if line.strip().endswith('...'): - continuation = True - else: - continuation = False - - if curcode: # or item: - yield from do_insertions( - insertions, mlexer.get_tokens_unprocessed(curcode)) - - -class OctaveLexer(RegexLexer): - """ - For GNU Octave source code. - - .. versionadded:: 1.5 - """ - name = 'Octave' - url = 'https://www.gnu.org/software/octave/index' - aliases = ['octave'] - filenames = ['*.m'] - mimetypes = ['text/octave'] - - # These lists are generated automatically. 
- # Run the following in bash shell: - # - # First dump all of the Octave manual into a plain text file: - # - # $ info octave --subnodes -o octave-manual - # - # Now grep through it: - - # for i in \ - # "Built-in Function" "Command" "Function File" \ - # "Loadable Function" "Mapping Function"; - # do - # perl -e '@name = qw('"$i"'); - # print lc($name[0]),"_kw = [\n"'; - # - # perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \ - # octave-manual | sort | uniq ; - # echo "]" ; - # echo; - # done - - # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011) - - builtin_kw = ( - "addlistener", "addpath", "addproperty", "all", - "and", "any", "argnames", "argv", "assignin", - "atexit", "autoload", - "available_graphics_toolkits", "beep_on_error", - "bitand", "bitmax", "bitor", "bitshift", "bitxor", - "cat", "cell", "cellstr", "char", "class", "clc", - "columns", "command_line_path", - "completion_append_char", "completion_matches", - "complex", "confirm_recursive_rmdir", "cputime", - "crash_dumps_octave_core", "ctranspose", "cumprod", - "cumsum", "debug_on_error", "debug_on_interrupt", - "debug_on_warning", "default_save_options", - "dellistener", "diag", "diff", "disp", - "doc_cache_file", "do_string_escapes", "double", - "drawnow", "e", "echo_executing_commands", "eps", - "eq", "errno", "errno_list", "error", "eval", - "evalin", "exec", "exist", "exit", "eye", "false", - "fclear", "fclose", "fcntl", "fdisp", "feof", - "ferror", "feval", "fflush", "fgetl", "fgets", - "fieldnames", "file_in_loadpath", "file_in_path", - "filemarker", "filesep", "find_dir_in_path", - "fixed_point_format", "fnmatch", "fopen", "fork", - "formula", "fprintf", "fputs", "fread", "freport", - "frewind", "fscanf", "fseek", "fskipl", "ftell", - "functions", "fwrite", "ge", "genpath", "get", - "getegid", "getenv", "geteuid", "getgid", - "getpgrp", "getpid", "getppid", "getuid", "glob", - "gt", "gui_mode", "history_control", - "history_file", "history_size", - "history_timestamp_format_string", "home", - "horzcat", "hypot", "ifelse", - "ignore_function_time_stamp", "inferiorto", - "info_file", "info_program", "inline", "input", - "intmax", "intmin", "ipermute", - "is_absolute_filename", "isargout", "isbool", - "iscell", "iscellstr", "ischar", "iscomplex", - "isempty", "isfield", "isfloat", "isglobal", - "ishandle", "isieee", "isindex", "isinteger", - "islogical", "ismatrix", "ismethod", "isnull", - "isnumeric", "isobject", "isreal", - "is_rooted_relative_filename", "issorted", - "isstruct", "isvarname", "kbhit", "keyboard", - "kill", "lasterr", "lasterror", "lastwarn", - "ldivide", "le", "length", "link", "linspace", - "logical", "lstat", "lt", "make_absolute_filename", - "makeinfo_program", "max_recursion_depth", "merge", - "methods", "mfilename", "minus", "mislocked", - "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock", - "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes", - "munlock", "nargin", "nargout", - "native_float_format", "ndims", "ne", "nfields", - "nnz", "norm", "not", "numel", "nzmax", - "octave_config_info", "octave_core_file_limit", - "octave_core_file_name", - "octave_core_file_options", "ones", "or", - "output_max_field_width", "output_precision", - "page_output_immediately", "page_screen_output", - "path", "pathsep", "pause", "pclose", "permute", - "pi", "pipe", "plus", "popen", "power", - "print_empty_dimensions", "printf", - "print_struct_array_contents", "prod", - "program_invocation_name", "program_name", - "putenv", "puts", "pwd", "quit", "rats", "rdivide", - "readdir", 
"readlink", "read_readline_init_file", - "realmax", "realmin", "rehash", "rename", - "repelems", "re_read_readline_init_file", "reset", - "reshape", "resize", "restoredefaultpath", - "rethrow", "rmdir", "rmfield", "rmpath", "rows", - "save_header_format_string", "save_precision", - "saving_history", "scanf", "set", "setenv", - "shell_cmd", "sighup_dumps_octave_core", - "sigterm_dumps_octave_core", "silent_functions", - "single", "size", "size_equal", "sizemax", - "sizeof", "sleep", "source", "sparse_auto_mutate", - "split_long_rows", "sprintf", "squeeze", "sscanf", - "stat", "stderr", "stdin", "stdout", "strcmp", - "strcmpi", "string_fill_char", "strncmp", - "strncmpi", "struct", "struct_levels_to_print", - "strvcat", "subsasgn", "subsref", "sum", "sumsq", - "superiorto", "suppress_verbose_help_message", - "symlink", "system", "tic", "tilde_expand", - "times", "tmpfile", "tmpnam", "toc", "toupper", - "transpose", "true", "typeinfo", "umask", "uminus", - "uname", "undo_string_escapes", "unlink", "uplus", - "upper", "usage", "usleep", "vec", "vectorize", - "vertcat", "waitpid", "warning", "warranty", - "whos_line_format", "yes_or_no", "zeros", - "inf", "Inf", "nan", "NaN") - - command_kw = ("close", "load", "who", "whos") - - function_kw = ( - "accumarray", "accumdim", "acosd", "acotd", - "acscd", "addtodate", "allchild", "ancestor", - "anova", "arch_fit", "arch_rnd", "arch_test", - "area", "arma_rnd", "arrayfun", "ascii", "asctime", - "asecd", "asind", "assert", "atand", - "autoreg_matrix", "autumn", "axes", "axis", "bar", - "barh", "bartlett", "bartlett_test", "beep", - "betacdf", "betainv", "betapdf", "betarnd", - "bicgstab", "bicubic", "binary", "binocdf", - "binoinv", "binopdf", "binornd", "bitcmp", - "bitget", "bitset", "blackman", "blanks", - "blkdiag", "bone", "box", "brighten", "calendar", - "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf", - "cauchy_rnd", "caxis", "celldisp", "center", "cgs", - "chisquare_test_homogeneity", - "chisquare_test_independence", "circshift", "cla", - "clabel", "clf", "clock", "cloglog", "closereq", - "colon", "colorbar", "colormap", "colperm", - "comet", "common_size", "commutation_matrix", - "compan", "compare_versions", "compass", - "computer", "cond", "condest", "contour", - "contourc", "contourf", "contrast", "conv", - "convhull", "cool", "copper", "copyfile", "cor", - "corrcoef", "cor_test", "cosd", "cotd", "cov", - "cplxpair", "cross", "cscd", "cstrcat", "csvread", - "csvwrite", "ctime", "cumtrapz", "curl", "cut", - "cylinder", "date", "datenum", "datestr", - "datetick", "datevec", "dblquad", "deal", - "deblank", "deconv", "delaunay", "delaunayn", - "delete", "demo", "detrend", "diffpara", "diffuse", - "dir", "discrete_cdf", "discrete_inv", - "discrete_pdf", "discrete_rnd", "display", - "divergence", "dlmwrite", "dos", "dsearch", - "dsearchn", "duplication_matrix", "durbinlevinson", - "ellipsoid", "empirical_cdf", "empirical_inv", - "empirical_pdf", "empirical_rnd", "eomday", - "errorbar", "etime", "etreeplot", "example", - "expcdf", "expinv", "expm", "exppdf", "exprnd", - "ezcontour", "ezcontourf", "ezmesh", "ezmeshc", - "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor", - "factorial", "fail", "fcdf", "feather", "fftconv", - "fftfilt", "fftshift", "figure", "fileattrib", - "fileparts", "fill", "findall", "findobj", - "findstr", "finv", "flag", "flipdim", "fliplr", - "flipud", "fpdf", "fplot", "fractdiff", "freqz", - "freqz_plot", "frnd", "fsolve", - "f_test_regression", "ftp", "fullfile", "fzero", - "gamcdf", "gaminv", "gampdf", "gamrnd", "gca", 
- "gcbf", "gcbo", "gcf", "genvarname", "geocdf", - "geoinv", "geopdf", "geornd", "getfield", "ginput", - "glpk", "gls", "gplot", "gradient", - "graphics_toolkit", "gray", "grid", "griddata", - "griddatan", "gtext", "gunzip", "gzip", "hadamard", - "hamming", "hankel", "hanning", "hggroup", - "hidden", "hilb", "hist", "histc", "hold", "hot", - "hotelling_test", "housh", "hsv", "hurst", - "hygecdf", "hygeinv", "hygepdf", "hygernd", - "idivide", "ifftshift", "image", "imagesc", - "imfinfo", "imread", "imshow", "imwrite", "index", - "info", "inpolygon", "inputname", "interpft", - "interpn", "intersect", "invhilb", "iqr", "isa", - "isdefinite", "isdir", "is_duplicate_entry", - "isequal", "isequalwithequalnans", "isfigure", - "ishermitian", "ishghandle", "is_leap_year", - "isletter", "ismac", "ismember", "ispc", "isprime", - "isprop", "isscalar", "issquare", "isstrprop", - "issymmetric", "isunix", "is_valid_file_id", - "isvector", "jet", "kendall", - "kolmogorov_smirnov_cdf", - "kolmogorov_smirnov_test", "kruskal_wallis_test", - "krylov", "kurtosis", "laplace_cdf", "laplace_inv", - "laplace_pdf", "laplace_rnd", "legend", "legendre", - "license", "line", "linkprop", "list_primes", - "loadaudio", "loadobj", "logistic_cdf", - "logistic_inv", "logistic_pdf", "logistic_rnd", - "logit", "loglog", "loglogerr", "logm", "logncdf", - "logninv", "lognpdf", "lognrnd", "logspace", - "lookfor", "ls_command", "lsqnonneg", "magic", - "mahalanobis", "manova", "matlabroot", - "mcnemar_test", "mean", "meansq", "median", "menu", - "mesh", "meshc", "meshgrid", "meshz", "mexext", - "mget", "mkpp", "mode", "moment", "movefile", - "mpoles", "mput", "namelengthmax", "nargchk", - "nargoutchk", "nbincdf", "nbininv", "nbinpdf", - "nbinrnd", "nchoosek", "ndgrid", "newplot", "news", - "nonzeros", "normcdf", "normest", "norminv", - "normpdf", "normrnd", "now", "nthroot", "null", - "ocean", "ols", "onenormest", "optimget", - "optimset", "orderfields", "orient", "orth", - "pack", "pareto", "parseparams", "pascal", "patch", - "pathdef", "pcg", "pchip", "pcolor", "pcr", - "peaks", "periodogram", "perl", "perms", "pie", - "pink", "planerot", "playaudio", "plot", - "plotmatrix", "plotyy", "poisscdf", "poissinv", - "poisspdf", "poissrnd", "polar", "poly", - "polyaffine", "polyarea", "polyderiv", "polyfit", - "polygcd", "polyint", "polyout", "polyreduce", - "polyval", "polyvalm", "postpad", "powerset", - "ppder", "ppint", "ppjumps", "ppplot", "ppval", - "pqpnonneg", "prepad", "primes", "print", - "print_usage", "prism", "probit", "qp", "qqplot", - "quadcc", "quadgk", "quadl", "quadv", "quiver", - "qzhess", "rainbow", "randi", "range", "rank", - "ranks", "rat", "reallog", "realpow", "realsqrt", - "record", "rectangle_lw", "rectangle_sw", - "rectint", "refresh", "refreshdata", - "regexptranslate", "repmat", "residue", "ribbon", - "rindex", "roots", "rose", "rosser", "rotdim", - "rref", "run", "run_count", "rundemos", "run_test", - "runtests", "saveas", "saveaudio", "saveobj", - "savepath", "scatter", "secd", "semilogx", - "semilogxerr", "semilogy", "semilogyerr", - "setaudio", "setdiff", "setfield", "setxor", - "shading", "shift", "shiftdim", "sign_test", - "sinc", "sind", "sinetone", "sinewave", "skewness", - "slice", "sombrero", "sortrows", "spaugment", - "spconvert", "spdiags", "spearman", "spectral_adf", - "spectral_xdf", "specular", "speed", "spencer", - "speye", "spfun", "sphere", "spinmap", "spline", - "spones", "sprand", "sprandn", "sprandsym", - "spring", "spstats", "spy", "sqp", "stairs", - "statistics", "std", 
"stdnormal_cdf", - "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd", - "stem", "stft", "strcat", "strchr", "strjust", - "strmatch", "strread", "strsplit", "strtok", - "strtrim", "strtrunc", "structfun", "studentize", - "subplot", "subsindex", "subspace", "substr", - "substruct", "summer", "surf", "surface", "surfc", - "surfl", "surfnorm", "svds", "swapbytes", - "sylvester_matrix", "symvar", "synthesis", "table", - "tand", "tar", "tcdf", "tempdir", "tempname", - "test", "text", "textread", "textscan", "tinv", - "title", "toeplitz", "tpdf", "trace", "trapz", - "treelayout", "treeplot", "triangle_lw", - "triangle_sw", "tril", "trimesh", "triplequad", - "triplot", "trisurf", "triu", "trnd", "tsearchn", - "t_test", "t_test_regression", "type", "unidcdf", - "unidinv", "unidpdf", "unidrnd", "unifcdf", - "unifinv", "unifpdf", "unifrnd", "union", "unique", - "unix", "unmkpp", "unpack", "untabify", "untar", - "unwrap", "unzip", "u_test", "validatestring", - "vander", "var", "var_test", "vech", "ver", - "version", "view", "voronoi", "voronoin", - "waitforbuttonpress", "wavread", "wavwrite", - "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday", - "welch_test", "what", "white", "whitebg", - "wienrnd", "wilcoxon_test", "wilkinson", "winter", - "xlabel", "xlim", "ylabel", "yulewalker", "zip", - "zlabel", "z_test") - - loadable_kw = ( - "airy", "amd", "balance", "besselh", "besseli", - "besselj", "besselk", "bessely", "bitpack", - "bsxfun", "builtin", "ccolamd", "cellfun", - "cellslices", "chol", "choldelete", "cholinsert", - "cholinv", "cholshift", "cholupdate", "colamd", - "colloc", "convhulln", "convn", "csymamd", - "cummax", "cummin", "daspk", "daspk_options", - "dasrt", "dasrt_options", "dassl", "dassl_options", - "dbclear", "dbdown", "dbstack", "dbstatus", - "dbstop", "dbtype", "dbup", "dbwhere", "det", - "dlmread", "dmperm", "dot", "eig", "eigs", - "endgrent", "endpwent", "etree", "fft", "fftn", - "fftw", "filter", "find", "full", "gcd", - "getgrent", "getgrgid", "getgrnam", "getpwent", - "getpwnam", "getpwuid", "getrusage", "givens", - "gmtime", "gnuplot_binary", "hess", "ifft", - "ifftn", "inv", "isdebugmode", "issparse", "kron", - "localtime", "lookup", "lsode", "lsode_options", - "lu", "luinc", "luupdate", "matrix_type", "max", - "min", "mktime", "pinv", "qr", "qrdelete", - "qrinsert", "qrshift", "qrupdate", "quad", - "quad_options", "qz", "rand", "rande", "randg", - "randn", "randp", "randperm", "rcond", "regexp", - "regexpi", "regexprep", "schur", "setgrent", - "setpwent", "sort", "spalloc", "sparse", "spparms", - "sprank", "sqrtm", "strfind", "strftime", - "strptime", "strrep", "svd", "svd_driver", "syl", - "symamd", "symbfact", "symrcm", "time", "tsearch", - "typecast", "urlread", "urlwrite") - - mapping_kw = ( - "abs", "acos", "acosh", "acot", "acoth", "acsc", - "acsch", "angle", "arg", "asec", "asech", "asin", - "asinh", "atan", "atanh", "beta", "betainc", - "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos", - "cosh", "cot", "coth", "csc", "csch", "erf", "erfc", - "erfcx", "erfinv", "exp", "finite", "fix", "floor", - "fmod", "gamma", "gammainc", "gammaln", "imag", - "isalnum", "isalpha", "isascii", "iscntrl", - "isdigit", "isfinite", "isgraph", "isinf", - "islower", "isna", "isnan", "isprint", "ispunct", - "isspace", "isupper", "isxdigit", "lcm", "lgamma", - "log", "lower", "mod", "real", "rem", "round", - "roundb", "sec", "sech", "sign", "sin", "sinh", - "sqrt", "tan", "tanh", "toascii", "tolower", "xor") - - builtin_consts = ( - "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA", - 
"OCTAVE_HOME", "OCTAVE_VERSION", "PAGER", - "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET", - "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO", - "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE", - "WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED", - "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG", - "WSTOPSIG", "WTERMSIG", "WUNTRACED") - - tokens = { - 'root': [ - (r'%\{\s*\n', Comment.Multiline, 'percentblockcomment'), - (r'#\{\s*\n', Comment.Multiline, 'hashblockcomment'), - (r'[%#].*$', Comment), - (r'^\s*function\b', Keyword, 'deffunc'), - - # from 'iskeyword' on hg changeset 8cc154f45e37 - (words(( - '__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', - 'continue', 'do', 'else', 'elseif', 'end', 'end_try_catch', - 'end_unwind_protect', 'endclassdef', 'endevents', 'endfor', - 'endfunction', 'endif', 'endmethods', 'endproperties', 'endswitch', - 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', - 'methods', 'otherwise', 'persistent', 'properties', 'return', - 'set', 'static', 'switch', 'try', 'until', 'unwind_protect', - 'unwind_protect_cleanup', 'while'), suffix=r'\b'), - Keyword), - - (words(builtin_kw + command_kw + function_kw + loadable_kw + mapping_kw, - suffix=r'\b'), Name.Builtin), - - (words(builtin_consts, suffix=r'\b'), Name.Constant), - - # operators in Octave but not Matlab: - (r'-=|!=|!|/=|--', Operator), - # operators: - (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), - # operators in Octave but not Matlab requiring escape for re: - (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*', Operator), - # operators requiring escape for re: - (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), - - - # punctuation: - (r'[\[\](){}:@.,]', Punctuation), - (r'=|:|;', Punctuation), - - (r'"[^"]*"', String), - - (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), - (r'\d+[eEf][+-]?[0-9]+', Number.Float), - (r'\d+', Number.Integer), - - # quote can be transpose, instead of string: - # (not great, but handles common cases...) - (r'(?<=[\w)\].])\'+', Operator), - (r'(?|<=|>=|&&|&|~|\|\|?', Operator), - # operators requiring escape for re: - (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), - - # punctuation: - (r'[\[\](){}@.,=:;]+', Punctuation), - - (r'"[^"]*"', String), - - # quote can be transpose, instead of string: - # (not great, but handles common cases...) - (r'(?<=[\w)\].])\'+', Operator), - (r'(?DownloadMicrosoftWordCrack

            DOWNLOAD ✏ ✏ ✏ https://geags.com/2uCpX5



            -
            -It is download includes the popular components of MS Word, MS Excel, ... If you are looking for Crack software for Windows 'Click Here Now'. 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Driver Apx Samsung P7510 12.md b/spaces/quidiaMuxgu/Expedit-SAM/Driver Apx Samsung P7510 12.md deleted file mode 100644 index 74255c411acf3050bda8d7bab6a8626a5593208c..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Driver Apx Samsung P7510 12.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Driver Apx Samsung P7510 12


            DOWNLOAD >> https://geags.com/2uCr6j



            -
            -http://mattiasolsson.com/editorial/galaxy/photoshop-nokia-5233-apps ... http://mattiasolsson.com/editorial/galaxy/samsung-s8003-mobile-modem-driver ... http://mattiasolsson.com/editorial/galaxy/12-volt-car-charger-for-iphone ... http://mattiasolsson.com/editorial/galaxy/samsung-galaxy-tab-10.1-p7500-vs-p7510 ... 1fdad05405
            -
            -
            -

            diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Facebook Download Crack VERIFIEDer Free.md b/spaces/quidiaMuxgu/Expedit-SAM/Facebook Download Crack VERIFIEDer Free.md deleted file mode 100644 index a7ed5edf3205aa784e3382d366b298a078892971..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Facebook Download Crack VERIFIEDer Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Facebook Download Cracker Free


            Download File ››› https://geags.com/2uCsZT



            - -Hack any Facebook account password online for free. ... Untuk bisa download password yang katanya sudahUnlike other account hacking methods, HPSâ„¢ works ... Best 7 Facebook Password Crackers and Hackers. com/radio/facegeek. 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Jack Fleitman Negocios Exitosos Pdf 12 Free.md b/spaces/quidiaMuxgu/Expedit-SAM/Jack Fleitman Negocios Exitosos Pdf 12 Free.md deleted file mode 100644 index aa41c4782580850525c8bad74f54f4b8b40ff78b..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Jack Fleitman Negocios Exitosos Pdf 12 Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

            jack fleitman negocios exitosos pdf 12


            DOWNLOAD ⚙⚙⚙ https://geags.com/2uCqSg



            - -Come join me Saturday evening at the store for a women's only instore clinic. cdc97e7522. Jack Fleitman Negocios Exitosos Pdf 12 homepage ... 1fdad05405
            -
            -
            -

            diff --git a/spaces/qwe3107231/Real-CUGAN/weights_v3/2233.js b/spaces/qwe3107231/Real-CUGAN/weights_v3/2233.js deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/rachana219/MODT2/trackers/strongsort/deep/models/shufflenetv2.py b/spaces/rachana219/MODT2/trackers/strongsort/deep/models/shufflenetv2.py deleted file mode 100644 index 3ff879e8d731b4cb16a77cfa6892035656405f71..0000000000000000000000000000000000000000 --- a/spaces/rachana219/MODT2/trackers/strongsort/deep/models/shufflenetv2.py +++ /dev/null @@ -1,262 +0,0 @@ -""" -Code source: https://github.com/pytorch/vision -""" -from __future__ import division, absolute_import -import torch -import torch.utils.model_zoo as model_zoo -from torch import nn - -__all__ = [ - 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', - 'shufflenet_v2_x2_0' -] - -model_urls = { - 'shufflenetv2_x0.5': - 'https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth', - 'shufflenetv2_x1.0': - 'https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth', - 'shufflenetv2_x1.5': None, - 'shufflenetv2_x2.0': None, -} - - -def channel_shuffle(x, groups): - batchsize, num_channels, height, width = x.data.size() - channels_per_group = num_channels // groups - - # reshape - x = x.view(batchsize, groups, channels_per_group, height, width) - - x = torch.transpose(x, 1, 2).contiguous() - - # flatten - x = x.view(batchsize, -1, height, width) - - return x - - -class InvertedResidual(nn.Module): - - def __init__(self, inp, oup, stride): - super(InvertedResidual, self).__init__() - - if not (1 <= stride <= 3): - raise ValueError('illegal stride value') - self.stride = stride - - branch_features = oup // 2 - assert (self.stride != 1) or (inp == branch_features << 1) - - if self.stride > 1: - self.branch1 = nn.Sequential( - self.depthwise_conv( - inp, inp, kernel_size=3, stride=self.stride, padding=1 - ), - nn.BatchNorm2d(inp), - nn.Conv2d( - inp, - branch_features, - kernel_size=1, - stride=1, - padding=0, - bias=False - ), - nn.BatchNorm2d(branch_features), - nn.ReLU(inplace=True), - ) - - self.branch2 = nn.Sequential( - nn.Conv2d( - inp if (self.stride > 1) else branch_features, - branch_features, - kernel_size=1, - stride=1, - padding=0, - bias=False - ), - nn.BatchNorm2d(branch_features), - nn.ReLU(inplace=True), - self.depthwise_conv( - branch_features, - branch_features, - kernel_size=3, - stride=self.stride, - padding=1 - ), - nn.BatchNorm2d(branch_features), - nn.Conv2d( - branch_features, - branch_features, - kernel_size=1, - stride=1, - padding=0, - bias=False - ), - nn.BatchNorm2d(branch_features), - nn.ReLU(inplace=True), - ) - - @staticmethod - def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False): - return nn.Conv2d( - i, o, kernel_size, stride, padding, bias=bias, groups=i - ) - - def forward(self, x): - if self.stride == 1: - x1, x2 = x.chunk(2, dim=1) - out = torch.cat((x1, self.branch2(x2)), dim=1) - else: - out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) - - out = channel_shuffle(out, 2) - - return out - - -class ShuffleNetV2(nn.Module): - """ShuffleNetV2. - - Reference: - Ma et al. ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design. ECCV 2018. - - Public keys: - - ``shufflenet_v2_x0_5``: ShuffleNetV2 x0.5. - - ``shufflenet_v2_x1_0``: ShuffleNetV2 x1.0. - - ``shufflenet_v2_x1_5``: ShuffleNetV2 x1.5. - - ``shufflenet_v2_x2_0``: ShuffleNetV2 x2.0. 
- """ - - def __init__( - self, num_classes, loss, stages_repeats, stages_out_channels, **kwargs - ): - super(ShuffleNetV2, self).__init__() - self.loss = loss - - if len(stages_repeats) != 3: - raise ValueError( - 'expected stages_repeats as list of 3 positive ints' - ) - if len(stages_out_channels) != 5: - raise ValueError( - 'expected stages_out_channels as list of 5 positive ints' - ) - self._stage_out_channels = stages_out_channels - - input_channels = 3 - output_channels = self._stage_out_channels[0] - self.conv1 = nn.Sequential( - nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False), - nn.BatchNorm2d(output_channels), - nn.ReLU(inplace=True), - ) - input_channels = output_channels - - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - stage_names = ['stage{}'.format(i) for i in [2, 3, 4]] - for name, repeats, output_channels in zip( - stage_names, stages_repeats, self._stage_out_channels[1:] - ): - seq = [InvertedResidual(input_channels, output_channels, 2)] - for i in range(repeats - 1): - seq.append( - InvertedResidual(output_channels, output_channels, 1) - ) - setattr(self, name, nn.Sequential(*seq)) - input_channels = output_channels - - output_channels = self._stage_out_channels[-1] - self.conv5 = nn.Sequential( - nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False), - nn.BatchNorm2d(output_channels), - nn.ReLU(inplace=True), - ) - self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1)) - - self.classifier = nn.Linear(output_channels, num_classes) - - def featuremaps(self, x): - x = self.conv1(x) - x = self.maxpool(x) - x = self.stage2(x) - x = self.stage3(x) - x = self.stage4(x) - x = self.conv5(x) - return x - - def forward(self, x): - f = self.featuremaps(x) - v = self.global_avgpool(f) - v = v.view(v.size(0), -1) - - if not self.training: - return v - - y = self.classifier(v) - - if self.loss == 'softmax': - return y - elif self.loss == 'triplet': - return y, v - else: - raise KeyError("Unsupported loss: {}".format(self.loss)) - - -def init_pretrained_weights(model, model_url): - """Initializes model with pretrained weights. - - Layers that don't match with pretrained layers in name or size are kept unchanged. 
- """ - if model_url is None: - import warnings - warnings.warn( - 'ImageNet pretrained weights are unavailable for this model' - ) - return - pretrain_dict = model_zoo.load_url(model_url) - model_dict = model.state_dict() - pretrain_dict = { - k: v - for k, v in pretrain_dict.items() - if k in model_dict and model_dict[k].size() == v.size() - } - model_dict.update(pretrain_dict) - model.load_state_dict(model_dict) - - -def shufflenet_v2_x0_5(num_classes, loss='softmax', pretrained=True, **kwargs): - model = ShuffleNetV2( - num_classes, loss, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs - ) - if pretrained: - init_pretrained_weights(model, model_urls['shufflenetv2_x0.5']) - return model - - -def shufflenet_v2_x1_0(num_classes, loss='softmax', pretrained=True, **kwargs): - model = ShuffleNetV2( - num_classes, loss, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs - ) - if pretrained: - init_pretrained_weights(model, model_urls['shufflenetv2_x1.0']) - return model - - -def shufflenet_v2_x1_5(num_classes, loss='softmax', pretrained=True, **kwargs): - model = ShuffleNetV2( - num_classes, loss, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs - ) - if pretrained: - init_pretrained_weights(model, model_urls['shufflenetv2_x1.5']) - return model - - -def shufflenet_v2_x2_0(num_classes, loss='softmax', pretrained=True, **kwargs): - model = ShuffleNetV2( - num_classes, loss, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs - ) - if pretrained: - init_pretrained_weights(model, model_urls['shufflenetv2_x2.0']) - return model diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Chess game rules in hindi pdf download Discover the secrets and history of chess.md b/spaces/raedeXanto/academic-chatgpt-beta/Chess game rules in hindi pdf download Discover the secrets and history of chess.md deleted file mode 100644 index 7f62d7eb0e84e56bfa435257aef26d5d2c509db6..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Chess game rules in hindi pdf download Discover the secrets and history of chess.md +++ /dev/null @@ -1,154 +0,0 @@ -
            -

            How to Crack Siemens NX 12.0.1 Win64-SSQ

            -

            If you are looking for a way to crack Siemens NX 12.0.1 Win64-SSQ, you have come to the right place. In this article, I will show you how to download, install, and run the cracked version of this powerful software for engineering design and manufacturing.

            -

            But before we get into the details, let me give you some background information about Siemens NX 12.0.1 and SSQ.

            -

            CRACK Siemens.NX.12.0.1.Win64-SSQ


            Downloadhttps://tinourl.com/2uL0Nz



            -

            What is Siemens NX 12.0.1?

            -

            Siemens NX 12.0.1 is a maintenance release of Siemens NX 12.0, which is one of the most advanced and integrated software solutions for product development and manufacturing.

            -

            According to Siemens, NX 12.0.1 is an accumulation of fixes and small enhancements built for installation on top of Windows 64-Bit and Linux 64-Bit images of NX 12.0.

            -

            Some of the features and benefits of Siemens NX 12.0.1 are:

            -
              -
            • Multi-disciplinary design: NX 12 provides a scalable multidisciplinary platform that unites electrical, mechanical, and control systems through close integration with Mentor Graphics Capital Harness and Xpedition.
            • -
            • Generative design: NX 12 enables engineers to find the best design to meet requirements sooner in the development process by running through more design possibilities than a human can.
            • -
            • Design for additive manufacturing: NX 12 meets the unique challenges of designing, optimizing, and building components using the latest additive manufacturing methods, such as incorporating lattice structures into design.
            • -
            • Convergent modeling: NX 12 allows users to work directly with facet geometry such as lattice structures, saving time and effort from data conversion.
            • -
            -

            Requirements and compatibility of Siemens NX 12.0.1

            -

            To run Siemens NX 12.0.1, you need a system that meets the following minimum requirements:

            -
              -
            • Operating system: Windows 10 (64-bit) or Linux (64-bit)
            • -
            • Processor: Intel Core i5 or higher
            • -
            • Memory: 8 GB or more
            • -
            • Graphics card: NVIDIA Quadro or AMD Radeon Pro with OpenGL support
            • -
            • Hard disk space: 10 GB or more
            • -
            • Internet connection: Required for installation and activation
            • -
            -

            You can also check the compatibility matrix of Siemens NX 12.0.1 with other software products here: https://docs.plm.automation.siemens.com/docs/nx/12/en_US/release_notes.pdf

            -

            What is SSQ?

            -

            SSQ stands for SolidSQUAD, which is a group of hackers who crack software products from various vendors, such as Siemens, Dassault Systemes, Autodesk, Adobe, etc.

            -

            Siemens PLM NX 12.0.1 free download
            -Siemens NX 12.0.1 MP02
            -Siemens NX 12.0.1 Docs Update Win64
            -Siemens NX 12.0.1 Win64 iso
            -Siemens NX 12.0.1 product design
            -Siemens NX 12.0.1 engineering design
            -Siemens NX 12.0.1 production design
            -Siemens NX 12.0.1 multi-disciplinary simulation
            -Siemens NX 12.0.1 conceptual design
            -Siemens NX 12.0.1 3D modeling documentation
            -Siemens NX 12.0.1 molding solutions
            -Siemens NX 12.0.1 machining solutions
            -Siemens NX 12.0.1 quality inspection solutions
            -Siemens NX 12.0.1 2D design features
            -Siemens NX 12.0.1 forms design tools
            -Siemens NX 12.0.1 aerospace design tools
            -Siemens NX 12.0.1 concept design acceleration
            -Siemens NX 12.0.1 layout visualization
            -Siemens NX 12.0.1 offline installer setup
            -Siemens NX 12.0.1 compatibility architecture
            -How to install Siemens NX 12.0.1 Win64
            -How to crack Siemens NX 12.0.1 Win64 license file
            -How to change Siemens NX 12.0.1 Win64 port-host
            -Download Siemens NX 12.0.1 Win64 torrent
            -Download Siemens NX 12.0.1 Win64 magnet link
            -Download Siemens NX 12.0.1 Win64 SolidTorrents
            -Download Siemens NX 12.0.1 Win64 MechDesign
            -Download Siemens NX 12.0.1 Win64 YouTube video
            -Download Siemens NX 12.0.1 Win64 corrupt zip file
            -Download Siemens NX 12

            -

            The origin and purpose of SSQ

            -

            The origin of SSQ is not clear, but some sources claim that it was founded in Russia in the early 2000s by a former employee of Dassault Systemes who was unhappy with his salary and working conditions.

            -

            The purpose of SSQ is to provide free access to expensive software products that are otherwise unaffordable or inaccessible for many users around the world.

            -

            SSQ claims that they do not intend to harm the software vendors or their customers, but rather to expose their vulnerabilities and flaws, and to encourage them to improve their products and services.

            -

            The risks and challenges of using SSQ

            -

            While using SSQ cracks may seem tempting for some users who want to save money or bypass restrictions, there are also many risks and challenges involved.

            -

            Some of the risks and challenges of using SSQ cracks are:

            -
              -
            • Legal issues: Using cracked software is illegal in most countries and can result in fines or lawsuits from the software vendors or authorities.
            • -
            • Security issues: Using cracked software can expose your system to malware, viruses, spyware, ransomware, or other threats that can compromise your data or identity.
            • -
            • Quality issues: Using cracked software can affect the performance, stability, functionality, or compatibility of your system or other software products.
            • -
            • Ethical issues: Using cracked software can be considered unethical or unfair to the software vendors who invest time, money, and effort into developing their products.
            • -
            • Moral issues: Using cracked software can be considered immoral or dishonest to yourself or others who pay for their software licenses.
            • -
            -

            How to crack Siemens NX 12.0.1 Win64-SSQ?

            -

            If you still want to crack Siemens NX 12.0.1 Win64-SSQ despite the risks and challenges mentioned above, here are the steps you need to follow:

            -

            Step 1: Download the software and the crack files

            -

            The first step is to download the original software installation package from the Siemens PLM download server here: https://download.industrysoftware.automation.siemens.com/

            -

            You will need a valid WebKey account to access this link.

            -

            You will also need to download the crack files from SSQ here: https://solidworks.org/siemens-nx-2020-crack-download/

            -

            You will need a torrent client such as uTorrent or BitTorrent to download this file.

            -

            Step 2: Install the software and copy the crack files

            -

            The second step is to install the software on your system following these instructions:

            -
              -
            1. Extract the downloaded installation package using WinRAR or 7-Zip.
            2. -
            3. Run setup.exe as administrator.
            4. -
            5. Select "Install NX" option.
            6. -
            7. Select "Typical" installation type.
            8. -
            9. Select "I accept" option for license agreement.
            10. -
            11. Select "Next" option for configuration summary.
            12. -
            13. Select "Install" option for installation progress.
            14. -
            15. Select "Finish" option for installation completion.
            16. -
            -

            The next step is to copy the crack files from SSQ following these instructions:

            -
              -
            1. Extract the downloaded crack file using WinRAR or 7-Zip.
            2. -
            3. Navigate to \NX_12001_Win64_SSQ\NX_12001_Win64_SSQ folder.
            4. -
            5. Copy all files from \NX_12001_Win64_SSQ\NX_12001_Win64_SSQ folder except SPLM_License_Server folder into \Program Files\Siemens\NX folder (replace existing files).
            6. -
            7. Navigate to \NX_12001_Win64_SSQ\SPLM_License_Server folder.
            8. -
            9. Copy all files from \NX_12001_Win64_SSQ\SPLM_License_Server folder into \Program Files\Siemens\PLMLicenseServer folder (replace existing files).
            10. -
            -

            Step 3: Run the software and enjoy the full version

            -

            The final step is to run the software on your system following these instructions:

            -
              -
            1. Navigate to \Program Files\Siemens\PLMLicenseServer folder.
            2. -
            3. Run lmtools.exe as administrator.
            4. -
            5. Select "Config Services" tab.
            6. -
            7. Select "Browse" option for Path to lmgrd.exe file field.
            8. -
            9. Select \Program Files\Siemens\PLMLicenseServer\lmgrd.exe file.
            10. -Path to license file field. -
            11. Select \Program Files\Siemens\PLMLicenseServer\license.dat file.
            12. -
            13. Select "Browse" option for Path to debug log file field.
            14. -
            15. Select \Program Files\Siemens\PLMLicenseServer\debug.log file.
            16. -
            17. Check "Use Services" and "Start Server at Power Up" options.
            18. -
            19. Select "Save Service" option.
            20. -
            21. Select "Start/Stop/Reread" tab.
            22. -
            23. Select "Start Server" option.
            24. -
            25. Select "Server Status" tab.
            26. -
            27. Select "Perform Status Enquiry" option.
            28. -
            29. Check if the license server is running and the licenses are available.
            30. -
            31. Close lmtools.exe.
            32. -
            33. Navigate to \Program Files\Siemens\NX folder.
            34. -
            35. Run ugraf.exe as administrator.
            36. -
            37. Select "File > Utilities > License Options" option.
            38. -
            39. Select "Browse" option for License File field.
            40. -
            41. Select \Program Files\Siemens\PLMLicenseServer\license.dat file.
            42. -
            43. Select "OK" option for License Options dialog box.
            44. -
            45. Enjoy the full version of Siemens NX 12.0.1 Win64-SSQ.
            46. -
            -

            Conclusion

            -

            In this article, I have shown you how to crack Siemens NX 12.0.1 Win64-SSQ using the crack files from SSQ. I have also explained what Siemens NX 12.0.1 and SSQ are, and what are the features, benefits, requirements, and compatibility of Siemens NX 12.0.1. I have also discussed the risks and challenges of using SSQ cracks, and why you should be careful and responsible when using them.

            -

            I hope you have found this article useful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

            -

            Disclaimer and warning

            -

            This article is for educational and informational purposes only. I do not condone or encourage the use of cracked software or the violation of any software license agreements. Using cracked software is illegal, unethical, immoral, and risky. You are solely responsible for any consequences that may arise from using cracked software or following the instructions in this article. Use at your own risk!

            -

            FAQs

            -
              -
            • Q: What is the difference between Siemens NX 12.0 and Siemens NX 12.0.1?
            • -
            • A: Siemens NX 12.0 is the major release of Siemens NX 12 series, while Siemens NX 12.0.1 is a maintenance release that fixes some bugs and adds some enhancements to Siemens NX 12.0.
            • -
            • Q: What is the difference between Win64 and SSQ in Siemens NX 12.0.1 Win64-SSQ?
            • -
            • A: Win64 means that the software is compatible with Windows 64-bit operating system, while SSQ means that the software is cracked by SolidSQUAD group.
            • -
            • Q: How can I get a legitimate license for Siemens NX 12.0.1?
            • -
            • A: You can get a legitimate license for Siemens NX 12.0.1 by purchasing it from the official website of Siemens PLM Software here: https://www.plm.automation.siemens.com/global/en/products/nx/
            • -
            • Q: How can I update Siemens NX 12.0.1 to the latest version?
            • -
            • A: You can update Siemens NX 12.0.1 to the latest version by downloading and installing the latest maintenance release from the Siemens PLM download server here: https://download.industrysoftware.automation.siemens.com/
            • -
            • Q: How can I uninstall Siemens NX 12.0.1 Win64-SSQ?
            • -
            • A: You can uninstall Siemens NX 12.0.1 Win64-SSQ by following these steps:
            • -
                -
              1. Navigate to \Program Files\Siemens\NX folder.
              2. -
              3. Run setup.exe as administrator.
              4. -
              5. Select "Remove NX" option.
              6. -
              7. Select "Next" option for removal progress.
              8. -
              9. Select "Finish" option for removal completion.
              10. -
            -

            0a6ba089eb
            -
            -
            \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download Microsoft Office 2013 Full Crack Sinhvienit Best Practices and Recommendations.md b/spaces/raedeXanto/academic-chatgpt-beta/Download Microsoft Office 2013 Full Crack Sinhvienit Best Practices and Recommendations.md deleted file mode 100644 index 198d2416a7cc0572fc1dc19707474b91a1050c3f..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download Microsoft Office 2013 Full Crack Sinhvienit Best Practices and Recommendations.md +++ /dev/null @@ -1,113 +0,0 @@ -
            -

            Download Microsoft Office 2013 Full Crack Sinhvienit

            -

            Are you looking for a way to get Microsoft Office 2013 for free? Do you want to enjoy all the features and benefits of this powerful suite of productivity applications without paying a dime? If yes, then you are in luck! In this article, we will show you how to download Microsoft Office 2013 full crack sinhvienit from reliable sources and activate it without any hassle.

            -

            download microsoft office 2013 full crack sinhvienit


            DOWNLOAD ✵✵✵ https://tinourl.com/2uL58I



            -

            Microsoft Office 2013 is one of the most popular and widely used office software in the world. It offers a range of applications that can help you create and edit documents, spreadsheets, presentations, databases, emails, and more. Whether you are a student, a professional, or a home user, you can use Microsoft Office 2013 to accomplish your tasks efficiently and effectively.

            -

            However, Microsoft Office 2013 is not free. You need to purchase a product key or a subscription plan to activate it legally. This can be quite expensive for some users who may not have the budget or the need for such an investment. That's why many people look for alternative ways to get Microsoft Office 2013 for free.

            -

            One of these ways is to download Microsoft Office 2013 full crack sinhvienit from online sources. This means that you can get a cracked version of Microsoft Office 2013 that has been modified to bypass the activation process and work without a product key. This way, you can save money and avoid any activation issues that may arise from using an invalid or expired key.

            -

            But how do you download Microsoft Office 2013 full crack sinhvienit safely and easily? How do you install it and run it on your computer? How do you make sure that it works properly and does not cause any problems? Don't worry, we have got you covered! In this article, we will guide you through the whole process step by step.

            -

            So what are you waiting for? Read on to find out how to download Microsoft Office 2013 full crack sinhvienit today!

            -

            What is Microsoft Office 2013?

            -

            Before we get into the details of how to download Microsoft Office 2013 full crack sinhvienit, let's take a moment to review what Microsoft Office 2013 is and what it can do for you.

            -

            How to download Microsoft Office 2013 full crack sinhvienit?

            -

            To download Microsoft Office 2013 full crack sinhvienit, you need to follow these steps:

            -
              -
            1. Download Office 2013 professional 32bit or 64bit from one of the links below, depending on your system requirements. These are the official links from Microsoft that have been tested and verified by many users.
            2. -
            3. Download KMSpico 10.1.6, a tool that can crack Office 2013 without a product key. This is a software that can activate Office 2013 by emulating a KMS server on your computer.
            4. -
            5. Install Office 2013 by running the setup file and following the instructions on the screen. You can choose to customize your installation options or use the default ones.
            6. -
            7. Run KMSpico 10.1.6 by right-clicking on it and choosing Run as administrator. Wait for a few seconds until you see a green check mark and a message saying "Completed". This means that Office 2013 has been activated successfully.
            8. -
            -

            Congratulations! You have just downloaded Microsoft Office 2013 full crack sinhvienit and activated it for free. You can now enjoy all the features and benefits of this powerful suite of productivity applications.

            -

            download ms office 2013 full crack sinhvienit
            -download microsoft office 2013 professional plus crack sinhvienit
            -download microsoft office 2013 full version crack sinhvienit
            -download microsoft office 2013 64 bit full crack sinhvienit
            -download microsoft office 2013 32 bit full crack sinhvienit
            -download microsoft office 2013 activator crack sinhvienit
            -download microsoft office 2013 full crack keygen sinhvienit
            -download microsoft office 2013 full crack iso sinhvienit
            -download microsoft office 2013 full crack google drive sinhvienit
            -download microsoft office 2013 full crack free sinhvienit
            -download microsoft office 2013 full crack for windows 10 sinhvienit
            -download microsoft office 2013 full crack for windows 7 sinhvienit
            -download microsoft office 2013 full crack for windows 8.1 sinhvienit
            -download microsoft office 2013 full crack for mac sinhvienit
            -download microsoft office 2013 full crack offline sinhvienit
            -download microsoft office 2013 full crack online sinhvienit
            -download microsoft office 2013 full crack one link sinhvienit
            -download microsoft office 2013 full crack one drive sinhvienit
            -download microsoft office 2013 full crack rar sinhvienit
            -download microsoft office 2013 full crack zip sinhvienit
            -download microsoft office 2013 full crack torrent sinhvienit
            -download microsoft office 2013 full crack mega.nz sinhvienit
            -download microsoft office 2013 full crack fshare.vn sinhvienit
            -download microsoft office 2013 full crack mediafire.com sinhvienit
            -download microsoft office 2013 full crack zippyshare.com sinhvienit
            -how to download microsoft office 2013 full crack sinhvienit
            -where to download microsoft office 2013 full crack sinhvienit
            -best site to download microsoft office 2013 full crack sinhvienit
            -safe way to download microsoft office 2013 full crack sinhvienit
            -easy way to download microsoft office 2013 full crack sinhvienit
            -fast way to download microsoft office 2013 full crack sinhvienit
            -free way to download microsoft office 2013 full crack sinhvienit
            -legal way to download microsoft office 2013 full crack sinhvienit
            -illegal way to download microsoft office 2013 full crack sinhvienit
            -pros and cons of downloading microsoft office 2013 full crack sinhvienit
            -benefits of downloading microsoft office 2013 full crack sinhvienit
            -drawbacks of downloading microsoft office 2013 full crack sinhvienit
            -risks of downloading microsoft office 2013 full crack sinhvienit
            -alternatives to downloading microsoft office 2013 full crack sinhvienit
            -reviews of downloading microsoft office 2013 full crack sinhvienit
            -testimonials of downloading microsoft office 2013 full crack sinhvienit
            -feedback of downloading microsoft office 2013 full crack sinhvienit
            -tips and tricks for downloading microsoft office 2013 full crack sinhvienit
            -guide and tutorial for downloading microsoft office 2013 full crack sinhvienit
            -steps and instructions for downloading microsoft office 2013 full crack sinhvienit
            -problems and solutions for downloading microsoft office 2013 full crack sinhvienit
            -errors and fixes for downloading microsoft office 2013 full crack sinhvienit
            -issues and troubleshooting for downloading microsoft office 2013 full crack sinhvienit

            -

            What are the benefits of using Microsoft Office 2013 full crack sinhvienit?

            -

            By using Microsoft Office 2013 full crack sinhvienit, you can enjoy many benefits that will enhance your productivity and performance. Here are some of them:

            -

            Improved performance

            -

            Microsoft Office 2013 full crack sinhvienit offers faster and smoother operation, better memory management, and reduced loading time. It also has improved compatibility with Windows 8 and other devices, such as tablets and smartphones.

            -

            Enhanced functionality

            -

            Microsoft Office 2013 full crack sinhvienit provides more features and tools for creating and editing documents, spreadsheets, presentations, and more. Some of these features are:

            -

            Ribbon interface and animations

            -

            Microsoft Office 2013 full crack sinhvienit has a sleek and modern ribbon interface and subtle animations that enhance the user experience. The ribbon interface allows you to access all the commands and options easily and quickly. The animations add visual interest and feedback to your actions.

            -

            PDF import and editing

            -

            Microsoft Office 2013 full crack sinhvienit allows you to import and edit PDF files directly in Word without converting them. You can also save your documents as PDF files with a few clicks.

            -

            Online sharing and collaboration

            -

            Microsoft Office 2013 full crack sinhvienit enables you to share and collaborate on files online via OneDrive, Skype, Yammer, and other platforms. You can also access your files from anywhere and from any device with an internet connection.

            -

            Compatibility with Windows 8 and other devices

            -

            Microsoft Office 2013 full crack sinhvienit is compatible with Windows 8 and its touch-screen features, such as swiping, tapping, zooming, and snapping. It also works well with other devices, such as tablets and smartphones, that run on Windows or Android operating systems.

            -

            Conclusion

            -

            In conclusion, Microsoft Office 2013 is one of the best office software in the market that offers a range of applications that can help you create and edit documents, spreadsheets, presentations, databases, emails, and more. By downloading Microsoft Office 2013 full crack sinhvienit from reliable sources and activating it with KMSpico 10.1.6, you can enjoy all the features and benefits of this powerful suite of productivity applications for free.

            -

            So what are you waiting for? Download Microsoft Office 2013 full crack sinhvienit today and boost your productivity and performance!

            -

            FAQs

            -
              -
            • Is Microsoft Office 2013 full crack sinhvienit safe to use?
              Yes, Microsoft Office 2013 full crack sinhvienit is safe to use if you download it from reliable sources and scan it with an antivirus program before installing it. However, you should be aware that using cracked software may violate Microsoft's terms of service and may expose you to legal risks.
            • -
            • What are the system requirements for Microsoft Office 2013?
              The system requirements for Microsoft Office 2013 are:
              • A computer with a 1 GHz or faster processor
              • A minimum of 1 GB RAM for 32-bit versions or 2 GB RAM for 64-bit versions
              • A minimum of 3 GB of available disk space
              • A monitor with a resolution of at least 1024 x 768 pixels
              • A DirectX10 graphics card
              • A Windows operating system (Windows 7 or later)
            • -

              What are the differences between Microsoft Office 2013 and Microsoft Office 2016 or 2019?

              -

              Microsoft Office 2016 and 2019 are newer versions of Microsoft Office that have some additional features and improvements over Microsoft Office 2013. Some of these features are:

              -
                -
              • Word: Text-to-speech, focus mode, translator, black theme, @mentions, and more.
              • -
              • Excel: Excel connectors, @mentions, PowerPivot enhancements, PowerQuery enhancements, feature to publish Excel in PowerBI, and more.
              • -
              • PowerPoint: Improvement for the digital pen, Morph transition, Zoom capabilities, 3D models and icons, SVG support, and more.
              • -
              • Outlook: Office Groups 365, intelligent inbox, updated contact cards, travel and delivery summary cards, and more.
              • -
              • OneNote: OneNote for Windows 10 app that replaces OneNote 2016 as the default OneNote experience.
              • -
              • Access: Large Number data type, new charts and templates, improved linked table manager, and more.
              • -
              • Project: Task progress at a glance, timeline labels, accessibility improvements, and more.
              • -
              • Visio: New starter diagrams, database reverse engineering tool, UML component diagrams, wireframe diagrams, and more.
              • -
              • Publisher: Improved zooming and scrolling performance.
              • -
              -

              However, Microsoft Office 2016 and 2019 also have some drawbacks compared to Microsoft Office 2013. Some of these drawbacks are:

              -
                -
              • Microsoft Office 2016 and 2019 are only compatible with Windows 10 or later operating systems.
              • -
              • Microsoft Office 2016 and 2019 require a one-time purchase or a subscription plan to activate them legally. They do not offer free updates or support like Microsoft Office 2013.
              • -
              • Microsoft Office 2016 and 2019 may not work well with some older add-ins or macros that were designed for Microsoft Office 2013 or earlier versions.
              • -
              -

              Therefore, depending on your needs and preferences, you may choose to stick with Microsoft Office 2013 full crack sinhvienit or upgrade to Microsoft Office 2016 or 2019.

              -

              - : Source: https://support.microsoft.com/en-us/office/what-s-new-in-office-2019-5077cbbe-0d94-44cc-b30e-654e37629b0c : Source: https://softwarekeep.com/help-center/office-2016-vs-office-2019-comparison

              0a6ba089eb
              -
              -
              \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Goldmaster Rc 451 Driver 15l Everything You Need to Know in One Place.md b/spaces/raedeXanto/academic-chatgpt-beta/Goldmaster Rc 451 Driver 15l Everything You Need to Know in One Place.md deleted file mode 100644 index a6276812c1406310865926630e1c6338465aa87b..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Goldmaster Rc 451 Driver 15l Everything You Need to Know in One Place.md +++ /dev/null @@ -1,110 +0,0 @@ -
              -

              Goldmaster Rc 451 Driver 15l: What You Need to Know

              -

              Are you looking for a powerful and portable speaker that can deliver high-quality sound for your music, movies, games, and more? If so, you might want to check out the Goldmaster Rc 451 Driver 15l, a wireless speaker that offers a lot of features and benefits for a reasonable price.

              -

              In this article, we will review the Goldmaster Rc 451 Driver 15l in detail and tell you everything you need to know about it. We will cover its features and benefits, how to use it, its pros and cons, customer reviews and ratings, and where to buy it. By the end of this article, you will have a clear idea of whether this speaker is right for you or not.

              -

              Goldmaster Rc 451 Driver 15l


              Downloadhttps://tinourl.com/2uKZGg



              -

              Features and Benefits of Goldmaster Rc 451 Driver 15l

              -

              The Goldmaster Rc 451 Driver 15l is a wireless speaker that has a lot of features and benefits that make it stand out from other speakers in its category. Here are some of them:

              -
                -
              • High-quality sound with 15 liters of volume: The speaker has a powerful driver that can produce clear and loud sound with deep bass and crisp treble. The speaker has a volume capacity of 15 liters, which means it can fill a large room or outdoor space with sound. You can enjoy your favorite tunes or watch movies with immersive sound quality.
              • -
              • Wireless connection with Bluetooth and USB: The speaker can connect wirelessly to your smartphone, tablet, laptop, or any other device that supports Bluetooth or USB. You can stream music or audio from your device without any hassle or wires. You can also use the USB port to play music from a flash drive or charge your device.
              • -
              • Remote control and LED display: The speaker comes with a remote control that lets you control the speaker from a distance. You can adjust the volume, change the sound mode, switch between sources, and more with the remote. The speaker also has an LED display that shows the battery level, mode, source, frequency, and time.
              • -
              • FM radio and AUX input: The speaker has a built-in FM radio that lets you listen to your favorite radio stations. You can scan and save up to 20 stations with the remote control. You can also use the AUX input to connect any device that has a headphone jack, such as an MP3 player or a CD player.
              • -
              • Compact and portable design: The speaker has a compact and portable design that makes it easy to carry around. It has a handle on the top and rubber feet on the bottom for stability. It weighs only 3 kg and measures 28 x 18 x 18 cm. You can take it anywhere you want without any trouble.
              • -
              • Affordable price and warranty: The speaker is available at an affordable price of $49.99 on Amazon. It also comes with a one-year warranty from the manufacturer. You can buy it with confidence and enjoy its features and benefits for a long time.
              • -
              -

              How to Use Goldmaster Rc 451 Driver 15l

              -

              The Goldmaster Rc 451 Driver 15l is easy to use and operate. Here are some steps on how to use it:

              -
                -
              1. How to connect it to your device: To connect the speaker to your device via Bluetooth, turn on the speaker and press the mode button until you see BT on the display. Then turn on the Bluetooth on your device and search for RC-451. Select it and pair it with your device. To connect via USB, plug in your flash drive or your device's charging cable into the USB port on the speaker. Press the mode button until you see USB on the display.
              2. -```html
              3. How to switch between different sources: To switch between different sources, press the mode button on the remote control or on the speaker. You can choose between BT, USB, FM, and AUX. The display will show the current source.
              4. -
              5. How to use the remote control: The remote control has various buttons that let you control the speaker. Here are some of them:
                  -
                • Power: Turn the speaker on or off.
                • -
                • Mute: Mute or unmute the sound.
                • -
                • Play/Pause: Play or pause the music or audio.
                • -
                • Prev/Next: Skip to the previous or next track or station.
                • -
                • Scan: Scan and save FM radio stations.
                • -
                • Time: Set the time and date on the display.
                • -
                • Alarm: Set an alarm on the speaker.
                • -
                -
              6. -
              7. How to charge and maintain it: To charge the speaker, plug in the power adapter into the DC input on the back of the speaker. The display will show CHG when charging and FULL when fully charged. The speaker has a built-in rechargeable battery that can last up to 4 hours of playtime. To maintain the speaker, keep it away from water, heat, and dust. Clean it with a soft cloth and avoid using harsh chemicals.
              8. -
              -

              Pros and Cons of Goldmaster Rc 451 Driver 15l

              -

              The Goldmaster Rc 451 Driver 15l has many pros and cons that you should consider before buying it. Here are some of them:

              - - - - - - - -
Pros | Cons
- Great sound quality | - Limited battery life
- Easy to use | - No microphone
- Versatile | - No equalizer
- Durable |
- Budget-friendly |
              -

              Customer Reviews and Ratings of Goldmaster Rc 451 Driver 15l

              -

              The Goldmaster Rc 451 Driver 15l has received many customer reviews and ratings from online platforms. Here is a summary of what they say:

              -

              Goldmaster Rc 451 Driver 15l download
              -Goldmaster Rc 451 Driver 15l installation
              -Goldmaster Rc 451 Driver 15l update
              -Goldmaster Rc 451 Driver 15l compatibility
              -Goldmaster Rc 451 Driver 15l troubleshooting
              -Goldmaster Rc 451 Driver 15l review
              -Goldmaster Rc 451 Driver 15l manual
              -Goldmaster Rc 451 Driver 15l price
              -Goldmaster Rc 451 Driver 15l features
              -Goldmaster Rc 451 Driver 15l specifications
              -Goldmaster Rc 451 Driver 15l warranty
              -Goldmaster Rc 451 Driver 15l support
              -Goldmaster Rc 451 Driver 15l software
              -Goldmaster Rc 451 Driver 15l firmware
              -Goldmaster Rc 451 Driver 15l windows 10
              -Goldmaster Rc 451 Driver 15l mac os
              -Goldmaster Rc 451 Driver 15l linux
              -Goldmaster Rc 451 Driver 15l android
              -Goldmaster Rc 451 Driver 15l ios
              -Goldmaster Rc 451 Driver 15l bluetooth
              -Goldmaster Rc 451 Driver 15l wireless
              -Goldmaster Rc 451 Driver 15l usb
              -Goldmaster Rc 451 Driver 15l hdmi
              -Goldmaster Rc 451 Driver 15l vga
              -Goldmaster Rc 451 Driver 15l audio
              -Goldmaster Rc 451 Driver 15l video
              -Goldmaster Rc 451 Driver 15l microphone
              -Goldmaster Rc 451 Driver 15l speaker
              -Goldmaster Rc 451 Driver 15l webcam
              -Goldmaster Rc 451 Driver 15l keyboard
              -Goldmaster Rc 451 Driver 15l mouse
              -Goldmaster Rc 451 Driver 15l touchpad
              -Goldmaster Rc 451 Driver 15l screen
              -Goldmaster Rc 451 Driver 15l battery
              -Goldmaster Rc 451 Driver 15l charger
              -Goldmaster Rc
              -
                -
              • Positive feedback: Customers who liked the speaker praised its sound quality, design, functionality, and price. They said it was loud, clear, bassy, and balanced. They also liked its wireless connection, remote control, LED display, FM radio, and AUX input. They said it was easy to set up, use, and carry around. They also said it was well-made, sturdy, and durable. They said it was worth every penny and a great value for money.
              • -
              • Negative feedback: Customers who disliked the speaker complained about its battery life, microphone, and equalizer. They said it had a short battery life that lasted only 2 hours or less. They also said it had no microphone for hands-free calls or voice commands. They also said it had no equalizer to adjust the sound settings. They said it was disappointing and not as expected.
              • -
              -

              The average rating of the speaker on Amazon is 4.2 out of 5 stars based on 36 reviews. The average rating on eBay is 4.5 out of 5 stars based on 12 reviews. The average rating on AliExpress is 4.8 out of 5 stars based on 16 reviews.

              -

              Where to Buy Goldmaster Rc 451 Driver 15l

              -

              If you are interested in buying the Goldmaster Rc 451 Driver 15l, you have several options to choose from. Here are some of them:

              -
                -
              • Online options: You can buy the speaker online from various platforms such as Amazon, eBay, AliExpress, etc. You can compare prices, read reviews, see ratings, and check availability from these platforms. You can also enjoy free shipping, discounts, coupons, and other offers from some of them.
              • -
              • Offline options: You can also buy the speaker offline from local stores or distributors near you. You can visit their websites or call them to find out their location, inventory, price, and warranty. You can also see and test the speaker in person before buying it.
              • -
              -

              Conclusion

              - ```html and more. It has a lot of features and benefits that make it a great choice for anyone who loves sound. It has a high-quality sound with 15 liters of volume, a wireless connection with Bluetooth and USB, a remote control and LED display, an FM radio and AUX input, a compact and portable design, and an affordable price and warranty. It is easy to use and operate, and you can connect it to any device that supports Bluetooth or USB. You can also listen to your favorite radio stations or use the AUX input to connect any device that has a headphone jack.

              -

              However, the speaker also has some drawbacks that you should be aware of. It has a limited battery life that lasts only up to 4 hours of playtime. It also has no microphone for hands-free calls or voice commands. It also has no equalizer to adjust the sound settings according to your preference. You might find these issues disappointing or not as expected.

              -

              Overall, the Goldmaster Rc 451 Driver 15l is a wireless speaker that can provide high-quality sound for your music, movies, games, and more. It has many features and benefits that make it worth buying, but it also has some cons that you should consider before buying it. You can buy it online from platforms such as Amazon, eBay, AliExpress, etc., or offline from local stores or distributors near you.

              -

              FAQs

              -

              Here are some frequently asked questions about the Goldmaster Rc 451 Driver 15l:

              -
                -
              1. What is the size and weight of the speaker?: The speaker measures 28 x 18 x 18 cm and weighs 3 kg.
              2. -
              3. What is the battery capacity and charging time of the speaker?: The speaker has a built-in rechargeable battery that has a capacity of 1800 mAh and a charging time of 3 hours.
              4. -
              5. What is the Bluetooth range and version of the speaker?: The speaker has a Bluetooth range of 10 meters and a Bluetooth version of 4.2.
              6. -
• What are the sound modes and sources of the speaker?: The speaker has seven sound modes: normal, pop, rock, jazz, classic, country, and bass boost. The speaker has four sources: BT, USB, FM, and AUX.
              8. -
              9. What are the accessories included with the speaker?: The speaker comes with a power adapter, a remote control, an AUX cable, and a user manual.
              10. -
              -

              0a6ba089eb
              -
              -
              \ No newline at end of file diff --git a/spaces/razfar/anything-counter/utils/google_utils.py b/spaces/razfar/anything-counter/utils/google_utils.py deleted file mode 100644 index c2e7293de826d81fa05e01022dabdfbf74fa995e..0000000000000000000000000000000000000000 --- a/spaces/razfar/anything-counter/utils/google_utils.py +++ /dev/null @@ -1,122 +0,0 @@ -# Google utils: https://cloud.google.com/storage/docs/reference/libraries - -import os -import platform -import subprocess -import time -from pathlib import Path - -import requests -import torch - - -def gsutil_getsize(url=''): - # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes - - -def attempt_download(file, repo='WongKinYiu/yolov6'): - # Attempt file download if does not exist - file = Path(str(file).strip().replace("'", '').lower()) - - if not file.exists(): - try: - response = requests.get(f'https://api.github.com/repos/{repo}/releases/weights').json() # github api - assets = [x['name'] for x in response['assets']] # release assets - tag = response['tag_name'] # i.e. 'v1.0' - except: # fallback plan - assets = ['yolov6.pt'] - tag = subprocess.check_output('git tag', shell=True).decode().split()[-1] - - name = file.name - if name in assets: - msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' - redundant = False # second download option - try: # GitHub - url = f'https://github.com/{repo}/releases/download/{tag}/{name}' - print(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert file.exists() and file.stat().st_size > 1E6 # check - except Exception as e: # GCP - print(f'Download error: {e}') - assert redundant, 'No secondary mirror' - url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' - print(f'Downloading {url} to {file}...') - os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights) - finally: - if not file.exists() or file.stat().st_size < 1E6: # check - file.unlink(missing_ok=True) # remove partial downloads - print(f'ERROR: Download failure: {msg}') - print('') - return - - -def gdrive_download(id='', file='tmp.zip'): - # Downloads a file from Google Drive. from yolov6.utils.google_utils import *; gdrive_download() - t = time.time() - file = Path(file) - cookie = Path('cookie') # gdrive cookie - print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='') - file.unlink(missing_ok=True) # remove existing file - cookie.unlink(missing_ok=True) # remove existing cookie - - # Attempt file download - out = "NUL" if platform.system() == "Windows" else "/dev/null" - os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') - if os.path.exists('cookie'): # large file - s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' - else: # small file - s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' - r = os.system(s) # execute, capture return - cookie.unlink(missing_ok=True) # remove existing cookie - - # Error check - if r != 0: - file.unlink(missing_ok=True) # remove partial - print('Download error ') # raise Exception('Download error') - return r - - # Unzip if archive - if file.suffix == '.zip': - print('unzipping... 
', end='') - os.system(f'unzip -q {file}') # unzip - file.unlink() # remove zip to free space - - print(f'Done ({time.time() - t:.1f}s)') - return r - - -def get_token(cookie="./cookie"): - with open(cookie) as f: - for line in f: - if "download" in line: - return line.split()[-1] - return "" - -# def upload_blob(bucket_name, source_file_name, destination_blob_name): -# # Uploads a file to a bucket -# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python -# -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(destination_blob_name) -# -# blob.upload_from_filename(source_file_name) -# -# print('File {} uploaded to {}.'.format( -# source_file_name, -# destination_blob_name)) -# -# -# def download_blob(bucket_name, source_blob_name, destination_file_name): -# # Uploads a blob from a bucket -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(source_blob_name) -# -# blob.download_to_filename(destination_file_name) -# -# print('Blob {} downloaded to {}.'.format( -# source_blob_name, -# destination_file_name)) diff --git a/spaces/rbalacha/04-Gradio-SOTA-Seq2Seq/README.md b/spaces/rbalacha/04-Gradio-SOTA-Seq2Seq/README.md deleted file mode 100644 index 50fec100681d4e617991640423d61aabe49465e8..0000000000000000000000000000000000000000 --- a/spaces/rbalacha/04-Gradio-SOTA-Seq2Seq/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 04 Gradio SOTA Seq2Seq -emoji: 🐢 -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/rcajegas/HTML5-Aframe-3DMAP-FLIGHT/style.css b/spaces/rcajegas/HTML5-Aframe-3DMAP-FLIGHT/style.css deleted file mode 100644 index 57ac874613ad432d3129fa1757249a319a601f3e..0000000000000000000000000000000000000000 --- a/spaces/rcajegas/HTML5-Aframe-3DMAP-FLIGHT/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/GettingOverItwithBennettFoddynosurveynopassword2019.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/GettingOverItwithBennettFoddynosurveynopassword2019.md deleted file mode 100644 index 71128a41962838c7e05fad9a3db8c717884df621..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/GettingOverItwithBennettFoddynosurveynopassword2019.md +++ /dev/null @@ -1,9 +0,0 @@ -

              GettingOverItwithBennettFoddynosurveynopassword2019


              Downloadhttps://urlgoal.com/2uCM6K



              -
              -GettingOverItwithBennettFoddynosurveynopassword2019 Download: ( Download: ( Apk Data android Free Download GettingOverItwithBennettFoddynosurveynopassword2019 Doraemon No . ##1.0.3 Apk Data android Free Download GettingOverItwithBennettFoddynosurveynopassword2019 Doraemon Nobita And The Legend Of The Sun King Movie Download un. ## -1.0.3 Apk Data android Free Download GettingOverItwithBennettFoddynosurveynopassword2019 Doraemon Nobita And The Legend Of The Sun King Movie Download un. ## -1.0.3 Apk Data android Free Download GettingOverItwithBennettFoddynosurveynopassword2019 Doraemon Nobita And The Legend Of The Sun King Movie Download une. ## -1.0.3 Ap 8a78ff9644
              -
              -
              -

              diff --git "a/spaces/rizam/literature-research-tool/documents/docs/4-\346\226\207\347\214\256\345\210\206\346\236\220\345\271\263\345\217\260\346\257\224\350\276\203.md" "b/spaces/rizam/literature-research-tool/documents/docs/4-\346\226\207\347\214\256\345\210\206\346\236\220\345\271\263\345\217\260\346\257\224\350\276\203.md" deleted file mode 100644 index 5e23ba208d6085f448f9d244798415d1605bdc60..0000000000000000000000000000000000000000 --- "a/spaces/rizam/literature-research-tool/documents/docs/4-\346\226\207\347\214\256\345\210\206\346\236\220\345\271\263\345\217\260\346\257\224\350\276\203.md" +++ /dev/null @@ -1,56 +0,0 @@ -# 4 Other Literature Research Tools -## 1 Citespace - -> 作者:爱学习的毛里 -> 链接:https://www.zhihu.com/question/27463829/answer/284247493 -> 来源:知乎 -> 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。 - -一、工作原理 -简单来讲,citespace主要基于“共现聚类”思想: - -1. 首先对科学文献中的信息单元进行提取 - - 包括文献层面上的参考文献,主题层面上的**关键词**、主题词、学科、领域分类等,主体层面上的作者、机构、国家、期刊等 -2. 然后根据信息单元间的联系类型和强度进行重构,形成不同意义的网络结构 - - 如关键词共现、作者合作、文献共被引等, - - 网络中的节点代表文献信息单元,连线代表节点间的联系(共现) -3. 最后通过对节点、连线及网络结构进行测度、统计分析(聚类、突现词检测等)和可视化,发现特定学科和领域知识结构的隐含模式和规律。 - -![](https://pica.zhimg.com/50/v2-b94a8061c72d6e299a059da0c1cb3813_720w.jpg?source=1940ef5c)*共现聚类思想* - -二、主要用途 - -1. **研究热点分析**:一般利用关键词/主题词共现 -2. 研究前沿探测:共被引、耦合、共词、突现词检测都有人使用,但因为对“研究前沿”的定义尚未统一,所以方法的选择和图谱结果的解读上众说纷纭 -3. 研究演进路径分析:将时序维度与主题聚类结合,例如citespace中的时间线图和时区图 -4. 研究群体发现:一般建立作者/机构合作、作者耦合等网络,可以发现研究小团体、核心作者/机构等 -5. 学科/领域/知识交叉和流动分析:一般建立期刊/学科等的共现网络,可以研究学科之间的交叉、知识流动和融合等除分析 科学文献 外,citespace也可以用来分析 专利技术文献,用途与科学文献类似,包括技术研究热点、趋势、结构、核心专利权人或团体的识别等。 - -三、工作流程 -![](https://pic1.zhimg.com/50/v2-165aa367fa07d8e46f286dfe06f0fce4_720w.jpg?source=1940ef5c) -*摘自《引文空间分析原理与应用》* - -### 聚类算法 - -CiteSpace提供的算法有3个,3个算法的名称分别是: - -- LSI/LSA: Latent Semantic Indexing/Latent Semantic Analysis 浅语义索引 - [intro](https://www.cnblogs.com/pinard/p/6805861.html) - -- LLR: Log Likelihood Ratio 对数极大似然率 - -- MI: Mutual Information 互信息 - - -对不同的数据,3种算法表现一样,可在实践中多做实践。 - -[paper](https://readpaper.com/paper/2613897633) - -## 2 VOSviewer - -VOSviewer的处理流程与大部分的科学知识图谱类软件类似,即文件导入——信息单元抽取(如作者、关键词等)——建立共现矩阵——利用相似度计算对关系进行标准化处理——统计分析(一般描述统计+聚类)——可视化展现(布局+其它图形属性映射) - - -Normalization, mapping, and clustering - -[paper](https://www.vosviewer.com/download/f-x2.pdf) (See Appendix) \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Download Ativador Win 10.md b/spaces/rorallitri/biomedical-language-models/logs/Download Ativador Win 10.md deleted file mode 100644 index 8f7fc0ca577f4d6d0fee30b8207801177799a0d8..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Download Ativador Win 10.md +++ /dev/null @@ -1,50 +0,0 @@ -

              download ativador win 10


              DOWNLOADhttps://tinurll.com/2uzmVh



- -look at how people are in theory - - you have to add the download site - - an application that lets you connect an embedded web page - - I don't know if it's the option for the next step - - I know it's at this stage - - hahaha - - ok - - then I'm ready - - hey, today is Saturday - - people, speak up Brazil - - me too - - good afternoon - - good afternoon :) - - mirqui: hi - - hey - - today is Saturday - - hmm - - right? - - just - - never mind - - yes, Saturday - - he did: - - -
              -
              -

              diff --git a/spaces/sachinrcz/isItCarOrPlaceOrBus/app.py b/spaces/sachinrcz/isItCarOrPlaceOrBus/app.py deleted file mode 100644 index 0837a61f3d79f2ba77d63a3ecda5c905a3a4b550..0000000000000000000000000000000000000000 --- a/spaces/sachinrcz/isItCarOrPlaceOrBus/app.py +++ /dev/null @@ -1,18 +0,0 @@ -import gradio as gr -from fastai.vision.all import * - -learn = load_learner('model.pkl') - -labels = learn.dls.vocab -def predict(img): - img = PILImage.create(img) - pred,pred_idx,probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - -title = "Is it a car or plane or bus?" -description = "Are you a vampire sleeping for last 500 years and struggling to cope with this new world? Have you ever looked at something and could not figure out if it is a car or plane or bus? Don't worry you can now use this app to help you figure what it is exactly" -examples = ['car.jpg', 'plane.jpg', 'bus.jpg'] -interpretation='default' -enable_queue=True - -gr.Interface(fn=predict,inputs=gr.inputs.Image(shape=(512, 512)),outputs=gr.outputs.Label(num_top_classes=3),title=title,description=description,examples=examples,interpretation=interpretation).launch() \ No newline at end of file diff --git a/spaces/salemamassi/PdfChatBot/app.py b/spaces/salemamassi/PdfChatBot/app.py deleted file mode 100644 index 936e3c53b69ae861bebf6138fc5a7cacfd56d10d..0000000000000000000000000000000000000000 --- a/spaces/salemamassi/PdfChatBot/app.py +++ /dev/null @@ -1,67 +0,0 @@ -import gradio as gr -import os -import tempfile -from langchain.document_loaders import UnstructuredPDFLoader -from langchain.indexes import VectorstoreIndexCreator -from langchain.chains import RetrievalQA -from langchain.schema import AIMessage, HumanMessage -from langchain.vectorstores import FAISS -from langchain.embeddings import HuggingFaceEmbeddings -from langchain.text_splitter import CharacterTextSplitter -from langchain import HuggingFaceHub - -# Set your API keys -API_KEY = os.environ["API_KEY"] -pdf_path = './Adventure Works Analysis Report.pdf' - -# Create a temporary upload directory - -# Define global variables for loaders and index -index = None -def load_file(pdf_path): - global index - pdf_loader = UnstructuredPDFLoader(pdf_path) - index = VectorstoreIndexCreator( - embedding=HuggingFaceEmbeddings(), - text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) - ).from_loaders([pdf_loader]) - return "DONE ✅" -load_file(pdf_path) - -def chat(message,history): - global index - history_langchain_format = [] - for human, ai in history: - history_langchain_format.append(HumanMessage(content=human)) - history_langchain_format.append(AIMessage(content=ai)) - history_langchain_format.append(HumanMessage(content=message)) - history_langchain_format.append(HumanMessage(content=message)) - # Create the index (update index) - llm2 = HuggingFaceHub(repo_id="declare-lab/flan-alpaca-large", model_kwargs={"temperature": 0, "max_length": 512},huggingfacehub_api_token = API_KEY ) - chain = RetrievalQA.from_chain_type(llm=llm2, - chain_type="stuff", - retriever=index.vectorstore.as_retriever(), - input_key="question") - # Perform question-answering on the uploaded PDF with the user's question - gpt_response = chain.run("Based on the file you have processed, provide a related answer to this question: "+ message) - return gpt_response - - -# Create a Gradio interface for chat -chat_interface = gr.ChatInterface( - chat, - theme=gr.themes.Soft() -) - - - -with gr.Blocks(theme=gr.themes.Soft()) as demo: - 
with gr.Row(): - with gr.Column(): - # text = gr.Textbox(load_file, [pdf_path],label="Status") - chat_interface = gr.ChatInterface( - chat, - theme=gr.themes.Soft() - ) - -demo.queue().launch(inline=False) diff --git a/spaces/sccstandardteam/ChuanhuChatGPT/modules/utils.py b/spaces/sccstandardteam/ChuanhuChatGPT/modules/utils.py deleted file mode 100644 index a025a80d7b52f3ae788be960c17520d44bf56e49..0000000000000000000000000000000000000000 --- a/spaces/sccstandardteam/ChuanhuChatGPT/modules/utils.py +++ /dev/null @@ -1,592 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type -import logging -import json -import os -import datetime -import hashlib -import csv -import requests -import re -import html -import sys -import subprocess - -import gradio as gr -from pypinyin import lazy_pinyin -import tiktoken -import mdtex2html -from markdown import markdown -from pygments import highlight -from pygments.lexers import get_lexer_by_name -from pygments.formatters import HtmlFormatter -import pandas as pd - -from modules.presets import * -from . import shared -from modules.config import retrieve_proxy, hide_history_when_not_logged_in - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - -def predict(current_model, *args): - iter = current_model.predict(*args) - for i in iter: - yield i - -def billing_info(current_model): - return current_model.billing_info() - -def set_key(current_model, *args): - return current_model.set_key(*args) - -def load_chat_history(current_model, *args): - return current_model.load_chat_history(*args) - -def interrupt(current_model, *args): - return current_model.interrupt(*args) - -def reset(current_model, *args): - return current_model.reset(*args) - -def retry(current_model, *args): - iter = current_model.retry(*args) - for i in iter: - yield i - -def delete_first_conversation(current_model, *args): - return current_model.delete_first_conversation(*args) - -def delete_last_conversation(current_model, *args): - return current_model.delete_last_conversation(*args) - -def set_system_prompt(current_model, *args): - return current_model.set_system_prompt(*args) - -def save_chat_history(current_model, *args): - return current_model.save_chat_history(*args) - -def export_markdown(current_model, *args): - return current_model.export_markdown(*args) - -def load_chat_history(current_model, *args): - return current_model.load_chat_history(*args) - -def upload_chat_history(current_model, *args): - return current_model.load_chat_history(*args) - -def set_token_upper_limit(current_model, *args): - return current_model.set_token_upper_limit(*args) - -def set_temperature(current_model, *args): - current_model.set_temperature(*args) - -def set_top_p(current_model, *args): - current_model.set_top_p(*args) - -def set_n_choices(current_model, *args): - current_model.set_n_choices(*args) - -def set_stop_sequence(current_model, *args): - current_model.set_stop_sequence(*args) - -def set_max_tokens(current_model, *args): - current_model.set_max_tokens(*args) - -def set_presence_penalty(current_model, *args): - current_model.set_presence_penalty(*args) - -def set_frequency_penalty(current_model, *args): - current_model.set_frequency_penalty(*args) - -def set_logit_bias(current_model, *args): - current_model.set_logit_bias(*args) - -def set_user_identifier(current_model, *args): - 
current_model.set_user_identifier(*args) - -def set_single_turn(current_model, *args): - current_model.set_single_turn(*args) - -def handle_file_upload(current_model, *args): - return current_model.handle_file_upload(*args) - -def like(current_model, *args): - return current_model.like(*args) - -def dislike(current_model, *args): - return current_model.dislike(*args) - - -def count_token(message): - encoding = tiktoken.get_encoding("cl100k_base") - input_str = f"role: {message['role']}, content: {message['content']}" - length = len(encoding.encode(input_str)) - return length - - -def markdown_to_html_with_syntax_highlight(md_str): - def replacer(match): - lang = match.group(1) or "text" - code = match.group(2) - - try: - lexer = get_lexer_by_name(lang, stripall=True) - except ValueError: - lexer = get_lexer_by_name("text", stripall=True) - - formatter = HtmlFormatter() - highlighted_code = highlight(code, lexer, formatter) - - return f'
              {highlighted_code}
              ' - - code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```" - md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE) - - html_str = markdown(md_str) - return html_str - - -def normalize_markdown(md_text: str) -> str: - lines = md_text.split("\n") - normalized_lines = [] - inside_list = False - - for i, line in enumerate(lines): - if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()): - if not inside_list and i > 0 and lines[i - 1].strip() != "": - normalized_lines.append("") - inside_list = True - normalized_lines.append(line) - elif inside_list and line.strip() == "": - if i < len(lines) - 1 and not re.match( - r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip() - ): - normalized_lines.append(line) - continue - else: - inside_list = False - normalized_lines.append(line) - - return "\n".join(normalized_lines) - - -def convert_mdtext(md_text): - code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL) - inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL) - code_blocks = code_block_pattern.findall(md_text) - non_code_parts = code_block_pattern.split(md_text)[::2] - - result = [] - raw = f'
              {html.escape(md_text)}
              ' - for non_code, code in zip(non_code_parts, code_blocks + [""]): - if non_code.strip(): - non_code = normalize_markdown(non_code) - result.append(markdown(non_code, extensions=["tables"])) - if code.strip(): - # _, code = detect_language(code) # 暂时去除代码高亮功能,因为在大段代码的情况下会出现问题 - # code = code.replace("\n\n", "\n") # 暂时去除代码中的空行,因为在大段代码的情况下会出现问题 - code = f"\n```{code}\n\n```" - code = markdown_to_html_with_syntax_highlight(code) - result.append(code) - result = "".join(result) - output = f'
              {result}
              ' - output += raw - output += ALREADY_CONVERTED_MARK - return output - - -def convert_asis(userinput): - return ( - f'

              {html.escape(userinput)}

              ' - + ALREADY_CONVERTED_MARK - ) - - -def detect_converted_mark(userinput): - try: - if userinput.endswith(ALREADY_CONVERTED_MARK): - return True - else: - return False - except: - return True - - -def detect_language(code): - if code.startswith("\n"): - first_line = "" - else: - first_line = code.strip().split("\n", 1)[0] - language = first_line.lower() if first_line else "" - code_without_language = code[len(first_line) :].lstrip() if first_line else code - return language, code_without_language - - -def construct_text(role, text): - return {"role": role, "content": text} - - -def construct_user(text): - return construct_text("user", text) - - -def construct_system(text): - return construct_text("system", text) - - -def construct_assistant(text): - return construct_text("assistant", text) - - -def save_file(filename, system, history, chatbot, user_name): - logging.debug(f"{user_name} 保存对话历史中……") - os.makedirs(os.path.join(HISTORY_DIR, user_name), exist_ok=True) - if filename.endswith(".json"): - json_s = {"system": system, "history": history, "chatbot": chatbot} - if "/" in filename or "\\" in filename: - history_file_path = filename - else: - history_file_path = os.path.join(HISTORY_DIR, user_name, filename) - with open(history_file_path, "w") as f: - json.dump(json_s, f) - elif filename.endswith(".md"): - md_s = f"system: \n- {system} \n" - for data in history: - md_s += f"\n{data['role']}: \n- {data['content']} \n" - with open(os.path.join(HISTORY_DIR, user_name, filename), "w", encoding="utf8") as f: - f.write(md_s) - logging.debug(f"{user_name} 保存对话历史完毕") - return os.path.join(HISTORY_DIR, user_name, filename) - - -def sorted_by_pinyin(list): - return sorted(list, key=lambda char: lazy_pinyin(char)[0][0]) - - -def get_file_names(dir, plain=False, filetypes=[".json"]): - logging.debug(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}") - files = [] - try: - for type in filetypes: - files += [f for f in os.listdir(dir) if f.endswith(type)] - except FileNotFoundError: - files = [] - files = sorted_by_pinyin(files) - if files == []: - files = [""] - logging.debug(f"files are:{files}") - if plain: - return files - else: - return gr.Dropdown.update(choices=files) - - -def get_history_names(plain=False, user_name=""): - logging.debug(f"从用户 {user_name} 中获取历史记录文件名列表") - if user_name == "" and hide_history_when_not_logged_in: - return "" - else: - return get_file_names(os.path.join(HISTORY_DIR, user_name), plain) - - -def load_template(filename, mode=0): - logging.debug(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)") - lines = [] - if filename.endswith(".json"): - with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f: - lines = json.load(f) - lines = [[i["act"], i["prompt"]] for i in lines] - else: - with open( - os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8" - ) as csvfile: - reader = csv.reader(csvfile) - lines = list(reader) - lines = lines[1:] - if mode == 1: - return sorted_by_pinyin([row[0] for row in lines]) - elif mode == 2: - return {row[0]: row[1] for row in lines} - else: - choices = sorted_by_pinyin([row[0] for row in lines]) - return {row[0]: row[1] for row in lines}, gr.Dropdown.update( - choices=choices - ) - - -def get_template_names(plain=False): - logging.debug("获取模板文件名列表") - return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"]) - - -def get_template_content(templates, selection, original_system_prompt): - logging.debug(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}") - try: - return 
templates[selection] - except: - return original_system_prompt - - -def reset_textbox(): - logging.debug("重置文本框") - return gr.update(value="") - - -def reset_default(): - default_host = shared.state.reset_api_host() - retrieve_proxy("") - return gr.update(value=default_host), gr.update(value=""), "API-Host 和代理已重置" - - -def change_api_host(host): - shared.state.set_api_host(host) - msg = f"API-Host更改为了{host}" - logging.info(msg) - return msg - - -def change_proxy(proxy): - retrieve_proxy(proxy) - os.environ["HTTPS_PROXY"] = proxy - msg = f"代理更改为了{proxy}" - logging.info(msg) - return msg - - -def hide_middle_chars(s): - if s is None: - return "" - if len(s) <= 8: - return s - else: - head = s[:4] - tail = s[-4:] - hidden = "*" * (len(s) - 8) - return head + hidden + tail - - -def submit_key(key): - key = key.strip() - msg = f"API密钥更改为了{hide_middle_chars(key)}" - logging.info(msg) - return key, msg - - -def replace_today(prompt): - today = datetime.datetime.today().strftime("%Y-%m-%d") - return prompt.replace("{current_date}", today) - - -def get_geoip(): - try: - with retrieve_proxy(): - response = requests.get("https://ipapi.co/json/", timeout=5) - data = response.json() - except: - data = {"error": True, "reason": "连接ipapi失败"} - if "error" in data.keys(): - logging.warning(f"无法获取IP地址信息。\n{data}") - if data["reason"] == "RateLimited": - return ( - i18n("您的IP区域:未知。") - ) - else: - return i18n("获取IP地理位置失败。原因:") + f"{data['reason']}" + i18n("。你仍然可以使用聊天功能。") - else: - country = data["country_name"] - if country == "China": - text = "**您的IP区域:中国。请立即检查代理设置,在不受支持的地区使用API可能导致账号被封禁。**" - else: - text = i18n("您的IP区域:") + f"{country}。" - logging.info(text) - return text - - -def find_n(lst, max_num): - n = len(lst) - total = sum(lst) - - if total < max_num: - return n - - for i in range(len(lst)): - if total - lst[i] < max_num: - return n - i - 1 - total = total - lst[i] - return 1 - - -def start_outputing(): - logging.debug("显示取消按钮,隐藏发送按钮") - return gr.Button.update(visible=False), gr.Button.update(visible=True) - - -def end_outputing(): - return ( - gr.Button.update(visible=True), - gr.Button.update(visible=False), - ) - - -def cancel_outputing(): - logging.info("中止输出……") - shared.state.interrupt() - - -def transfer_input(inputs): - # 一次性返回,降低延迟 - textbox = reset_textbox() - outputing = start_outputing() - return ( - inputs, - gr.update(value=""), - gr.Button.update(visible=False), - gr.Button.update(visible=True), - ) - - - -def run(command, desc=None, errdesc=None, custom_env=None, live=False): - if desc is not None: - print(desc) - if live: - result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - raise RuntimeError(f"""{errdesc or 'Error running command'}. - Command: {command} - Error code: {result.returncode}""") - - return "" - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - message = f"""{errdesc or 'Error running command'}. 
- Command: {command} - Error code: {result.returncode} - stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else ''} - stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else ''} - """ - raise RuntimeError(message) - return result.stdout.decode(encoding="utf8", errors="ignore") - -def versions_html(): - git = os.environ.get('GIT', "git") - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - try: - commit_hash = run(f"{git} rev-parse HEAD").strip() - except Exception: - commit_hash = "" - if commit_hash != "": - short_commit = commit_hash[0:7] - commit_info = f"{short_commit}" - else: - commit_info = "unknown \U0001F615" - return f""" - Python: {python_version} -  •  - Gradio: {gr.__version__} -  •  - ChuanhuChat: {commit_info} - """ - -def add_source_numbers(lst, source_name = "Source", use_source = True): - if use_source: - return [f'[{idx+1}]\t "{item[0]}"\n{source_name}: {item[1]}' for idx, item in enumerate(lst)] - else: - return [f'[{idx+1}]\t "{item}"' for idx, item in enumerate(lst)] - -def add_details(lst): - nodes = [] - for index, txt in enumerate(lst): - brief = txt[:25].replace("\n", "") - nodes.append( - f"
              {brief}...

              {txt}

              " - ) - return nodes - - -def sheet_to_string(sheet, sheet_name = None): - result = [] - for index, row in sheet.iterrows(): - row_string = "" - for column in sheet.columns: - row_string += f"{column}: {row[column]}, " - row_string = row_string.rstrip(", ") - row_string += "." - result.append(row_string) - return result - -def excel_to_string(file_path): - # 读取Excel文件中的所有工作表 - excel_file = pd.read_excel(file_path, engine='openpyxl', sheet_name=None) - - # 初始化结果字符串 - result = [] - - # 遍历每一个工作表 - for sheet_name, sheet_data in excel_file.items(): - - # 处理当前工作表并添加到结果字符串 - result += sheet_to_string(sheet_data, sheet_name=sheet_name) - - - return result - -def get_last_day_of_month(any_day): - # The day 28 exists in every month. 4 days later, it's always next month - next_month = any_day.replace(day=28) + datetime.timedelta(days=4) - # subtracting the number of the current day brings us back one month - return next_month - datetime.timedelta(days=next_month.day) - -def get_model_source(model_name, alternative_source): - if model_name == "gpt2-medium": - return "https://huggingface.co/gpt2-medium" - -def refresh_ui_elements_on_load(current_model, selected_model_name, user_name): - current_model.set_user_identifier(user_name) - return toggle_like_btn_visibility(selected_model_name), *current_model.auto_load() - -def toggle_like_btn_visibility(selected_model_name): - if selected_model_name == "xmchat": - return gr.update(visible=True) - else: - return gr.update(visible=False) - -def new_auto_history_filename(dirname): - latest_file = get_latest_filepath(dirname) - if latest_file: - with open(os.path.join(dirname, latest_file), 'r') as f: - if len(f.read()) == 0: - return latest_file - now = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') - return f'{now}.json' - -def get_latest_filepath(dirname): - pattern = re.compile(r'\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') - latest_time = None - latest_file = None - for filename in os.listdir(dirname): - if os.path.isfile(os.path.join(dirname, filename)): - match = pattern.search(filename) - if match and match.group(0) == filename[:19]: - time_str = filename[:19] - filetime = datetime.datetime.strptime(time_str, '%Y-%m-%d_%H-%M-%S') - if not latest_time or filetime > latest_time: - latest_time = filetime - latest_file = filename - return latest_file - -def get_history_filepath(username): - dirname = os.path.join(HISTORY_DIR, username) - os.makedirs(dirname, exist_ok=True) - latest_file = get_latest_filepath(dirname) - if not latest_file: - latest_file = new_auto_history_filename(dirname) - - latest_file = os.path.join(dirname, latest_file) - return latest_file diff --git a/spaces/scedlatioru/img-to-music/example/Asnafeadabbookfree REPACKdownload.md b/spaces/scedlatioru/img-to-music/example/Asnafeadabbookfree REPACKdownload.md deleted file mode 100644 index dc2ba31ac3306482814e60b64853a2da9b8c5846..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Asnafeadabbookfree REPACKdownload.md +++ /dev/null @@ -1,6 +0,0 @@ -

              asnafeadabbookfreedownload


              Download ———>>> https://gohhs.com/2uEAAF



              -
              - 3cee63e6c2
              -
              -
              -

              diff --git a/spaces/scedlatioru/img-to-music/example/Chegg Account Password Crack _HOT_.md b/spaces/scedlatioru/img-to-music/example/Chegg Account Password Crack _HOT_.md deleted file mode 100644 index a8235eb80a2eb376c131b4ba6a4b599dd8a9773a..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Chegg Account Password Crack _HOT_.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Chegg account password crack


              Download File === https://gohhs.com/2uEzeX



              - - d5da3c52bf
              -
              -
              -

              diff --git a/spaces/scedlatioru/img-to-music/example/KungFuPanda3English2full [BEST]moviewithenglishsubtitlesdownloadtorrent.md b/spaces/scedlatioru/img-to-music/example/KungFuPanda3English2full [BEST]moviewithenglishsubtitlesdownloadtorrent.md deleted file mode 100644 index 9b97860f34524917a745f6dd565212218d2bcfaa..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/KungFuPanda3English2full [BEST]moviewithenglishsubtitlesdownloadtorrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

              KungFuPanda3English2fullmoviewithenglishsubtitlesdownloadtorrent


              Downloadhttps://gohhs.com/2uEzFd



              - - d5da3c52bf
              -
              -
              -

              diff --git a/spaces/scedlatioru/img-to-music/example/Splitting Adam Movie Mp4 Downloa ((FREE)).md b/spaces/scedlatioru/img-to-music/example/Splitting Adam Movie Mp4 Downloa ((FREE)).md deleted file mode 100644 index d20a1c54c9163145a94cdcff1aec719d46e44cb9..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Splitting Adam Movie Mp4 Downloa ((FREE)).md +++ /dev/null @@ -1,7 +0,0 @@ - -

The movie was directed by Sydney Pollack, who later made Out of Africa and The Firm, as well as The Interpreter and Tootsie, and who was also a writer and producer. It was also known as The American Dream in Russia. In the Russian version, two of the terrorists are actually American, and their accomplices are Russians.

              -


              -

              Splitting Adam Movie Mp4 Downloa


              Download Filehttps://gohhs.com/2uEzXe



              -

Until you're able to stream the movie on your Apple TV or Fire TV, you can watch it on your mobile device (iOS or Android) via various rental, purchase, and subscription options. To see a listing of all these streaming options, check out the sources below. If you find your favorite streaming service in the list, click on the service and follow the on-screen instructions. You'll see a trailer or trailer-like video, a link to the service, and a box in which you can buy the movie, rent it, subscribe to it, or watch it with your subscription.

              -
              -
              \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/WinMend.Data.Recovery.v1.4.1.Incl.Keymaker-CORE Serial Key Keygen UPD.md b/spaces/scedlatioru/img-to-music/example/WinMend.Data.Recovery.v1.4.1.Incl.Keymaker-CORE Serial Key Keygen UPD.md deleted file mode 100644 index 98f4f5fc47e27d11812a99f2a21d233d536ca55c..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/WinMend.Data.Recovery.v1.4.1.Incl.Keymaker-CORE Serial Key Keygen UPD.md +++ /dev/null @@ -1,35 +0,0 @@ -
              -

              How to Recover Lost Data with WinMend Data Recovery v1.4.1

              -

If you have ever lost important files to accidental deletion, formatting, partition errors, or virus attacks, you know how frustrating and stressful it can be. Fortunately, there is a powerful and easy-to-use data recovery tool that can help you restore your data in minutes: WinMend Data Recovery v1.4.1.

              -

              WinMend.Data.Recovery.v1.4.1.Incl.Keymaker-CORE Serial Key keygen


              Downloadhttps://gohhs.com/2uEA8U



              -

WinMend Data Recovery v1.4.1 is a Windows-based data recovery application that can recover data from FAT12/FAT16/FAT32/NTFS/NTFS5 partitions in Windows operating systems. It can scan partitions on hard drives, removable drives, and even data cards, and search for and recover deleted or missing files, as well as files lost during formatting or due to partition errors[^2^]. It also works with standard, external, and mobile drives.

              -

WinMend Data Recovery v1.4.1 has a simple and user-friendly interface. It offers several modes of data recovery, including quick scan and full scan. Quick scan helps you recover recently deleted files quickly[^2^], while full scan performs a thorough search of your drive for any recoverable items[^2^]. You can preview the files before recovering them and select the ones you want to restore.

              -

WinMend Data Recovery v1.4.1 is a reliable and efficient data recovery tool that can restore accidentally deleted, formatted, or overwritten files from hard drive partitions[^2^]. It uses the latest algorithms to ensure the accuracy and completeness of the recovery[^2^]. It can recover various types of files, such as documents, photos, videos, music, emails, and archives.

              -

To use WinMend Data Recovery v1.4.1, you need to download it from the official website or from another trusted source[^1^]. You also need to obtain a serial key and a keygen to activate the software[^1^]. A serial key is a unique code that identifies your copy of the software and unlocks its full features. A keygen is a program that generates valid serial keys for a specific piece of software.

              -

              After downloading WinMend Data Recovery v1.4.1, you need to install it on your computer by following the instructions on the screen. Then, you need to run the keygen and copy the serial key it generates. Next, you need to launch WinMend Data Recovery v1.4.1 and enter the serial key when prompted. Finally, you can start using the software to recover your lost data.

              -

WinMend Data Recovery v1.4.1 is an excellent tool for recovering lost or deleted files from hard drives. It is easy to use and provides a comprehensive scan of your drive for any recoverable items. It can restore accidentally deleted, formatted, or overwritten files from hard drive partitions[^2^], and supports FAT12, FAT16, FAT32, NTFS, and NTFS5 file systems[^2^]. It also works with standard, external, and mobile drives[^2^]. If you are looking for a simple and effective data recovery solution, you should try WinMend Data Recovery v1.4.1.

              -

              - -

              How to Use WinMend Data Recovery v1.4.1

              -

              Using WinMend Data Recovery v1.4.1 is very easy and straightforward. You just need to follow these simple steps:

              -
                -
              1. Begin by installing the program on your computer. Make sure that you install the program on a separate drive to avoid overwriting the missing data[^3^].
              2. -
              3. In the main interface, select the mode of data recovery that suits your situation. You can choose from quick scan, full scan, or unformatting scan[^2^]. Quick scan can help you recover files that have been recently deleted for faster access[^2^]. Full scan can perform a thorough scan of your drive for any recoverable items[^2^]. Unformatting scan can help you recover data from a formatted drive[^4^].
              4. -
              5. In the next window, select the drive that contains the missing data and then click "Start"[^3^]. The program will start scanning the drive for any recoverable files.
              6. -
              7. Once the scan is complete, you will see a list of files that can be recovered. You can preview the files before recovering them and select the ones you want to restore[^2^]. You can also filter the files by name, size, type, or date[^2^].
              8. -
              9. Click on "Recover" and choose a destination folder to save the recovered files[^3^]. Make sure that you save the files to a different drive than the original one to avoid data loss[^3^]. Wait for the recovery process to finish.
              10. -
              -

              Congratulations! You have successfully recovered your lost data with WinMend Data Recovery v1.4.1.

              - -

              Tips and Tricks for Using WinMend Data Recovery v1.4.1

              -

To make the most of WinMend Data Recovery v1.4.1, here are some tips and tricks to keep in mind:

              -
                -
              • Stop using your computer as soon as you realize that you have lost some data. This will prevent further data loss and increase the chances of recovery[^3^].
              • -
              • Do not install or run WinMend Data Recovery v1.4.1 on the same drive that contains the missing data[^3^]. This will avoid overwriting the data and make it unrecoverable.
              • -
              • Do not save the recovered files to the same drive that contains the missing data[^3^]. This will also avoid overwriting the data and make it unrecoverable.
              • -
              • Use a reliable antivirus program to scan your computer for any virus or malware infections that may have caused data loss[^3^]. Remove any threats and protect your computer from future attacks.
              • -
              • Back up your important data regularly to an external drive or a cloud service[^3^]. This will help you avoid data loss in case of any unexpected situations.
              • -
              -

WinMend Data Recovery v1.4.1 is a powerful and easy-to-use data recovery tool that can help you restore your data in minutes. It recovers data from FAT12/FAT16/FAT32/NTFS/NTFS5 partitions in Windows, scans hard drives, removable drives, and even data cards for deleted or missing files and for files lost during formatting or due to partition errors[^2^], and works with standard, external, and mobile drives[^2^]. If you are looking for a simple and effective data recovery solution, give WinMend Data Recovery v1.4.1 a try.

              -
              -
              \ No newline at end of file diff --git a/spaces/segments-tobias/conex/espnet2/text/phoneme_tokenizer.py b/spaces/segments-tobias/conex/espnet2/text/phoneme_tokenizer.py deleted file mode 100644 index a1298a18cc65587220397362df679fed68272194..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/text/phoneme_tokenizer.py +++ /dev/null @@ -1,218 +0,0 @@ -from pathlib import Path -from typing import Iterable -from typing import List -from typing import Optional -from typing import Union - -import g2p_en -from typeguard import check_argument_types - -from espnet2.text.abs_tokenizer import AbsTokenizer - - -def split_by_space(text) -> List[str]: - return text.split(" ") - - -def pyopenjtalk_g2p(text) -> List[str]: - import pyopenjtalk - - # phones is a str object separated by space - phones = pyopenjtalk.g2p(text, kana=False) - phones = phones.split(" ") - return phones - - -def pyopenjtalk_g2p_accent(text) -> List[str]: - import pyopenjtalk - import re - - phones = [] - for labels in pyopenjtalk.run_frontend(text)[1]: - p = re.findall(r"\-(.*?)\+.*?\/A:([0-9\-]+).*?\/F:.*?_([0-9])", labels) - if len(p) == 1: - phones += [p[0][0], p[0][2], p[0][1]] - return phones - - -def pyopenjtalk_g2p_accent_with_pause(text) -> List[str]: - import pyopenjtalk - import re - - phones = [] - for labels in pyopenjtalk.run_frontend(text)[1]: - if labels.split("-")[1].split("+")[0] == "pau": - phones += ["pau"] - continue - p = re.findall(r"\-(.*?)\+.*?\/A:([0-9\-]+).*?\/F:.*?_([0-9])", labels) - if len(p) == 1: - phones += [p[0][0], p[0][2], p[0][1]] - return phones - - -def pyopenjtalk_g2p_kana(text) -> List[str]: - import pyopenjtalk - - kanas = pyopenjtalk.g2p(text, kana=True) - return list(kanas) - - -def pypinyin_g2p(text) -> List[str]: - from pypinyin import pinyin - from pypinyin import Style - - phones = [phone[0] for phone in pinyin(text, style=Style.TONE3)] - return phones - - -def pypinyin_g2p_phone(text) -> List[str]: - from pypinyin import pinyin - from pypinyin import Style - from pypinyin.style._utils import get_finals - from pypinyin.style._utils import get_initials - - phones = [ - p - for phone in pinyin(text, style=Style.TONE3) - for p in [ - get_initials(phone[0], strict=True), - get_finals(phone[0], strict=True), - ] - if len(p) != 0 - ] - return phones - - -class G2p_en: - """On behalf of g2p_en.G2p. - - g2p_en.G2p isn't pickalable and it can't be copied to the other processes - via multiprocessing module. - As a workaround, g2p_en.G2p is instantiated upon calling this class. - - """ - - def __init__(self, no_space: bool = False): - self.no_space = no_space - self.g2p = None - - def __call__(self, text) -> List[str]: - if self.g2p is None: - self.g2p = g2p_en.G2p() - - phones = self.g2p(text) - if self.no_space: - # remove space which represents word serapater - phones = list(filter(lambda s: s != " ", phones)) - return phones - - -class Phonemizer: - """Phonemizer module for various languages. - - This is wrapper module of https://github.com/bootphon/phonemizer. - You can define various g2p modules by specifying options for phonemizer. 
- - See available options: - https://github.com/bootphon/phonemizer/blob/master/phonemizer/phonemize.py#L32 - - """ - - def __init__( - self, - word_separator: Optional[str] = None, - syllable_separator: Optional[str] = None, - **phonemize_kwargs, - ): - # delayed import - from phonemizer import phonemize - from phonemizer.separator import Separator - - self.phonemize = phonemize - self.separator = Separator( - word=word_separator, syllable=syllable_separator, phone=" " - ) - self.phonemize_kwargs = phonemize_kwargs - - def __call__(self, text) -> List[str]: - return self.phonemize( - text, - separator=self.separator, - **self.phonemize_kwargs, - ).split() - - -class PhonemeTokenizer(AbsTokenizer): - def __init__( - self, - g2p_type: Union[None, str], - non_linguistic_symbols: Union[Path, str, Iterable[str]] = None, - space_symbol: str = "", - remove_non_linguistic_symbols: bool = False, - ): - assert check_argument_types() - if g2p_type is None: - self.g2p = split_by_space - elif g2p_type == "g2p_en": - self.g2p = G2p_en(no_space=False) - elif g2p_type == "g2p_en_no_space": - self.g2p = G2p_en(no_space=True) - elif g2p_type == "pyopenjtalk": - self.g2p = pyopenjtalk_g2p - elif g2p_type == "pyopenjtalk_kana": - self.g2p = pyopenjtalk_g2p_kana - elif g2p_type == "pyopenjtalk_accent": - self.g2p = pyopenjtalk_g2p_accent - elif g2p_type == "pyopenjtalk_accent_with_pause": - self.g2p = pyopenjtalk_g2p_accent_with_pause - elif g2p_type == "pypinyin_g2p": - self.g2p = pypinyin_g2p - elif g2p_type == "pypinyin_g2p_phone": - self.g2p = pypinyin_g2p_phone - elif g2p_type == "espeak_ng_arabic": - self.g2p = Phonemizer(language="ar", backend="espeak", with_stress=True) - else: - raise NotImplementedError(f"Not supported: g2p_type={g2p_type}") - - self.g2p_type = g2p_type - self.space_symbol = space_symbol - if non_linguistic_symbols is None: - self.non_linguistic_symbols = set() - elif isinstance(non_linguistic_symbols, (Path, str)): - non_linguistic_symbols = Path(non_linguistic_symbols) - with non_linguistic_symbols.open("r", encoding="utf-8") as f: - self.non_linguistic_symbols = set(line.rstrip() for line in f) - else: - self.non_linguistic_symbols = set(non_linguistic_symbols) - self.remove_non_linguistic_symbols = remove_non_linguistic_symbols - - def __repr__(self): - return ( - f"{self.__class__.__name__}(" - f'g2p_type="{self.g2p_type}", ' - f'space_symbol="{self.space_symbol}", ' - f'non_linguistic_symbols="{self.non_linguistic_symbols}"' - f")" - ) - - def text2tokens(self, line: str) -> List[str]: - tokens = [] - while len(line) != 0: - for w in self.non_linguistic_symbols: - if line.startswith(w): - if not self.remove_non_linguistic_symbols: - tokens.append(line[: len(w)]) - line = line[len(w) :] - break - else: - t = line[0] - tokens.append(t) - line = line[1:] - - line = "".join(tokens) - tokens = self.g2p(line) - return tokens - - def tokens2text(self, tokens: Iterable[str]) -> str: - # phoneme type is not invertible - return "".join(tokens) diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/docs/training_tips_ko.md b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/docs/training_tips_ko.md deleted file mode 100644 index 8b3b6245862aef69480f57263d268c94d5e843ca..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/docs/training_tips_ko.md +++ /dev/null @@ -1,53 +0,0 @@ -RVC 훈련에 대한 설명과 팁들 -====================================== -본 팁에서는 어떻게 데이터 훈련이 이루어지고 있는지 설명합니다. - -# 훈련의 흐름 -GUI의 훈련 탭의 단계를 따라 설명합니다. 
- -## step1 -실험 이름을 지정합니다. 또한, 모델이 피치(소리의 높낮이)를 고려해야 하는지 여부를 여기에서 설정할 수도 있습니다.. -각 실험을 위한 데이터는 `/logs/experiment name/`에 배치됩니다.. - -## step2a -음성 파일을 불러오고 전처리합니다. - -### 음성 파일 불러오기 -음성 파일이 있는 폴더를 지정하면 해당 폴더에 있는 음성 파일이 자동으로 가져와집니다. -예를 들어 `C:Users\hoge\voices`를 지정하면 `C:Users\hoge\voices\voice.mp3`가 읽히지만 `C:Users\hoge\voices\dir\voice.mp3`는 읽히지 않습니다. - -음성 로드에는 내부적으로 ffmpeg를 이용하고 있으므로, ffmpeg로 대응하고 있는 확장자라면 자동적으로 읽힙니다. -ffmpeg에서 int16으로 변환한 후 float32로 변환하고 -1과 1 사이에 정규화됩니다. - -### 잡음 제거 -음성 파일에 대해 scipy의 filtfilt를 이용하여 잡음을 처리합니다. - -### 음성 분할 -입력한 음성 파일은 먼저 일정 기간(max_sil_kept=5초?)보다 길게 무음이 지속되는 부분을 감지하여 음성을 분할합니다.무음으로 음성을 분할한 후에는 0.3초의 overlap을 포함하여 4초마다 음성을 분할합니다.4초 이내에 구분된 음성은 음량의 정규화를 실시한 후 wav 파일을 `/logs/실험명/0_gt_wavs`로, 거기에서 16k의 샘플링 레이트로 변환해 `/logs/실험명/1_16k_wavs`에 wav 파일로 저장합니다. - -## step2b -### 피치 추출 -wav 파일에서 피치(소리의 높낮이) 정보를 추출합니다. parselmouth나 pyworld에 내장되어 있는 메서드으로 피치 정보(=f0)를 추출해, `/logs/실험명/2a_f0`에 저장합니다. 그 후 피치 정보를 로그로 변환하여 1~255 정수로 변환하고 `/logs/실험명/2b-f0nsf`에 저장합니다. - -### feature_print 추출 -HuBERT를 이용하여 wav 파일을 미리 embedding으로 변환합니다. `/logs/실험명/1_16k_wavs`에 저장한 wav 파일을 읽고 HuBERT에서 wav 파일을 256차원 feature들로 변환한 후 npy 형식으로 `/logs/실험명/3_feature256`에 저장합니다. - -## step3 -모델의 훈련을 진행합니다. - -### 초보자용 용어 해설 -심층학습(딥러닝)에서는 데이터셋을 분할하여 조금씩 학습을 진행합니다.한 번의 모델 업데이트(step) 단계 당 batch_size개의 데이터를 탐색하여 예측과 오차를 수정합니다. 데이터셋 전부에 대해 이 작업을 한 번 수행하는 이를 하나의 epoch라고 계산합니다. - -따라서 학습 시간은 단계당 학습 시간 x (데이터셋 내 데이터의 수 / batch size) x epoch 수가 소요됩니다. 일반적으로 batch size가 클수록 학습이 안정적이게 됩니다. (step당 학습 시간 ÷ batch size)는 작아지지만 GPU 메모리를 더 많이 사용합니다. GPU RAM은 nvidia-smi 명령어를 통해 확인할 수 있습니다. 실행 환경에 따라 배치 크기를 최대한 늘리면 짧은 시간 내에 학습이 가능합니다. - -### 사전 학습된 모델 지정 -RVC는 적은 데이터셋으로도 훈련이 가능하도록 사전 훈련된 가중치에서 모델 훈련을 시작합니다. 기본적으로 `rvc-location/pretrained/f0G40k.pth` 및 `rvc-location/pretrained/f0D40k.pth`를 불러옵니다. 학습을 할 시에, 모델 파라미터는 각 save_every_epoch별로 `logs/experiment name/G_{}.pth` 와 `logs/experiment name/D_{}.pth`로 저장이 되는데, 이 경로를 지정함으로써 학습을 재개하거나, 다른 실험에서 학습한 모델의 가중치에서 학습을 시작할 수 있습니다. - -### index의 학습 -RVC에서는 학습시에 사용된 HuBERT의 feature값을 저장하고, 추론 시에는 학습 시 사용한 feature값과 유사한 feature 값을 탐색해 추론을 진행합니다. 이 탐색을 고속으로 수행하기 위해 사전에 index을 학습하게 됩니다. -Index 학습에는 근사 근접 탐색법 라이브러리인 Faiss를 사용하게 됩니다. `/logs/실험명/3_feature256`의 feature값을 불러와, 이를 모두 결합시킨 feature값을 `/logs/실험명/total_fea.npy`로서 저장, 그것을 사용해 학습한 index를`/logs/실험명/add_XXX.index`로 저장합니다. - -### 버튼 설명 -- モデルのトレーニング (모델 학습): step2b까지 실행한 후, 이 버튼을 눌러 모델을 학습합니다. -- 特徴インデックスのトレーニング (특징 지수 훈련): 모델의 훈련 후, index를 학습합니다. -- ワンクリックトレーニング (원클릭 트레이닝): step2b까지의 모델 훈련, feature index 훈련을 일괄로 실시합니다. 
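The Korean document above describes the final step of RVC training as building a Faiss index over the HuBERT features saved under `logs/<experiment>/3_feature256`, so that similar training features can be looked up quickly at inference time. The following is only a rough, hypothetical sketch of that index-building step, not the project's actual code; the directory name, file names, and use of `numpy`/`faiss` here are assumptions for illustration.

```python
# Hypothetical sketch of the feature-index training step described above.
# Assumes HuBERT features were saved as .npy files under logs/<exp>/3_feature256.
import glob
import os

import faiss  # assumed available, e.g. via `pip install faiss-cpu`
import numpy as np

exp_dir = "logs/my_experiment"  # hypothetical experiment directory
feature_dir = os.path.join(exp_dir, "3_feature256")

# Stack every per-utterance feature matrix (each one is [n_frames, 256]).
feature_files = sorted(glob.glob(os.path.join(feature_dir, "*.npy")))
total_fea = np.concatenate([np.load(f) for f in feature_files], axis=0).astype(np.float32)
np.save(os.path.join(exp_dir, "total_fea.npy"), total_fea)

# Build a simple exact L2 index over the features and save it to disk.
index = faiss.IndexFlatL2(total_fea.shape[1])
index.add(total_fea)
faiss.write_index(index, os.path.join(exp_dir, "added.index"))

# At inference time, the nearest training features to a query frame can be
# retrieved and mixed into the query before synthesis.
distances, neighbors = index.search(total_fea[:1], 8)
```

RVC itself uses an approximate (IVF-style) index for speed on large feature sets; the flat index above trades speed for exact search and keeps the sketch short.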
\ No newline at end of file diff --git a/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/ema.py b/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/ema.py deleted file mode 100644 index e5d61e90eadb4701c7c38d9ed63e4fca7afb78d9..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/ema.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch -from torch import nn - -class LitEma(nn.Module): - def __init__(self, model, decay=0.9999, use_num_updates=True): - super().__init__() - if decay < 0.0 or decay > 1.0: - raise ValueError('Decay must be between 0 and 1') - - self.m_name2s_name = {} - self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) - self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_updates - else torch.tensor(-1,dtype=torch.int)) - - for name, p in model.named_parameters(): - if p.requires_grad: - #remove as '.'-character is not allowed in buffers - s_name = name.replace('.','') - self.m_name2s_name.update({name:s_name}) - self.register_buffer(s_name,p.clone().detach().data) - - self.collected_params = [] - - def forward(self, model): - decay = self.decay - - if self.num_updates >= 0: - self.num_updates += 1 - decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates)) - - one_minus_decay = 1.0 - decay - - with torch.no_grad(): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - - for key in m_param: - if m_param[key].requires_grad: - sname = self.m_name2s_name[key] - shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) - shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) - else: - assert not key in self.m_name2s_name - - def copy_to(self, model): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - for key in m_param: - if m_param[key].requires_grad: - m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) - else: - assert not key in self.m_name2s_name - - def store(self, parameters): - """ - Save the current parameters for restoring later. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - temporarily stored. - """ - self.collected_params = [param.clone() for param in parameters] - - def restore(self, parameters): - """ - Restore the parameters stored with the `store` method. - Useful to validate the model with EMA parameters without affecting the - original optimization process. Store the parameters before the - `copy_to` method. After validation (or model saving), use this to - restore the former parameters. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - updated with the stored parameters. 
- """ - for c_param, param in zip(self.collected_params, parameters): - param.data.copy_(c_param.data) diff --git a/spaces/shravankumar147/IsCat/app.py b/spaces/shravankumar147/IsCat/app.py deleted file mode 100644 index e81e7e080649fb1f9c31c44d1b4fea9cb51763ac..0000000000000000000000000000000000000000 --- a/spaces/shravankumar147/IsCat/app.py +++ /dev/null @@ -1,19 +0,0 @@ -import gradio as gr -from fastai.vision.all import * - -def is_cat(x): return x[0].isupper() - -learn = load_learner('model.pkl') - -categories = ('Dog', 'Cat') - -def classify_img(img): - pred, idx, probs = learn.predict(img) - return dict(zip(categories, map(float, probs))) - -image = gr.inputs.Image(shape=(192,192)) -label = gr.outputs.Label() -examples = ['dog.jpg', 'cat.jpg', 'dunno.jpg'] - -iface = gr.Interface(fn=classify_img, inputs=image, outputs=label, examples=examples) -iface.launch() \ No newline at end of file diff --git a/spaces/sidharthism/fashion-eye/models/__init__.py b/spaces/sidharthism/fashion-eye/models/__init__.py deleted file mode 100644 index 9941a7bb29d1b9a0a00f9cf90ddf2c48f1e38ed9..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/models/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright 2020 Erik Härkönen. All rights reserved. -# This file is licensed to you under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software distributed under -# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS -# OF ANY KIND, either express or implied. See the License for the specific language -# governing permissions and limitations under the License. - -from .wrappers import * \ No newline at end of file diff --git a/spaces/silk-road/ChatHaruhi/characters/haruhi/gradio_header.md b/spaces/silk-road/ChatHaruhi/characters/haruhi/gradio_header.md deleted file mode 100644 index 2d62044576b8c991b1f19faae8b16cd6c967d92c..0000000000000000000000000000000000000000 --- a/spaces/silk-road/ChatHaruhi/characters/haruhi/gradio_header.md +++ /dev/null @@ -1,4 +0,0 @@ -## Chat凉宫春日 ChatHaruhi -项目地址 [https://github.com/LC1332/Chat-Haruhi-Suzumiya](https://github.com/LC1332/Chat-Haruhi-Suzumiya) -骆驼项目地址 [https://github.com/LC1332/Luotuo-Chinese-LLM](https://github.com/LC1332/Luotuo-Chinese-LLM) -此版本为图文版本,非最终版本,将上线更多功能,敬请期待 \ No newline at end of file diff --git a/spaces/simonduerr/diffdock/esm/esm/inverse_folding/gvp_modules.py b/spaces/simonduerr/diffdock/esm/esm/inverse_folding/gvp_modules.py deleted file mode 100644 index 484d9d5d0d8a52153de1f557c698e400b6fb1dc4..0000000000000000000000000000000000000000 --- a/spaces/simonduerr/diffdock/esm/esm/inverse_folding/gvp_modules.py +++ /dev/null @@ -1,473 +0,0 @@ -# Contents of this file are from the open source code for -# -# Jing, B., Eismann, S., Suriana, P., Townshend, R. J. L., & Dror, R. (2020). -# Learning from Protein Structure with Geometric Vector Perceptrons. In -# International Conference on Learning Representations. 
-# -# MIT License -# -# Copyright (c) 2020 Bowen Jing, Stephan Eismann, Patricia Suriana, Raphael Townshend, Ron Dror -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -import typing as T -import torch -from torch import nn -import torch.nn.functional as F -from torch_geometric.nn import MessagePassing -from torch_scatter import scatter_add, scatter - -def tuple_size(tp): - return tuple([0 if a is None else a.size() for a in tp]) - -def tuple_sum(tp1, tp2): - s1, v1 = tp1 - s2, v2 = tp2 - if v2 is None and v2 is None: - return (s1 + s2, None) - return (s1 + s2, v1 + v2) - -def tuple_cat(*args, dim=-1): - ''' - Concatenates any number of tuples (s, V) elementwise. - - :param dim: dimension along which to concatenate when viewed - as the `dim` index for the scalar-channel tensors. - This means that `dim=-1` will be applied as - `dim=-2` for the vector-channel tensors. - ''' - dim %= len(args[0][0].shape) - s_args, v_args = list(zip(*args)) - return torch.cat(s_args, dim=dim), torch.cat(v_args, dim=dim) - -def tuple_index(x, idx): - ''' - Indexes into a tuple (s, V) along the first dimension. - - :param idx: any object which can be used to index into a `torch.Tensor` - ''' - return x[0][idx], x[1][idx] - -def randn(n, dims, device="cpu"): - ''' - Returns random tuples (s, V) drawn elementwise from a normal distribution. - - :param n: number of data points - :param dims: tuple of dimensions (n_scalar, n_vector) - - :return: (s, V) with s.shape = (n, n_scalar) and - V.shape = (n, n_vector, 3) - ''' - return torch.randn(n, dims[0], device=device), \ - torch.randn(n, dims[1], 3, device=device) - -def _norm_no_nan(x, axis=-1, keepdims=False, eps=1e-8, sqrt=True): - ''' - L2 norm of tensor clamped above a minimum value `eps`. - - :param sqrt: if `False`, returns the square of the L2 norm - ''' - # clamp is slow - # out = torch.clamp(torch.sum(torch.square(x), axis, keepdims), min=eps) - out = torch.sum(torch.square(x), axis, keepdims) + eps - return torch.sqrt(out) if sqrt else out - -def _split(x, nv): - ''' - Splits a merged representation of (s, V) back into a tuple. - Should be used only with `_merge(s, V)` and only if the tuple - representation cannot be used. 
- - :param x: the `torch.Tensor` returned from `_merge` - :param nv: the number of vector channels in the input to `_merge` - ''' - v = torch.reshape(x[..., -3*nv:], x.shape[:-1] + (nv, 3)) - s = x[..., :-3*nv] - return s, v - -def _merge(s, v): - ''' - Merges a tuple (s, V) into a single `torch.Tensor`, where the - vector channels are flattened and appended to the scalar channels. - Should be used only if the tuple representation cannot be used. - Use `_split(x, nv)` to reverse. - ''' - v = torch.reshape(v, v.shape[:-2] + (3*v.shape[-2],)) - return torch.cat([s, v], -1) - -class GVP(nn.Module): - ''' - Geometric Vector Perceptron. See manuscript and README.md - for more details. - - :param in_dims: tuple (n_scalar, n_vector) - :param out_dims: tuple (n_scalar, n_vector) - :param h_dim: intermediate number of vector channels, optional - :param activations: tuple of functions (scalar_act, vector_act) - :param tuple_io: whether to keep accepting tuple inputs and outputs when vi - or vo = 0 - ''' - def __init__(self, in_dims, out_dims, h_dim=None, vector_gate=False, - activations=(F.relu, torch.sigmoid), tuple_io=True, - eps=1e-8): - super(GVP, self).__init__() - self.si, self.vi = in_dims - self.so, self.vo = out_dims - self.tuple_io = tuple_io - if self.vi: - self.h_dim = h_dim or max(self.vi, self.vo) - self.wh = nn.Linear(self.vi, self.h_dim, bias=False) - self.ws = nn.Linear(self.h_dim + self.si, self.so) - if self.vo: - self.wv = nn.Linear(self.h_dim, self.vo, bias=False) - if vector_gate: - self.wg = nn.Linear(self.so, self.vo) - else: - self.ws = nn.Linear(self.si, self.so) - - self.vector_gate = vector_gate - self.scalar_act, self.vector_act = activations - self.eps = eps - - def forward(self, x): - ''' - :param x: tuple (s, V) of `torch.Tensor`, - or (if vectors_in is 0), a single `torch.Tensor` - :return: tuple (s, V) of `torch.Tensor`, - or (if vectors_out is 0), a single `torch.Tensor` - ''' - if self.vi: - s, v = x - v = torch.transpose(v, -1, -2) - vh = self.wh(v) - vn = _norm_no_nan(vh, axis=-2, eps=self.eps) - s = self.ws(torch.cat([s, vn], -1)) - if self.scalar_act: - s = self.scalar_act(s) - if self.vo: - v = self.wv(vh) - v = torch.transpose(v, -1, -2) - if self.vector_gate: - g = self.wg(s).unsqueeze(-1) - else: - g = _norm_no_nan(v, axis=-1, keepdims=True, eps=self.eps) - if self.vector_act: - g = self.vector_act(g) - v = v * g - else: - if self.tuple_io: - assert x[1] is None - x = x[0] - s = self.ws(x) - if self.scalar_act: - s = self.scalar_act(s) - if self.vo: - v = torch.zeros(list(s.shape)[:-1] + [self.vo, 3], - device=s.device) - - if self.vo: - return (s, v) - elif self.tuple_io: - return (s, None) - else: - return s - - -class _VDropout(nn.Module): - ''' - Vector channel dropout where the elements of each - vector channel are dropped together. - ''' - def __init__(self, drop_rate): - super(_VDropout, self).__init__() - self.drop_rate = drop_rate - - def forward(self, x): - ''' - :param x: `torch.Tensor` corresponding to vector channels - ''' - if x is None: - return None - device = x.device - if not self.training: - return x - mask = torch.bernoulli( - (1 - self.drop_rate) * torch.ones(x.shape[:-1], device=device) - ).unsqueeze(-1) - x = mask * x / (1 - self.drop_rate) - return x - -class Dropout(nn.Module): - ''' - Combined dropout for tuples (s, V). - Takes tuples (s, V) as input and as output. 
- ''' - def __init__(self, drop_rate): - super(Dropout, self).__init__() - self.sdropout = nn.Dropout(drop_rate) - self.vdropout = _VDropout(drop_rate) - - def forward(self, x): - ''' - :param x: tuple (s, V) of `torch.Tensor`, - or single `torch.Tensor` - (will be assumed to be scalar channels) - ''' - if type(x) is torch.Tensor: - return self.sdropout(x) - s, v = x - return self.sdropout(s), self.vdropout(v) - -class LayerNorm(nn.Module): - ''' - Combined LayerNorm for tuples (s, V). - Takes tuples (s, V) as input and as output. - ''' - def __init__(self, dims, tuple_io=True, eps=1e-8): - super(LayerNorm, self).__init__() - self.tuple_io = tuple_io - self.s, self.v = dims - self.scalar_norm = nn.LayerNorm(self.s) - self.eps = eps - - def forward(self, x): - ''' - :param x: tuple (s, V) of `torch.Tensor`, - or single `torch.Tensor` - (will be assumed to be scalar channels) - ''' - if not self.v: - if self.tuple_io: - return self.scalar_norm(x[0]), None - return self.scalar_norm(x) - s, v = x - vn = _norm_no_nan(v, axis=-1, keepdims=True, sqrt=False, eps=self.eps) - nonzero_mask = (vn > 2 * self.eps) - vn = torch.sum(vn * nonzero_mask, dim=-2, keepdim=True - ) / (self.eps + torch.sum(nonzero_mask, dim=-2, keepdim=True)) - vn = torch.sqrt(vn + self.eps) - v = nonzero_mask * (v / vn) - return self.scalar_norm(s), v - -class GVPConv(MessagePassing): - ''' - Graph convolution / message passing with Geometric Vector Perceptrons. - Takes in a graph with node and edge embeddings, - and returns new node embeddings. - - This does NOT do residual updates and pointwise feedforward layers - ---see `GVPConvLayer`. - - :param in_dims: input node embedding dimensions (n_scalar, n_vector) - :param out_dims: output node embedding dimensions (n_scalar, n_vector) - :param edge_dims: input edge embedding dimensions (n_scalar, n_vector) - :param n_layers: number of GVPs in the message function - :param module_list: preconstructed message function, overrides n_layers - :param aggr: should be "add" if some incoming edges are masked, as in - a masked autoregressive decoder architecture - ''' - def __init__(self, in_dims, out_dims, edge_dims, n_layers=3, - vector_gate=False, module_list=None, aggr="mean", eps=1e-8, - activations=(F.relu, torch.sigmoid)): - super(GVPConv, self).__init__(aggr=aggr) - self.eps = eps - self.si, self.vi = in_dims - self.so, self.vo = out_dims - self.se, self.ve = edge_dims - - module_list = module_list or [] - if not module_list: - if n_layers == 1: - module_list.append( - GVP((2*self.si + self.se, 2*self.vi + self.ve), - (self.so, self.vo), activations=(None, None))) - else: - module_list.append( - GVP((2*self.si + self.se, 2*self.vi + self.ve), out_dims, - vector_gate=vector_gate, activations=activations) - ) - for i in range(n_layers - 2): - module_list.append(GVP(out_dims, out_dims, - vector_gate=vector_gate)) - module_list.append(GVP(out_dims, out_dims, - activations=(None, None))) - self.message_func = nn.Sequential(*module_list) - - def forward(self, x, edge_index, edge_attr): - ''' - :param x: tuple (s, V) of `torch.Tensor` - :param edge_index: array of shape [2, n_edges] - :param edge_attr: tuple (s, V) of `torch.Tensor` - ''' - x_s, x_v = x - message = self.propagate(edge_index, - s=x_s, v=x_v.reshape(x_v.shape[0], 3*x_v.shape[1]), - edge_attr=edge_attr) - return _split(message, self.vo) - - def message(self, s_i, v_i, s_j, v_j, edge_attr): - v_j = v_j.view(v_j.shape[0], v_j.shape[1]//3, 3) - v_i = v_i.view(v_i.shape[0], v_i.shape[1]//3, 3) - message = tuple_cat((s_j, v_j), 
edge_attr, (s_i, v_i)) - message = self.message_func(message) - return _merge(*message) - - -class GVPConvLayer(nn.Module): - ''' - Full graph convolution / message passing layer with - Geometric Vector Perceptrons. Residually updates node embeddings with - aggregated incoming messages, applies a pointwise feedforward - network to node embeddings, and returns updated node embeddings. - - To only compute the aggregated messages, see `GVPConv`. - - :param node_dims: node embedding dimensions (n_scalar, n_vector) - :param edge_dims: input edge embedding dimensions (n_scalar, n_vector) - :param n_message: number of GVPs to use in message function - :param n_feedforward: number of GVPs to use in feedforward function - :param drop_rate: drop probability in all dropout layers - :param autoregressive: if `True`, this `GVPConvLayer` will be used - with a different set of input node embeddings for messages - where src >= dst - ''' - def __init__(self, node_dims, edge_dims, vector_gate=False, - n_message=3, n_feedforward=2, drop_rate=.1, - autoregressive=False, attention_heads=0, - conv_activations=(F.relu, torch.sigmoid), - n_edge_gvps=0, layernorm=True, eps=1e-8): - - super(GVPConvLayer, self).__init__() - if attention_heads == 0: - self.conv = GVPConv( - node_dims, node_dims, edge_dims, n_layers=n_message, - vector_gate=vector_gate, - aggr="add" if autoregressive else "mean", - activations=conv_activations, - eps=eps, - ) - else: - raise NotImplementedError - if layernorm: - self.norm = nn.ModuleList([LayerNorm(node_dims, eps=eps) for _ in range(2)]) - else: - self.norm = nn.ModuleList([nn.Identity() for _ in range(2)]) - self.dropout = nn.ModuleList([Dropout(drop_rate) for _ in range(2)]) - - ff_func = [] - if n_feedforward == 1: - ff_func.append(GVP(node_dims, node_dims, activations=(None, None))) - else: - hid_dims = 4*node_dims[0], 2*node_dims[1] - ff_func.append(GVP(node_dims, hid_dims, vector_gate=vector_gate)) - for i in range(n_feedforward-2): - ff_func.append(GVP(hid_dims, hid_dims, vector_gate=vector_gate)) - ff_func.append(GVP(hid_dims, node_dims, activations=(None, None))) - self.ff_func = nn.Sequential(*ff_func) - - self.edge_message_func = None - if n_edge_gvps > 0: - si, vi = node_dims - se, ve = edge_dims - module_list = [ - GVP((2*si + se, 2*vi + ve), edge_dims, vector_gate=vector_gate) - ] - for i in range(n_edge_gvps - 2): - module_list.append(GVP(edge_dims, edge_dims, - vector_gate=vector_gate)) - if n_edge_gvps > 1: - module_list.append(GVP(edge_dims, edge_dims, - activations=(None, None))) - self.edge_message_func = nn.Sequential(*module_list) - if layernorm: - self.edge_norm = LayerNorm(edge_dims, eps=eps) - else: - self.edge_norm = nn.Identity() - self.edge_dropout = Dropout(drop_rate) - - def forward(self, x, edge_index, edge_attr, - autoregressive_x=None, node_mask=None): - ''' - :param x: tuple (s, V) of `torch.Tensor` - :param edge_index: array of shape [2, n_edges] - :param edge_attr: tuple (s, V) of `torch.Tensor` - :param autoregressive_x: tuple (s, V) of `torch.Tensor`. - If not `None`, will be used as srcqq node embeddings - for forming messages where src >= dst. The corrent node - embeddings `x` will still be the base of the update and the - pointwise feedforward. - :param node_mask: array of type `bool` to index into the first - dim of node embeddings (s, V). If not `None`, only - these nodes will be updated. 
- ''' - if self.edge_message_func: - src, dst = edge_index - if autoregressive_x is None: - x_src = x[0][src], x[1][src] - else: - mask = (src < dst).unsqueeze(-1) - x_src = ( - torch.where(mask, x[0][src], autoregressive_x[0][src]), - torch.where(mask.unsqueeze(-1), x[1][src], - autoregressive_x[1][src]) - ) - x_dst = x[0][dst], x[1][dst] - x_edge = ( - torch.cat([x_src[0], edge_attr[0], x_dst[0]], dim=-1), - torch.cat([x_src[1], edge_attr[1], x_dst[1]], dim=-2) - ) - edge_attr_dh = self.edge_message_func(x_edge) - edge_attr = self.edge_norm(tuple_sum(edge_attr, - self.edge_dropout(edge_attr_dh))) - - if autoregressive_x is not None: - src, dst = edge_index - mask = src < dst - edge_index_forward = edge_index[:, mask] - edge_index_backward = edge_index[:, ~mask] - edge_attr_forward = tuple_index(edge_attr, mask) - edge_attr_backward = tuple_index(edge_attr, ~mask) - - dh = tuple_sum( - self.conv(x, edge_index_forward, edge_attr_forward), - self.conv(autoregressive_x, edge_index_backward, edge_attr_backward) - ) - - count = scatter_add(torch.ones_like(dst), dst, - dim_size=dh[0].size(0)).clamp(min=1).unsqueeze(-1) - - dh = dh[0] / count, dh[1] / count.unsqueeze(-1) - - else: - dh = self.conv(x, edge_index, edge_attr) - - if node_mask is not None: - x_ = x - x, dh = tuple_index(x, node_mask), tuple_index(dh, node_mask) - - x = self.norm[0](tuple_sum(x, self.dropout[0](dh))) - - dh = self.ff_func(x) - x = self.norm[1](tuple_sum(x, self.dropout[1](dh))) - - if node_mask is not None: - x_[0][node_mask], x_[1][node_mask] = x[0], x[1] - x = x_ - - return x, edge_attr diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download GTA 5 5.0.1 APK and Join the Action in Los Santos.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download GTA 5 5.0.1 APK and Join the Action in Los Santos.md deleted file mode 100644 index 91120d1834dbf4f4851c8bf2e7a18de025f511d6..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download GTA 5 5.0.1 APK and Join the Action in Los Santos.md +++ /dev/null @@ -1,153 +0,0 @@ - -

              GTA 5 5.0.1 APK Download: How to Play GTA 5 on Android, PC and Mac

              -

              Introduction

              -

              GTA 5 is one of the most popular and acclaimed video games of all time, with millions of fans around the world. The game offers an immersive and thrilling experience of living in the criminal underworld of Los Santos, a fictional city inspired by Los Angeles. You can play as one of the three main characters, Michael, Trevor, or Franklin, and switch between them at any time to see the story from different perspectives. You can also explore the vast open world of the game, which includes urban areas, beaches, mountains, deserts, and more. You can take part in various activities, such as missions, heists, races, fights, and more.

              -

              But what if you want to play GTA 5 on your mobile device? Or what if you want to enjoy the game on your PC or Mac with better graphics and performance? Well, there is a way to do that, and it is called GTA 5 5.0.1 APK.

              -

              gta 5 5.0.1 apk download


              Download File ✓✓✓ https://ssurll.com/2uNVxN



              -

              What is GTA 5?

              -

              GTA 5 is an action-adventure game developed by Rockstar Games and released in 2013 for PlayStation 3 and Xbox 360, and later for PlayStation 4, Xbox One, and PC. The game is the fifth main installment in the Grand Theft Auto series, which is known for its controversial and satirical depiction of crime, violence, and social issues.

              -

              The game follows the lives of three protagonists who are involved in various criminal activities in Los Santos and its surrounding areas. The game features a nonlinear storyline that allows the player to choose how to complete missions and influence the outcome of events. The game also features a multiplayer mode called GTA Online, where up to 30 players can cooperate or compete in various modes.

              -

              What is GTA 5 5.0.1 APK?

              -

              GTA 5 5.0.1 APK is a modified version of the original GTA 5 game that allows you to play it on your Android device. It is not an official release by Rockstar Games, but rather a fan-made project that uses the game's assets and code to create a mobile version of the game.

              -

              The APK file is a package that contains all the necessary files and data to install and run the game on your device. You can download it from various websites that offer it for free or for a small fee. However, you should be careful when downloading APK files from unknown sources, as they may contain malware or viruses that can harm your device or steal your personal information.

              -

              Why download GTA 5 5.0.1 APK?

              -

              There are many reasons why you might want to download GTA 5 5.0.1 APK for your Android device. Here are some of them:

              -
                -
              • You can play GTA 5 on your mobile device anytime and anywhere, without needing a console or a PC.
              • -
              • You can enjoy the same gameplay and features as the original game, such as missions, heists, vehicles, weapons, characters, etc.
              • -
              • You can customize the graphics settings according to your device's specifications and preferences.
              • -
              • You can save your progress and resume it later on any device that has the APK installed.
              • -
              • You can access some exclusive features that are not available in the original game, such as cheats, mods, skins, etc.
              • -
              -

              How to download GTA 5 5.0.1 APK for Android

              -

              Requirements

              -

              Before you download GTA 5 5.0.1 APK for your Android device, you need to make sure that your device meets the following requirements:

              -

              gta 5 mobile apk download for android
              -gta 5 apk obb download latest version
              -gta 5 apk free download full version
              -gta 5 android apk data download
              -gta 5 apk download highly compressed
              -gta 5 apk download for pc windows 10
              -gta 5 apk mod download unlimited money
              -gta 5 apk download offline play
              -gta 5 apk download no verification
              -gta 5 apk download with cheats
              -gta 5 apk download for ios
              -gta 5 apk download for mac
              -gta 5 apk download for laptop
              -gta 5 apk download online multiplayer
              -gta 5 apk download real graphics
              -gta 5 apk download update version
              -gta 5 apk download zip file
              -gta 5 apk download by rockstar games
              -gta 5 apk download bluestacks
              -gta 5 apk download beta version
              -gta 5 apk download cracked
              -gta 5 apk download direct link
              -gta 5 apk download easy install
              -gta 5 apk download fast and furious
              -gta 5 apk download google drive link
              -gta 5 apk download hack version
              -gta 5 apk download in parts
              -gta 5 apk download jio phone
              -gta 5 apk download key generator
              -gta 5 apk download low mb
              -gta 5 apk download mediafıre link
              -gta 5 apk download new update
              -gta 5 apk download original game
              -gta 5 apk download play store link
              -gta 5 apk download quora answer
              -gta 5 apk download revdl.com link
              -gta 5 apk download size mb
              -gta 5 apk download tamil tutorial
              -gta 5 apk download unlimited health
              -gta 5 apk download virus free link
              -gta 5 apk download with license key
              -gta 5 apk download xbox one controller support
              -gta 5 apk download youtube video link
              -gta 5 apkpure.com free download link
              -how to install and play GTA V on Android using APK file

              -
                -
              • Your device must have at least 4 GB of RAM and 3 GB of free storage space.
              • -
              • Your device must have Android 7.0 or higher operating system.
              • -
              • Your device must support OpenGL ES 3.0 or higher graphics API.
              • -
              • Your device must have a stable internet connection to download the APK file and the additional data files.
              • -
              • Your device must allow the installation of apps from unknown sources. You can enable this option in your device's settings, under security or privacy.
              • -
              -

              Steps

              -

              Once you have checked that your device meets the requirements, you can follow these steps to download and install GTA 5 5.0.1 APK on your Android device:

              -
                -
              1. Go to a reliable website that offers GTA 5 5.0.1 APK for download, such as [GTA5APK.com] or [APKPure.com].
              2. -
              3. Click on the download button and wait for the APK file to be downloaded on your device. The file size is about 36 MB.
              4. -
              5. After the download is complete, locate the APK file in your device's file manager and tap on it to start the installation process.
              6. -
              7. Follow the instructions on the screen and grant the necessary permissions to the app.
              8. -
              9. Wait for the installation to finish and launch the app from your home screen or app drawer.
              10. -
              11. The app will ask you to download the additional data files, which are about 2.6 GB in size. You can choose to download them via Wi-Fi or mobile data, depending on your preference and availability.
              12. -
              13. After the data files are downloaded, the app will verify them and start loading the game.
              14. -
              15. You can now enjoy playing GTA 5 on your Android device!
              16. -
              -

              Tips and tricks

              -

              To make the most out of your GTA 5 5.0.1 APK experience, here are some tips and tricks that you can use:

              -
                -
              • You can adjust the graphics settings in the game's menu, under display options. You can choose from low, medium, high, or ultra settings, depending on your device's performance and battery life.
              • -
              • You can use cheats in the game by typing them in the in-game phone's dialer. Some of the cheats are: 199933284227 for invincibility, 199955015050 for all weapons, 199946236342 for fast run, etc. You can find more cheats online or by using a cheat app.
              • -
              • You can use mods in the game by downloading them from various websites or by using a mod app. Mods are modifications that add new features, vehicles, weapons, skins, etc. to the game. However, you should be careful when using mods, as they may cause glitches or crashes in the game.
              • -
              • You can save your progress in the game by using the quick save option in the game's menu, under game options. You can also use the auto save option, which saves your progress automatically after completing a mission or an activity.
              • -
              • You can switch between the three main characters in the game by tapping on their icons on the top left corner of the screen. You can also switch between first-person and third-person views by tapping on the camera icon on the top right corner of the screen.
              • -
              -

              How to play GTA 5 5.0.1 APK on PC and Mac

              -

              Requirements

              -

              If you want to play GTA 5 5.0.1 APK on your PC or Mac, you need to have the following requirements:

              -
                -
              • A PC or Mac with at least 8 GB of RAM and 4 GB of free storage space.
              • -
              • A Windows 7 or higher or a Mac OS X 10.9 or higher operating system.
              • -
              • A graphics card that supports OpenGL ES 3.0 or higher graphics API.
              • -
              • An Android emulator that can run GTA 5 5.0.1 APK, such as [BlueStacks] or [NoxPlayer].
              • -
              • A stable internet connection to download the emulator and the APK file and the additional data files.
              • -
              -

              Steps

              -

              To play GTA 5 5.0.1 APK on your PC or Mac, you need to follow these steps:

              -
                -
              1. Download and install an Android emulator of your choice on your PC or Mac. You can download them from their official websites or from other sources. For example, you can download BlueStacks from [BlueStacks.com] or NoxPlayer from [Bignox.com]. Follow the instructions on the screen and grant the necessary permissions to the emulator.
              2. -
              3. Download GTA 5 5.0.1 APK file from a reliable website, such as [GTA5APK.com] or [APKPure.com]. Save the file on your PC or Mac.
              4. -
              5. Launch the emulator and sign in with your Google account. If you don't have one, you can create one for free.
              6. -
              7. Locate the APK file on your PC or Mac and drag and drop it to the emulator's window. Alternatively, you can use the emulator's file manager to browse and install the APK file.
              8. -
              9. Wait for the installation to finish and launch the app from the emulator's home screen or app drawer.
              10. -
              11. The app will ask you to download the additional data files, which are about 2.6 GB in size. You can choose to download them via Wi-Fi or mobile data, depending on your preference and availability.
              12. -
              13. After the data files are downloaded, the app will verify them and start loading the game.
              14. -
              15. You can now play GTA 5 on your PC or Mac!
              16. -
              -
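              If you prefer scripting the sideload step instead of dragging the APK into the emulator window, the Android Debug Bridge (adb) can install it from a terminal. The sketch below is only a minimal Python wrapper around two standard adb commands; it assumes the Android platform tools are installed, and the endpoint address, port, and file name are placeholders, since the ADB port most desktop emulators (including BlueStacks and NoxPlayer) expose differs by emulator and version, so check your emulator's settings first.

```python
import subprocess

EMULATOR_ADDR = "127.0.0.1:5555"  # placeholder; use the ADB address/port your emulator reports
APK_PATH = "gta-5-5.0.1.apk"      # placeholder file name for the downloaded APK

def sideload_apk(apk_path: str, addr: str) -> None:
    # Point adb at the running emulator, then install (or reinstall) the APK.
    subprocess.run(["adb", "connect", addr], check=True)
    subprocess.run(["adb", "-s", addr, "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    sideload_apk(APK_PATH, EMULATOR_ADDR)
```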

              Tips and tricks

              -

              To enhance your GTA 5 5.0.1 APK experience on your PC or Mac, here are some tips and tricks that you can use:

              -
                -
              • You can use your keyboard and mouse to control the game, or you can use a gamepad if you have one. You can customize the controls in the emulator's settings, under keyboard mapping or gamepad mapping.
              • -
              • You can use the emulator's features to improve the game's performance and graphics, such as turbo mode, high FPS mode, high resolution mode, etc. You can access these features in the emulator's settings, under engine or display.
              • -
              • You can use the emulator's screenshot and video recording tools to capture your gameplay and share it with your friends or online. You can access these tools in the emulator's toolbar, under tools or media manager.
              • -
              • You can use the emulator's multi-instance feature to run multiple instances of GTA 5 5.0.1 APK on your PC or Mac. This way, you can play with different characters, accounts, or modes at the same time. You can access this feature in the emulator's toolbar, under multi-instance manager.
              • -
              -

              Conclusion

              -

              Summary of the article

              -

              In this article, we have shown you how to download and play GTA 5 5.0.1 APK on your Android device, PC, or Mac. We have explained what GTA 5 5.0.1 APK is, why you might want to download it, and what are the requirements and steps to do so. We have also given you some tips and tricks to make the most out of your GTA 5 5.0.1 APK experience.

              -

              Call to action

              -

              If you are a fan of GTA 5 and want to enjoy it on your mobile device or computer, then you should definitely try GTA 5 5.0.1 APK. It is a free and easy way to play one of the best games ever made on any platform you want. So what are you waiting for? Download GTA 5 5.0.1 APK today and have fun!

              -

              FAQs

              -

              Is GTA 5 5.0.1 APK safe to download?

              -

              GTA 5 5.0.1 APK is generally safe to download if you get it from a reputable website that does not contain any malware or viruses. However, you should always be careful when downloading any APK file from unknown sources, as they may pose a risk to your device or personal information.

              -

              Is GTA 5 5.0.1 APK legal to download?

              -

              GTA 5 5.0.1 APK is not an official release by Rockstar Games, but rather a fan-made project that uses the game's assets and code to create a mobile version of the game. Therefore, it may not be legal to download in some countries or regions where GTA 5 is copyrighted or banned.

              -

              Does GTA 5 5.0.1 APK support GTA Online?

              -

              GTA 5 5.0.1 APK does not support GTA Online, which is the multiplayer mode of GTA 5 that allows up to 30 players to cooperate or compete in various modes online. If you want to play GTA Online, you need to have the original GTA 5 game on a console or a PC and a valid Rockstar Games account.

              -

              Can I play GTA 5 5.0.1 APK with my friends?

              -

              GTA 5 5.0.1 APK does not support online multiplayer, but you can still play with your friends locally by using a Wi-Fi hotspot or a Bluetooth connection. You can also use a third-party app or software that allows you to create a virtual LAN network and connect with your friends online.

              -

              How can I update GTA 5 5.0.1 APK?

              -

              GTA 5 5.0.1 APK is not an official release by Rockstar Games, so it does not receive regular updates or patches from the developer. However, you can check the website where you downloaded the APK file for any new versions or updates that may be available. You can also follow the social media accounts or forums of the fan-made project for any news or announcements.

              -

              How can I uninstall GTA 5 5.0.1 APK?

              -

              If you want to uninstall GTA 5 5.0.1 APK from your device, you can do so by following these steps:

              -
                -
              1. Go to your device's settings, under apps or applications.
              2. -
              3. Find and tap on GTA 5 5.0.1 APK.
              4. -
              5. Tap on uninstall and confirm your choice.
              6. -
              7. Delete the APK file and the data files from your device's storage.
              8. -

              401be4b1e0
              -
              -
              \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Truck Simulator Europe 2 and Deliver Cargo Across Impressive Distances.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Truck Simulator Europe 2 and Deliver Cargo Across Impressive Distances.md deleted file mode 100644 index 8957d5daf4bd641fbdb93a81e57aa892abfb3bd2..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Truck Simulator Europe 2 and Deliver Cargo Across Impressive Distances.md +++ /dev/null @@ -1,100 +0,0 @@ - -

              Download Truck Simulator Europe 2: A Guide for Truck Enthusiasts

              -

              Do you love trucks and driving? Do you dream of traveling across Europe and delivering cargo to different destinations? If you answered yes to these questions, then you might want to download Truck Simulator Europe 2, a popular simulation game that lets you experience the life of a truck driver. In this article, we will tell you everything you need to know about this game, including its features, how to download and install it, why you should play it, and some tips and tricks to help you succeed. Let's get started!

              -

              What is Truck Simulator Europe 2?

              -

              Truck Simulator Europe 2 is a game developed by WandA, a studio that specializes in creating realistic and immersive simulation games. It is the sequel to Truck Simulator Europe, which was released in 2017. The game is available for Android and iOS devices, as well as Windows PCs. You can download it for free from the Google Play Store, the App Store, or the official website . However, some features and content may require in-app purchases.

              -

              download truck simulator europe 2


              Downloadhttps://ssurll.com/2uNQIL



              -

              Features of the game

              -

              Truck Simulator Europe 2 has many features that make it one of the best truck simulation games on the market. Here are some of them:

              -
                -
              • You can choose from over 15 licensed truck brands and models, each with their own specifications and performance. You can also customize your truck with various parts, accessories, paint jobs, and decals.
              • -
              • You can drive across more than 60 European cities, covering over 20 countries. You can see famous landmarks, such as the Eiffel Tower, Big Ben, or the Colosseum, as well as realistic roads, bridges, tunnels, and tolls.
              • -
              • You can transport a wide variety of cargo types, such as food, furniture, chemicals, or livestock. You have to be careful not to damage your cargo or lose it on the way.
              • -
              • You can run your own trucking company, hiring drivers, buying garages, managing finances, and expanding your business. You can also compete with other players online and see who has the best reputation and income.
              • -
              • You can enjoy realistic driving physics, weather effects, day and night cycles, traffic rules, speed limits, fuel consumption, fatigue system, and more. You can also use different camera angles, including first-person view from the cockpit.
              • -
              -

              How to download and install the game

              -

              Downloading and installing Truck Simulator Europe 2 is easy and fast. Here are the steps you need to follow:

              -
                -
              1. Go to the Google Play Store, the App Store, or the official website and search for Truck Simulator Europe 2.
              2. -
              3. Tap on the Install button and wait for the game to download. The game size is about 300 MB.
              4. -
              5. Once the download is complete, tap on the Open button and launch the game.
              6. -
              7. Follow the instructions on the screen to set up your profile and preferences.
              8. -
              9. Enjoy playing Truck Simulator Europe 2!
              10. -
              -

              Why should you play Truck Simulator Europe 2?

              -

              If you are still not convinced that Truck Simulator Europe 2 is worth playing, here are some reasons why you should give it a try:

              -

              Experience realistic truck driving across Europe

              -

              Truck Simulator Europe

              Truck Simulator Europe 2 is a game that simulates the real-life challenges and rewards of truck driving across Europe. You can feel the thrill of driving a powerful truck on various roads and terrains, as well as the responsibility of delivering cargo safely and on time. You can also learn about the different cultures and landmarks of Europe, as well as the traffic rules and regulations of each country. You can immerse yourself in the realistic graphics, sounds, and physics of the game, and feel like you are actually behind the wheel of a truck.

              -

              Customize your own truck and company

              -

              Truck Simulator Europe 2 is not just about driving, but also about creating your own identity and style. You can choose from a wide range of truck brands and models, each with their own strengths and weaknesses. You can also customize your truck with various parts, accessories, paint jobs, and decals, to make it stand out from the crowd. You can also run your own trucking company, hiring drivers, buying garages, managing finances, and expanding your business. You can compete with other players online and see who has the best reputation and income. You can also join or create your own clans and chat with other truck enthusiasts.

              -

              Explore diverse landscapes and landmarks

              -

              Truck Simulator Europe 2 is a game that lets you travel across more than 60 European cities, covering over 20 countries. You can see famous landmarks, such as the Eiffel Tower, Big Ben, or the Colosseum, as well as realistic roads, bridges, tunnels, and tolls. You can also explore diverse landscapes and climates, such as mountains, forests, deserts, snow, rain, fog, and more. You can enjoy the changing scenery and weather effects as you drive along. You can also discover hidden roads and shortcuts that may save you time or money.

              -

              How to download truck simulator europe 2 for free
              -Download truck simulator europe 2 mod apk
              -Best truck simulator games for PC
              -Euro truck simulator 2 download full version
              -Download truck simulator europe 2 for android
              -Euro truck simulator 2 steam key
              -Download truck simulator europe 2 for mac
              -Euro truck simulator 2 DLCs
              -Download truck simulator europe 2 for windows 10
              -Euro truck simulator 2 system requirements
              -Download truck simulator europe 2 latest version
              -Euro truck simulator 2 multiplayer mod
              -Download truck simulator europe 2 offline installer
              -Euro truck simulator 2 cheats and hacks
              -Download truck simulator europe 2 demo
              -Euro truck simulator 2 review and rating
              -Download truck simulator europe 2 update patch
              -Euro truck simulator 2 tips and tricks
              -Download truck simulator europe 2 crack file
              -Euro truck simulator 2 map expansion
              -Download truck simulator europe 2 highly compressed
              -Euro truck simulator 2 mods and skins
              -Download truck simulator europe 2 from official website
              -Euro truck simulator 2 gameplay and features
              -Download truck simulator europe 2 torrent link
              -Euro truck simulator 2 best trucks and trailers
              -Download truck simulator europe 2 on steam
              -Euro truck simulator 2 online play
              -Download truck simulator europe 2 for linux
              -Euro truck simulator 2 VR support
              -Download truck simulator europe 2 for ios
              -Euro truck simulator 2 custom music and radio stations
              -Download truck simulator europe 2 for ps4
              -Euro truck simulator 2 achievements and trophies
              -Download truck simulator europe 2 for xbox one
              -Euro truck simulator 2 screenshots and videos
              -Download truck simulator europe 2 for switch
              -Euro truck simulator 2 news and updates
              -Download truck simulator europe 2 for windows phone
              -Euro truck simulator 2 community and forums

              -

              Tips and tricks for playing Truck Simulator Europe 2

              -

              If you want to succeed in Truck Simulator Europe 2, you need to master some skills and strategies. Here are some tips and tricks that may help you:

              -

              Use the map and GPS to plan your routes

              -

              Before you start a delivery job, you should check the map and GPS to see the best route to your destination. You should consider factors such as distance, time, traffic, tolls, fuel stations, rest areas, and weather. You should also check the cargo type, weight, size, and value. You should choose a route that is safe, efficient, and profitable. You should also follow the directions of the GPS voice navigation system to avoid getting lost or missing turns.

              -

              Follow the traffic rules and avoid fines

              -

              When you are driving on the road, you should follow the traffic rules and regulations of each country. You should obey the speed limits, traffic lights, signs, and signals. You should also respect the other vehicles and pedestrians on the road. You should avoid driving recklessly, such as speeding, overtaking dangerously, running red lights, or crashing. You should also avoid driving under the influence of alcohol or drugs. If you break the traffic rules, you may get fined by the police or lose your license. You may also damage your truck or cargo, which will cost you money and reputation.

              -

              Manage your fuel, fatigue, and cargo damage

              -

              When you are driving on the road, you should also manage your fuel, fatigue, and cargo damage. You should monitor your fuel gauge and refill your tank at the nearest fuel station when it is low. You should also monitor your fatigue level and rest at the nearest rest area when it is high. You should also monitor your cargo damage and repair your truck at the nearest service station when it is high. If you run out of fuel, get too tired, or damage your cargo too much, you may fail your delivery job or lose money and reputation.

              -

              Upgrade your skills and unlock new opportunities

              -

              When you complete delivery jobs, you will earn money and experience points. You can use the money to buy new trucks, upgrade your existing trucks, or expand your company. You can use the experience points to level up and unlock new skills. There are four skill categories: long distance, heavy cargo, fragile cargo, and urgent delivery. Each category has six levels, and each level gives you a bonus or a perk: leveling up long distance opens longer routes, heavy cargo lets you haul heavier loads, fragile cargo reduces damage to your deliveries, and urgent delivery unlocks faster routes, and every one of these raises your earnings.

              -

              Conclusion

              -

              Truck Simulator Europe 2 is a game that lets you experience the life of a truck driver across Europe. You can drive realistic trucks, transport various cargoes, run your own company, and compete with other players online. You can also customize your truck, explore diverse landscapes and landmarks, and learn about the traffic rules and cultures of different countries. You can download the game for free from the Google Play Store, the App Store, or the official website . However, some features and content may require in-app purchases.

              -

              If you are a truck enthusiast or a simulation game fan, you should definitely try Truck Simulator Europe 2. It is a fun and challenging game that will keep you entertained for hours. It is also a great way to learn about Europe and its geography, history, and culture. So what are you waiting for? Download Truck Simulator Europe 2 today and start your trucking adventure!

              -

              FAQs

              -
                -
              • Q: How do I save my progress in Truck Simulator Europe 2?
              • -
              • A: The game automatically saves your progress every time you complete a delivery job or exit the game. You can also manually save your progress by tapping on the menu button on the top left corner of the screen and then tapping on the save button.
              • -
              • Q: How do I change the language of Truck Simulator Europe 2?
              • -
                A: The game supports multiple languages, such as English, French, German, Spanish, Italian, Portuguese, Turkish, Russian, and more. You can change the language of the game by tapping on the menu button on the top left corner of the screen and then tapping on the settings button. Then, you can select the language option and choose your preferred language.
              • -
              • Q: How do I connect with other players in Truck Simulator Europe 2?
              • -
              • A: The game has a multiplayer mode that allows you to connect with other players online. You can join or create your own clans and chat with other truck enthusiasts. You can also compete with other players in leaderboards and rankings. To access the multiplayer mode, you need to have an internet connection and a registered account.
              • -
              • Q: How do I get more money and experience in Truck Simulator Europe 2?
              • -
              • A: The main way to get more money and experience in the game is to complete delivery jobs. You can choose from different types of jobs, such as long distance, heavy cargo, fragile cargo, or urgent delivery. Each job has a different payout and difficulty level. You can also get more money and experience by completing achievements, challenges, and events. You can also watch ads or make in-app purchases to get more money and experience.
              • -
              • Q: How do I contact the developers of Truck Simulator Europe 2?
              • -
              • A: If you have any questions, feedback, suggestions, or issues with the game, you can contact the developers of Truck Simulator Europe 2 by emailing them at support@wanda.com or visiting their Facebook page. They will try to respond to you as soon as possible.
              • -

              401be4b1e0
              -
              -
              \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download the Latest Worship Hit King of Kings by Chandler Moore Essential Worship.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download the Latest Worship Hit King of Kings by Chandler Moore Essential Worship.md deleted file mode 100644 index 3016c82e0d9a575a1e5e2a28258d746d64d463aa..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download the Latest Worship Hit King of Kings by Chandler Moore Essential Worship.md +++ /dev/null @@ -1,95 +0,0 @@ - -

              How to Download King of Kings by Chandler Moore

              -

              If you are looking for a powerful and uplifting worship song, you might want to check out King of Kings by Chandler Moore. This song celebrates the majesty and glory of Jesus Christ, who came to earth as a humble baby, died on the cross for our sins, and rose again in victory. Whether you want to sing along, meditate, or just enjoy the beautiful melody, this song will inspire you and draw you closer to God.

              -

              download king of kings by chandler moore


              Download Zip - https://ssurll.com/2uNUCc



              -

              But how can you download this song and listen to it anytime and anywhere? In this article, we will show you how to buy or get this song for free from different sources, and how to transfer it to your device for offline listening. Let's get started!

              -

              What is King of Kings?

              -

              King of Kings is a worship song performed by Chandler Moore, a singer-songwriter and worship leader who is part of Maverick City Music. The song was written by Jason Ingram, Brooke Ligertwood, and Scott Ligertwood, and was originally recorded by Hillsong Worship in 2019.

              -

              The song tells the story of God's plan of salvation through Jesus Christ, from the creation to the cross to the church. It praises God as the Father, the Son, and the Holy Spirit, who is worthy of all honor and glory. The song also declares that we are free from sin and death because of Jesus' resurrection, and that we are called to share the gospel with the world.

              -

              The song has a catchy tune and a powerful chorus that invites us to join in worship. Here are some of the lyrics:

              -
              Praise the Father
              Praise the Son
              Praise the Spirit three in one
              God of glory
              Majesty
              Praise forever to the King of Kings
              -

              Where can you buy King of Kings?

              -

              If you want to support Chandler Moore and his ministry, you can buy his version of King of Kings from various platforms. Here are some of the options:

              -

              download mp3 king of kings chandler moore
              -chandler moore king of kings official music video
              -king of kings by chandler moore and essential worship
              -how to download king of kings chandler moore song
              -chandler moore king of kings lyrics and chords
              -king of kings chandler moore itunes store
              -download king of kings chandler moore worship song
              -chandler moore king of kings youtube video
              -king of kings by chandler moore and essential music publishing
              -download king of kings chandler moore single
              -chandler moore king of kings free charts
              -king of kings by chandler moore and jason ingram
              -download king of kings chandler moore instrumental
              -chandler moore king of kings live performance
              -king of kings by chandler moore and brooke ligertwood
              -download king of kings chandler moore audio
              -chandler moore king of kings spotify playlist
              -king of kings by chandler moore and scott ligertwood
              -download king of kings chandler moore feat el grace
              -chandler moore king of kings guitar tabs
              -king of kings by chandler moore and essential worship newsletter
              -download king of kings chandler moore cover
              -chandler moore king of kings piano tutorial
              -king of kings by chandler moore and maci shingleton
              -download king of kings chandler moore karaoke
              -chandler moore king of kings drum lesson
              -king of kings by chandler moore and the cox brothers
              -download king of kings chandler moore ringtone
              -chandler moore king of kings bass tabs
              -king of kings by chandler moore and meg hutchinson
              -download king of kings chandler moore remix
              -chandler moore king of kings violin sheet music
              -king of kings by chandler moore and laura epling
              -download king of kings chandler moore acoustic version
              -chandler moore king of kings cello score
              -king of kings by chandler moore and avery bright
              -download king of kings chandler moore background track
              -chandler moore king of kings viola part
              -king of kings by chandler moore and lila crosswhite
              -download king of kings chandler moore medley
              -chandler moore king of kings flute notes
              -king of kings by chandler moore and maggie chafee

              -
                -
              • iTunes: You can buy the song for $1.29 from iTunes Store on your computer or smartphone. You will need an Apple ID account and a payment method to do so. You can also stream the song with an Apple Music subscription.
              • -
              • Google Play: You can buy the song for $1.29 from Google Play Music on your computer or Android device. You will need a Google account and a payment method to do so. You can also stream the song with a YouTube Music subscription.
              • -
              • Amazon: You can buy the song for $1.29 from Amazon Music on your computer or smartphone. You will need an Amazon account and a payment method to do so. You can also stream the song with an Amazon Music Unlimited subscription.
              • -
              -

              How to download King of Kings for free?

              -

              If you don't want to pay for the song, you can still download it for free from some websites that offer free music downloads. However, you need to be careful about the legality and safety of these sites, as some of them may contain viruses or malware, or violate copyright laws.

              -

              One way to download King of Kings for free is to use YouTube or SoundCloud as a source. YouTube and SoundCloud are popular platforms where you can find and listen to music online. You can also download the songs from these sites using some tools that allow you to convert the videos or audio tracks to MP3 files. Here are the steps to do so:

              -
                -
              1. Find the song on YouTube or SoundCloud: Search for King of Kings by Chandler Moore on either platform and choose the video or audio that you want to download.
              2. -
              3. Copy the URL of the song: Right-click on the video or audio and select Copy video URL or Copy link address.
              4. -
              5. Paste the URL into a converter tool: Go to a website that offers free YouTube or SoundCloud to MP3 conversion, such as ytmp3.cc or scdownloader.io. Paste the URL into the input box and click Convert or Download.
              6. -
              7. Download the MP3 file: After the conversion is done, you will see a link to download the MP3 file. Click on it and save it to your computer or smartphone.
              8. -
              -

              How to enjoy King of Kings offline?

              -

              Now that you have downloaded King of Kings, you can enjoy it offline without an internet connection. However, you may need to transfer the song to your device if you downloaded it on your computer. Here are some ways to do that:

              -
                -
              • Via USB cable: Connect your smartphone to your computer using a USB cable. Locate the MP3 file on your computer and drag and drop it to your device's music folder.
              • -
              • Via Bluetooth: Enable Bluetooth on both your computer and smartphone. Pair them and send the MP3 file from your computer to your device.
              • -
              • Via cloud storage: Upload the MP3 file to a cloud storage service, such as Google Drive or Dropbox. Access the service from your smartphone and download the file to your device.
              • -
              -

              Once you have transferred the song to your device, you can play it with any music player app that supports MP3 format. You can also create a playlist with other worship songs and listen to them anytime you want.

              -

              Conclusion

              -

              In this article, we have shown you how to download King of Kings by Chandler Moore, a worship song that celebrates the greatness of God and his love for us. We have compared different platforms where you can buy the song, as well as how to get it for free from YouTube or SoundCloud. We have also explained how to transfer the song to your device and enjoy it offline.

              -

              We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. And don't forget to share this article with your friends and family who might be interested in this song.

              -

              Now, go ahead and download King of Kings by Chandler Moore and let it fill your heart with joy and gratitude. Praise forever to the King of Kings!

              -

              FAQs

              -
                -
              • Who is Chandler Moore?
              • -

                Chandler Moore is a singer-songwriter and worship leader who is part of Maverick City Music, a collective of artists who create gospel-centered music. He has also collaborated with other artists, such as Travis Greene, Tasha Cobbs Leonard, and Elevation Worship.

                -
              • What is Maverick City Music?
              • -

                Maverick City Music is a group of singers, songwriters, musicians, and producers who aim to create authentic and diverse worship music that reflects God's heart. They started in 2018 as a dream to write songs with their friends, and have since released several albums and singles that have reached millions of listeners.

                -
              • What is the difference between King of Kings by Chandler Moore and King of Kings by Hillsong Worship?
              • -

                The main difference is that Chandler Moore's version is a live performance that features his own vocals and style, while Hillsong Worship's version is a studio recording that features Brooke Ligertwood's vocals and arrangement. Both versions have the same lyrics and melody, but they may have some variations in tempo, key, and instrumentation.

                -
              • How can I watch the video of King of Kings by Chandler Moore?
              • -

                You can watch the video of King of Kings by Chandler Moore on YouTube. The video was uploaded by Maverick City Music on December 25, 2020, as part of their Christmas album. The video shows Chandler Moore singing the song with a band and a choir in front of a Christmas-themed backdrop. You can find the video here: [King of Kings (feat. Chandler Moore) - Maverick City | TRIBL].

                -
              • How can I learn more about Chandler Moore and his music?
              • -

                You can follow Chandler Moore on his social media accounts, such as Instagram, Twitter, and Facebook, where he posts updates and insights about his life and ministry. You can also visit his website, chandlerdmoore.com, where you can find his bio, music, videos, events, and contact information. You can also subscribe to his YouTube channel, where he uploads his songs, covers, and vlogs.

                -

              197e85843d
              -
              -
              \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Drive Various Types of Vehicles in Driver Jobs Online Simulator APK.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Drive Various Types of Vehicles in Driver Jobs Online Simulator APK.md deleted file mode 100644 index 463e38959a3d8523b25117f95713327544e9f441..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Drive Various Types of Vehicles in Driver Jobs Online Simulator APK.md +++ /dev/null @@ -1,223 +0,0 @@ - -

              Driver Jobs Online Simulator: A Review of the Game and How to Download It

              -

              If you are looking for a realistic and immersive driving simulation game, you might want to check out Driver Jobs Online Simulator. This game lets you work as a driver of various types of vehicles, such as trucks, buses, cars, and vans. You can transport cargo, passengers, or race against other players online. You can also customize your vehicles and explore different cities and routes. In this article, we will review the game and show you how to download it for your Android device.

              -

              What is Driver Jobs Online Simulator?

              -

              Driver Jobs Online Simulator is an online vehicle simulation game developed by Dynamic Games Ltda. It was released in March 2021 and has over one million downloads on Google Play Store. It has a rating of 4.0 out of 5 stars based on over 60,000 user reviews.

              -

              driver jobs online simulator apk download


              Download ——— https://ssurll.com/2uNUbz



              -

              The concept and features of the game

              -

              The game is based on the idea of working as a driver of different vehicles and completing various tasks. You can choose from four categories of vehicles: trucks, buses, cars, and vans. Each category has its own challenges and rewards. You can earn money by delivering cargo, taking passengers, or racing. You can use the money to buy new vehicles or upgrade your existing ones.

              -

              The game also has many features that make it more realistic and fun. For example, you can:

              -
                -
              • Drive in different weather conditions, such as rain, snow, fog, or night.
              • -
              • Follow the traffic rules and signals, or face penalties.
              • -
              • Use the GPS navigation system to find your destination.
              • -
              • Refuel your vehicle at gas stations.
              • -
              • Repair your vehicle at workshops.
              • -
              • Park your vehicle at designated spots.
              • -
              -

              The types of vehicles and jobs available

              -

              The game offers a variety of vehicles to choose from, each with its own specifications and performance. You can drive trucks, buses, cars, or vans. Some examples are:

              Vehicle | Description | Job
              Truck | A large vehicle used for transporting goods. | Transport cargo and deliver to large companies.
              Bus | A large vehicle used for carrying passengers. | Take passengers safely to other cities.
              Car | A small vehicle used for personal transportation. | Take passengers to the most varied locations, participate in races.
              Van | A medium-sized vehicle used for carrying goods or people. | Load your vehicle and make multiple deliveries in cities.
              -

              You can also modify your vehicles by changing their color, wheels, engine, suspension, brakes, or accessories. You can also add stickers or decals to personalize them.

              -

              The online and offline modes

              -

              The game has two modes: online and offline. In the online mode, you can play with your friends or other players around the world. You can chat with them, join races, or cooperate in missions. You can also create your own private room and invite your friends to join. The online mode requires an internet connection and a Google Play Games account.

              -

              -

              In the offline mode, you can play without an internet connection. You can still enjoy the game features and complete the jobs. You can also earn money and experience points. The offline mode is ideal for practicing your driving skills or exploring the map.

              -

              How to download Driver Jobs Online Simulator APK for Android?

              -

              If you want to play Driver Jobs Online Simulator on your Android device, you have two options: you can download it from the Google Play Store or from an APK file. An APK file is an Android application package that contains all the files and data needed to install an app on your device. Here are the steps to download and install the game from APKPure.com, one of the most popular and trusted sources of APK files.

              -

              The steps to download and install the game from APKPure.com

              -
                -
              1. Go to APKPure.com on your device's browser.
              2. -
              3. Search for "Driver Jobs Online Simulator" in the search bar.
              4. -
              5. Select the game from the results and tap on "Download APK".
              6. -
              7. Wait for the download to finish and then open the file.
              8. -
              9. If prompted, enable the installation of apps from unknown sources in your device's settings.
              10. -
              11. Follow the instructions on the screen to install the game.
              12. -
              13. Launch the game and enjoy!
              14. -
              -
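
              After the download finishes and before you install, you can also check that the APK arrived intact. Some download sites publish a checksum next to the file; if yours does, a short Python sketch like this one (run on a computer or in any Python environment on the phone) can compare it. The file name and expected hash below are placeholders.

```python
import hashlib
from pathlib import Path

# Placeholders -- use your real file path and the checksum shown on the download page, if one is provided.
apk_path = Path("driver-jobs-online-simulator.apk")
expected_sha256 = "0123456789abcdef..."  # hypothetical value, copy the real one from the site

sha256 = hashlib.sha256()
with open(apk_path, "rb") as f:
    for chunk in iter(lambda: f.read(1024 * 1024), b""):  # hash the file in 1 MB chunks
        sha256.update(chunk)

digest = sha256.hexdigest()
print("Computed SHA-256:", digest)
print("Match" if digest == expected_sha256 else "Mismatch - do not install this file.")
```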

              The benefits and risks of downloading APK files

              -

              Downloading APK files can have some advantages and disadvantages. Some of the benefits are:

              -
                -
              • You can access apps that are not available in your region or device.
              • -
              • You can get the latest updates and features before they are released on the official app store.
              • -
              • You can save data and storage space by downloading smaller files.
              • -
              -

              Some of the risks are:

              -
                -
              • You may download malicious or fake apps that can harm your device or steal your data.
              • -
              • You may violate the terms and conditions of the app developer or publisher.
              • -
              • You may encounter compatibility or performance issues with your device or other apps.
              • -
              -

              To avoid these risks, you should always download APK files from reputable and verified sources, such as APKPure.com. You should also scan the files with an antivirus software before installing them. You should also backup your data and uninstall any unwanted or suspicious apps from your device.

              -

              The system requirements and compatibility issues

              -

              Before downloading Driver Jobs Online Simulator APK for Android, you should check if your device meets the minimum system requirements for the game. According to APKPure.com, these are:

              -
                -
              • Android version: 5.0 or higher
              • -
              • RAM: 2 GB or more
              • -
              • Storage: 1 GB or more
              • -
              • Internet connection: required for online mode
              • -
              -

              If your device does not meet these requirements, you may not be able to install or run the game properly. You may also experience crashes, lags, glitches, or errors. You may also need to update your device's software or firmware to ensure compatibility with the game.
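
              To make that check concrete, here is a small sketch that compares a device profile against the minimum requirements listed above. The device values are only an example; read the real figures from your phone's settings or a device-info app.

```python
# Minimum requirements quoted above: Android 5.0 or higher, 2 GB RAM, 1 GB storage.
MINIMUM = {"android_version": (5, 0), "ram_gb": 2, "free_storage_gb": 1}

# Example device profile -- replace these values with your own phone's specifications.
device = {"android_version": (8, 1), "ram_gb": 3, "free_storage_gb": 6}

def check_device(device, minimum):
    """Return a list of problems; an empty list means the device meets the minimums."""
    problems = []
    if device["android_version"] < minimum["android_version"]:
        problems.append("Android version is below 5.0")
    if device["ram_gb"] < minimum["ram_gb"]:
        problems.append("less than 2 GB of RAM")
    if device["free_storage_gb"] < minimum["free_storage_gb"]:
        problems.append("less than 1 GB of free storage")
    return problems

issues = check_device(device, MINIMUM)
print("Device should run the game." if not issues else "Possible problems: " + ", ".join(issues))
```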

              -

              What are the pros and cons of Driver Jobs Online Simulator?

              -

              Driver Jobs Online Simulator is a fun and realistic driving simulation game that offers a lot of variety and challenge. However, it also has some drawbacks and limitations that you should be aware of. Here are some of the pros and cons of the game based on our review and user feedback.

              -

              The advantages of playing the game

              -

              Some of the advantages of playing Driver Jobs Online Simulator are:

              -
                -
              • You can experience different types of vehicles and jobs in a realistic environment.
              • -
              • You can customize your vehicles and improve their performance.
              • -
              • You can explore different cities and routes with different weather conditions and traffic situations.
              • -
              • You can play with your friends or other players online in various modes and events.
              • -
              • You can learn and practice your driving skills and knowledge.
              • -
              -

              The disadvantages and limitations of the game

              -

              Some of the disadvantages and limitations of Driver Jobs Online Simulator are:

              -
                -
              • The game has ads that may interrupt your gameplay or consume your data.
              • -
              • The game has in-app purchases that may require real money to unlock some features or items.
              • -
              • The game has bugs and glitches that may affect your gameplay or cause crashes.
              • -
              • The game has high system requirements that may not run smoothly or at all on some devices.
              • -
              • The game has limited content and variety compared to other driving simulation games.
              • -
              -

              The user reviews and ratings of the game

              -

              To get a better idea of what other players think of Driver Jobs Online Simulator, we looked at some of the user reviews and ratings of the game on Google Play Store. Here are some of the positive and negative comments we found:

              Positive | Negative
              "This game is awesome. I love the graphics and the gameplay. The online mode is very fun and competitive. The vehicles are very realistic and customizable. I recommend this game to anyone who likes driving simulation games." | "This game is terrible. The ads are annoying and intrusive. The game crashes a lot and freezes my device. The vehicles are hard to control and the physics are unrealistic. The online mode is full of hackers and cheaters. I do not recommend this game to anyone."
              "This game is very good. I like the concept and the features. The game is challenging and rewarding. The vehicles are varied and well-designed. The game is updated regularly and the developers are responsive." | "This game is boring. The game is repetitive and easy. The vehicles are expensive and unbalanced. The game is not updated often and the developers are unresponsive."
              "This game is amazing. I enjoy the graphics and the sound effects. The online mode is very interactive and cooperative. The vehicles are fun to drive and modify. The game is worth downloading and playing." | "This game is disappointing. I hate the graphics and the sound effects. The online mode is very laggy and buggy. The vehicles are slow and ugly. The game is not worth downloading or playing."
              -

              As you can see, the user reviews and ratings of Driver Jobs Online Simulator are mixed. Some players love the game and some players hate it. Some players praise the game's realism and variety, while some players criticize its bugs and limitations. Ultimately, your experience with the game may depend on your personal preferences, expectations, and device specifications.

              -

              What are some alternatives to Driver Jobs Online Simulator?

              -

              If you are not satisfied with Driver Jobs Online Simulator or you want to try something different, you may want to check out some alternatives to the game. Here are some suggestions for other driving simulation games for Android, other platforms to play driving simulation games, and other genres of simulation games to try.

              -

              Other driving simulation games for Android

              -

              There are many other driving simulation games for Android that you can download from the Google Play Store or from APK files. Some of them are:

              -
                -
              • Bus Simulator: Ultimate: A bus driving simulation game that lets you create your own bus company, transport passengers across different countries, customize your buses, and compete with other players online.
              • -
              • Car Parking Multiplayer: A car parking simulation game that lets you park your car in various scenarios, drive different cars, customize your cars, and play with other players online.
              • -
              • Truck Simulator 2018: Europe: A truck driving simulation game that lets you drive across Europe, transport various goods, buy new trucks, upgrade your trucks, and enjoy realistic graphics and physics.
              • -
              • Real Driving Sim: A car driving simulation game that lets you drive over 80 cars, explore a huge open world map, complete various missions, customize your cars, and race with other players online.
              • -
              • City Coach Bus Simulator 2021: Free Bus Games: A coach bus driving simulation game that lets you drive in different cities, transport passengers, follow traffic rules, customize your buses, and experience realistic gameplay.
              • -
              -

              Other platforms to play driving simulation games

              -

              If you want to play driving simulation games on other devices or platforms, you have many options as well. Some of them are:

              -
                -
              • PC: You can play driving simulation games on your PC using a keyboard, a mouse, a joystick, a steering wheel, or a VR headset. Some of the most popular driving simulation games for PC are Euro Truck Simulator 2, American Truck Simulator, Assetto Corsa, Project Cars 2, Forza Horizon 4, Gran Turismo Sport, Dirt Rally 2.0, BeamNG.drive, City Car Driving, and Bus Simulator 18.
              • -
              • Console: You can play driving simulation games on your console using a controller, a steering wheel, or a VR headset. Some of the most popular driving simulation games for console are Forza Motorsport 7, Gran Turismo Sport, F1 2020, Dirt 5, WRC 9, Need for Speed Heat, The Crew 2, Burnout Paradise Remastered, and Bus Simulator.
              • -
              • Mobile: You can play driving simulation games on your mobile device using a touchscreen, a gyroscope, or a Bluetooth controller. Some of the most popular driving simulation games for mobile are Real Racing 3, Asphalt 9: Legends, CSR Racing 2, Traffic Racer, Dr. Driving, Hill Climb Racing 2, Extreme Car Driving Simulator, and Coach Bus Simulator.
              • -
              -

              Other genres of simulation games to try

              -

              If you want to try other genres of simulation games besides driving, you have many options as well. Some of them are:

              -
                -
              • Flight simulation games: These games let you fly various types of aircraft, such as planes, helicopters, jets, or drones. You can learn and practice your flying skills, complete missions, explore different locations, or fight enemies. Some examples are Microsoft Flight Simulator, X-Plane 11, DCS World, Ace Combat 7: Skies Unknown, and Infinite Flight.
              • -
              • Construction simulation games: These games let you build various types of structures, such as houses, bridges, roads, or cities. You can design and plan your projects, manage your resources and budget, solve problems, or create your own scenarios. Some examples are Cities: Skylines, Planet Coaster, SimCity, Bridge Constructor Portal, and House Flipper.
              • -
              • Farming simulation games: These games let you run your own farm, grow crops, raise animals, or sell products. You can manage your land and equipment, harvest and sell your produce, interact with other farmers or customers, or enjoy the countryside. Some examples are Farming Simulator 19, Stardew Valley, Harvest Moon: Light of Hope, Hay Day, and FarmVille 2.
              • -
              • Life simulation games: These games let you create and control your own characters, live their lives, make choices, or fulfill their dreams. You can customize your appearance and personality, build relationships, pursue careers or hobbies, explore the world, or have fun. Some examples are The Sims 4, Second Life, BitLife, Animal Crossing: New Horizons, and Avakin Life.
              • -
              -

              Conclusion

              -

              Driver Jobs Online Simulator is a driving simulation game that lets you work as a driver of various types of vehicles, such as trucks, buses, cars, and vans. You can transport cargo, passengers, or race against other players online. You can also customize your vehicles and explore different cities and routes. The game has realistic graphics and physics, as well as online and offline modes. However, the game also has ads, in-app purchases, bugs, glitches, and high system requirements. You can download the game from the Google Play Store or from an APK file. If you are looking for other driving simulation games or other genres of simulation games, you have many options to choose from.

              -

              If you are interested in playing Driver Jobs Online Simulator, you can download it from the link below. You can also watch the trailer of the game and see some screenshots of the gameplay. We hope you enjoy the game and have fun!

              -

              Download Driver Jobs Online Simulator APK for Android

              -

              Watch the trailer of Driver Jobs Online Simulator

              -

              See some screenshots of Driver Jobs Online Simulator

              -

              FAQs

              -

              Here are some frequently asked questions about Driver Jobs Online Simulator and their answers.

              -
                -
              1. How do I play online mode in Driver Jobs Online Simulator?
              2. -

                To play online mode in Driver Jobs Online Simulator, you need to have an internet connection and a Google Play Games account. You can then select the online mode from the main menu and choose a vehicle category. You can then join a room or create your own room and invite your friends. You can chat with other players, join races, or cooperate in missions.

                -
              3. How do I earn money in Driver Jobs Online Simulator?
              4. -

                To earn money in Driver Jobs Online Simulator, you need to complete jobs or missions with your vehicles. You can transport cargo, passengers, or race with other players. You can also earn money by watching ads or making in-app purchases.

                -
              5. How do I buy new vehicles in Driver Jobs Online Simulator?
              6. -

                To buy new vehicles in Driver Jobs Online Simulator, you need to have enough money and experience points. You can then go to the garage and select the vehicle category you want. You can then browse through the available vehicles and tap on the one you want to buy. You can also sell your old vehicles if you want to.

                -
              7. How do I customize my vehicles in Driver Jobs Online Simulator?
              8. -

                To customize your vehicles in Driver Jobs Online Simulator, you need to go to the workshop and select the vehicle you want to modify. You can then change its color, wheels, engine, suspension, brakes, or accessories. You can also add stickers or decals to personalize it.

                -
              9. How do I update Driver Jobs Online Simulator?
              10. -

                To update Driver Jobs Online Simulator, you need to check if there is a new version available on the Google Play Store or on APKPure.com. You can then download and install the update on your device. You may also need to update your device's software or firmware to ensure compatibility with the game.

                -

              401be4b1e0
              -
              -
              \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/FateGrand Order Arcade APK - Enjoy the Thrilling Action and Story of Fate on Android.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/FateGrand Order Arcade APK - Enjoy the Thrilling Action and Story of Fate on Android.md deleted file mode 100644 index d0608da1cd80b2c78b49f81aef2dcbd5240ba73a..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/FateGrand Order Arcade APK - Enjoy the Thrilling Action and Story of Fate on Android.md +++ /dev/null @@ -1,149 +0,0 @@ -
              -

              Naruto Shippuden Ultimate Ninja Storm Revolution PPSSPP Zip File Download: How to Play the Game on Your Android Device

              -

              Introduction

              -

              If you are a fan of the Naruto anime and manga series, you might have heard of Naruto Shippuden Ultimate Ninja Storm Revolution, a fighting game based on the popular franchise. The game was released in 2014 for PlayStation 3, Xbox 360, and Microsoft Windows, and it features over 100 playable characters, new game modes, and improved graphics and gameplay.

              -

              But what if you want to play this game on your Android device? Is it possible? The answer is yes, thanks to PPSSPP, a PSP emulator that allows you to run PSP games on your smartphone or tablet. In this article, we will show you how to download and install Naruto Shippuden Ultimate Ninja Storm Revolution PPSSPP zip file, and how to optimize the settings for better performance. Let's get started!

              -

              fate grand order arcade apk download


              Download Zip === https://ssurll.com/2uNWpB



              -

              What is Naruto Shippuden Ultimate Ninja Storm Revolution?

              -

              Naruto Shippuden Ultimate Ninja Storm Revolution is a fighting game developed by CyberConnect2 and published by Bandai Namco Games. It is the fifth installment in the Naruto Ultimate Ninja Storm series, and it follows the events of the Naruto Shippuden anime. The game features a roster of over 100 characters from the Naruto universe, including new additions such as Mecha-Naruto, Obito Uchiha, and Shisui Uchiha. The game also introduces new game modes, such as Ninja World Tournament, where you can compete with other players in a battle royale format, and Ninja Escapades, where you can watch original animated stories that reveal more about the characters' backgrounds.

              -

              What is PPSSPP and how does it work?

              -

              PPSSPP is an open-source PSP emulator that allows you to play PSP games on your Android device. It works by simulating the hardware and software of the PSP console, and rendering the graphics using OpenGL ES. PPSSPP supports most of the PSP games available, and it offers various features such as save states, cheats, controller support, network play, and more. You can download PPSSPP from Google Play Store or from its official website.

              -

              Why play Naruto Shippuden Ultimate Ninja Storm Revolution on PPSSPP?

              -

              There are several reasons why you might want to play Naruto Shippuden Ultimate Ninja Storm Revolution on PPSSPP instead of on its original platforms. Here are some of them:

              -
                -
              • You can enjoy the game on a bigger screen and with better resolution.
              • -
              • You can customize the controls according to your preference.
              • -
              • You can save your progress anytime and anywhere.
              • -
              • You can access cheats and mods to enhance your gaming experience.
              • -
              • You can play with your friends online or locally using Wi-Fi or Bluetooth.
              • -
              -

              How to Download and Install Naruto Shippuden Ultimate Ninja Storm Revolution PPSSPP Zip File

              -

              To play Naruto Shippuden Ultimate Ninja Storm Revolution on PPSSPP, you will need two files: the game file (ISO) and the texture file (MOD). The game file is the original PSP version of the game, while the texture file is a mod that enhances the graphics and adds new features to the game. You can download both files from the links below:

              -
                -
              • Game file: [Naruto Shippuden Ultimate Ninja Storm Revolution ISO]
              • -
              • Texture file: [Naruto Shippuden Ultimate Ninja Storm Revolution MOD]
              • -
              -

              After downloading the files, follow these steps to install them:

              -

              Step 1: Download the game file and the texture file

              -

              The game file and the texture file are compressed in zip format, so you will need to extract them using a file manager app that supports zip extraction. You can use any app that you prefer, but we recommend using [ZArchiver], which is a free and easy-to-use app that you can download from Google Play Store.

              -

              Step 2: Extract the zip files using a file manager app

              -

              Open ZArchiver and locate the zip files that you downloaded in your device's storage. Tap on the game file (Naruto Shippuden Ultimate Ninja Storm Revolution ISO.zip) and select "Extract here". This will create a folder named "Naruto Shippuden Ultimate Ninja Storm Revolution ISO" that contains the game file (Naruto Shippuden Ultimate Ninja Storm Revolution.iso). Do the same for the texture file (Naruto Shippuden Ultimate Ninja Storm Revolution MOD.zip), which will create a folder named "Naruto Shippuden Ultimate Ninja Storm Revolution MOD" that contains the texture file (TEXTURES.NSUNS4).
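
              If you would rather not use a file manager app, the same extraction can be scripted with Python's standard zipfile module, for example on a computer before copying the results to the phone. This is only a sketch; the download folder is an assumption, and the zip names are the ones used in this article.

```python
import zipfile
from pathlib import Path

downloads = Path.home() / "Downloads"  # assumed download location

# The two archives described in this article.
archives = [
    "Naruto Shippuden Ultimate Ninja Storm Revolution ISO.zip",
    "Naruto Shippuden Ultimate Ninja Storm Revolution MOD.zip",
]

for name in archives:
    archive_path = downloads / name
    target_dir = downloads / archive_path.stem  # e.g. ".../Naruto Shippuden Ultimate Ninja Storm Revolution ISO"
    with zipfile.ZipFile(archive_path) as zf:
        zf.extractall(target_dir)  # roughly mirrors ZArchiver's "Extract here" result described above
    print(f"Extracted {name} into {target_dir}")
```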

              -

              -

              Step 3: Move the extracted files to the appropriate folders

              -

              Now that you have extracted the files, you need to move them to the correct folders so that PPSSPP can recognize them. To do this, follow these steps:

              -
                -
              • Move the game file (Naruto Shippuden Ultimate Ninja Storm Revolution.iso) to the PSP/GAME folder in your device's storage. If you don't have this folder, create it manually.
              • -
              • Move the texture file (TEXTURES.NSUNS4) to the PSP/TEXTURES folder in your device's storage. If you don't have this folder, create it manually.
              • -
              -
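
              The same folder setup can be scripted if you have a Python environment that can reach the phone's storage. The sketch below creates the PSP/GAME and PSP/TEXTURES folders and moves the two files into them; the storage root and the Download location are assumptions, so adjust them for your device.

```python
import shutil
from pathlib import Path

storage = Path("/storage/emulated/0")  # assumed internal-storage root; varies between devices
extracted = storage / "Download"       # assumed location of the extracted folders

moves = {
    extracted / "Naruto Shippuden Ultimate Ninja Storm Revolution ISO"
              / "Naruto Shippuden Ultimate Ninja Storm Revolution.iso": storage / "PSP" / "GAME",
    extracted / "Naruto Shippuden Ultimate Ninja Storm Revolution MOD"
              / "TEXTURES.NSUNS4": storage / "PSP" / "TEXTURES",
}

for source, target_dir in moves.items():
    target_dir.mkdir(parents=True, exist_ok=True)  # create PSP/GAME or PSP/TEXTURES if missing
    shutil.move(str(source), str(target_dir / source.name))
    print(f"Moved {source.name} -> {target_dir}")
```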

              Step 4: Download and install PPSSPP emulator app

              -

              If you haven't done so already, download and install PPSSPP emulator app from Google Play Store or from its official website. PPSSPP is a free and open-source app that lets you play PSP games on your Android device. Once you have installed PPSSPP, open it and grant it the necessary permissions to access your device's storage.

              -

              Step 5: Launch PPSSPP and select the game file to play

              -

              In PPSSPP, tap on "Games" and navigate to the PSP/GAME folder where you moved the game file. Tap on Naruto Shippuden Ultimate Ninja Storm Revolution.iso and enjoy playing the game on your Android device!

              -

              How to Optimize Naruto Shippuden Ultimate Ninja Storm Revolution PPSSPP Settings for Better Performance

              -

              Naruto Shippuden Ultimate Ninja Storm Revolution is a high-end PSP game that requires a lot of resources to run smoothly on PPSSPP. Depending on your device's specifications, you might experience some lag or stuttering while playing the game. To fix this, you can tweak some settings in PPSSPP to improve the performance and reduce the lag. Here are some of the settings that you can adjust:

              -

              Graphics settings

              -

              In PPSSPP, tap on "Settings" and then on "Graphics". Here are some of the options that you can change:

              -
                -
              • Rendering mode: Set this to "Buffered rendering" for better graphics quality.
              • -
              • Frame skipping: Set this to "Off" for smoother gameplay.
              • -
              • Rendering resolution: Set this to "1x PSP" for faster performance.
              • -
              • Display resolution: Set this to "Native device resolution" for sharper display.
              • -
              • Mipmapping: Enable this for better texture quality.
              • -
              • Anisotropic filtering: Set this to "16x" for smoother textures.
              • -
              • Texture scaling: Set this to "Off" for faster performance.
              • -
              • Texture filtering: Set this to "Linear" for smoother textures.
              • -
              • Hack settings: Enable "Timer hack" and "Disable alpha test" for faster performance.
              • -
              -

              Audio settings

              -

              In PPSSPP, tap on "Settings" and then on "Audio". Here are some of the options that you can change:

              -
                -
              • Audio backend: Set this to "OpenSL ES" for better sound quality.
              • -
              • Audio latency: Set this to "Low" for smoother sound.
              • -
              • Sound speed hack: Enable this for faster performance.
              • -
              -

              System settings

              -

              In PPSSPP, tap on "Settings" and then on "System". Here are some of the options that you can change:

              -
                -
              • Fast memory: Enable this for faster performance.
              • -
              • Multithreaded: Enable this for better performance on multi-core devices.
              • -
              • I/O on thread: Enable this for faster loading times.
              • -
              • Force real clock sync: Disable this for smoother gameplay.
              • -
              • Change emulated PSP's CPU clock: Set this to "0 (Auto)" for optimal performance.
              • -
              -
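
              For quick reference, the recommendations from the Graphics, Audio, and System sections above can be collected in one place. The sketch below is only a summary data structure for your own notes; it does not touch PPSSPP's real configuration file, and the values still have to be applied by hand in the emulator's Settings menus.

```python
# Recommended PPSSPP settings from this article, grouped by menu.
# Purely a reference table -- apply the values manually in PPSSPP's Settings screens.
RECOMMENDED_SETTINGS = {
    "Graphics": {
        "Rendering mode": "Buffered rendering",
        "Frame skipping": "Off",
        "Rendering resolution": "1x PSP",
        "Display resolution": "Native device resolution",
        "Mipmapping": "On",
        "Anisotropic filtering": "16x",
        "Texture scaling": "Off",
        "Texture filtering": "Linear",
        "Hack settings": ["Timer hack", "Disable alpha test"],
    },
    "Audio": {
        "Audio backend": "OpenSL ES",
        "Audio latency": "Low",
        "Sound speed hack": "On",
    },
    "System": {
        "Fast memory": "On",
        "Multithreaded": "On",
        "I/O on thread": "On",
        "Force real clock sync": "Off",
        "Change emulated PSP's CPU clock": "0 (Auto)",
    },
}

for menu, options in RECOMMENDED_SETTINGS.items():
    print(f"[{menu}]")
    for option, value in options.items():
        print(f"  {option}: {value}")
```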

              Conclusion

              -

              Naruto Shippuden Ultimate Ninja Storm Revolution is a fun and exciting game that you can play on your Android device using PPSSPP emulator. By following the steps above, you can download and install the game file and the texture file, and optimize the settings for better performance. You can also enjoy the game's features, such as the Ninja World Tournament, the Ninja Escapades, and the online multiplayer mode. If you are a Naruto fan, you will love playing this game on your smartphone or tablet!

              -

              FAQs

              -

              Here are some of the frequently asked questions about Naruto Shippuden Ultimate Ninja Storm Revolution PPSSPP zip file download:

              -
                -
              • Q: How much storage space do I need to download and install the game file and the texture file?
              • -
              • A: You will need about 1.5 GB of free storage space to download and install the game file and the texture file.
              • -
              • Q: How can I play Naruto Shippuden Ultimate Ninja Storm Revolution online with my friends?
              • -
              • A: You can play Naruto Shippuden Ultimate Ninja Storm Revolution online with your friends using PPSSPP's network play feature. To do this, you will need to create or join a room using a server address and a port number. You can find more information on how to set up network play on PPSSPP's official website.
              • -
              • Q: How can I use cheats and mods in Naruto Shippuden Ultimate Ninja Storm Revolution?
              • -
              • A: You can use cheats and mods in Naruto Shippuden Ultimate Ninja Storm Revolution by downloading and installing cheat files and mod files from various sources online. You can then enable them in PPSSPP's cheat menu or mod menu. However, be careful when using cheats and mods, as they might cause glitches or crashes in the game.
              • -
              • Q: What are the minimum requirements to run Naruto Shippuden Ultimate Ninja Storm Revolution on PPSSPP?
              • -
              • A: The minimum requirements to run Naruto Shippuden Ultimate Ninja Storm Revolution on PPSSPP are:
              • -
                  -
                • An Android device with at least 1 GB of RAM and a quad-core processor.
                • -
                • A stable internet connection for downloading the game file and the texture file, and for playing online.
                • -
                • A file manager app that supports zip extraction, such as ZArchiver.
                • -
                • A PSP emulator app, such as PPSSPP.
                • -
                -
              • Q: Where can I get more information and support about Naruto Shippuden Ultimate Ninja Storm Revolution PPSSPP zip file download?
              • -
              • A: You can get more information and support about Naruto Shippuden Ultimate Ninja Storm Revolution PPSSPP zip file download from these sources:
              • -
                  -
                • The official website of PPSSPP emulator.
                • -
                • The official forum of PPSSPP emulator.
                • -
                • The official subreddit of PPSSPP emulator.
                • -
                • The official Facebook page of PPSSPP emulator.
                • -
                -

              401be4b1e0
              -
              -
              \ No newline at end of file diff --git a/spaces/sqc1729/bingi/src/components/ui/separator.tsx b/spaces/sqc1729/bingi/src/components/ui/separator.tsx deleted file mode 100644 index 6c55e0b2ca8e2436658a06748aadbff7cd700db0..0000000000000000000000000000000000000000 --- a/spaces/sqc1729/bingi/src/components/ui/separator.tsx +++ /dev/null @@ -1,31 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SeparatorPrimitive from '@radix-ui/react-separator' - -import { cn } from '@/lib/utils' - -const Separator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->( - ( - { className, orientation = 'horizontal', decorative = true, ...props }, - ref - ) => ( - - ) -) -Separator.displayName = SeparatorPrimitive.Root.displayName - -export { Separator } diff --git a/spaces/sssdtgvg/Sex/configure.py b/spaces/sssdtgvg/Sex/configure.py deleted file mode 100644 index 9a49e639508340a859a41848bb57feedbd9c0606..0000000000000000000000000000000000000000 --- a/spaces/sssdtgvg/Sex/configure.py +++ /dev/null @@ -1,1367 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""configure script to get build parameters from user.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import errno -import os -import platform -import re -import subprocess -import sys - -# pylint: disable=g-import-not-at-top -try: - from shutil import which -except ImportError: - from distutils.spawn import find_executable as which -# pylint: enable=g-import-not-at-top - -_TF_BAZELRC = os.path.join(os.path.dirname(os.path.abspath(__file__)), - '.tf_configure.bazelrc') -_TF_WORKSPACE = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'WORKSPACE') -_DEFAULT_CUDA_VERSION = '9.0' -_DEFAULT_CUDNN_VERSION = '7' -_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,5.2' -_DEFAULT_CUDA_PATH = '/usr/local/cuda' -_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda' -_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing ' - 'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION) -_TF_OPENCL_VERSION = '1.2' -_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp' -_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include' -_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15] - -_DEFAULT_PROMPT_ASK_ATTEMPTS = 10 - - -class UserInputError(Exception): - pass - - -def is_windows(): - return platform.system() == 'Windows' - - -def is_linux(): - return platform.system() == 'Linux' - - -def is_macos(): - return platform.system() == 'Darwin' - - -def is_ppc64le(): - return platform.machine() == 'ppc64le' - - -def is_cygwin(): - return platform.system().startswith('CYGWIN_NT') - - -def get_input(question): - try: - try: - answer = raw_input(question) - except NameError: - answer = input(question) # pylint: disable=bad-builtin - except EOFError: - answer = '' - return answer - - -def symlink_force(target, 
link_name): - """Force symlink, equivalent of 'ln -sf'. - - Args: - target: items to link to. - link_name: name of the link. - """ - try: - os.symlink(target, link_name) - except OSError as e: - if e.errno == errno.EEXIST: - os.remove(link_name) - os.symlink(target, link_name) - else: - raise e - - -def sed_in_place(filename, old, new): - """Replace old string with new string in file. - - Args: - filename: string for filename. - old: string to replace. - new: new string to replace to. - """ - with open(filename, 'r') as f: - filedata = f.read() - newdata = filedata.replace(old, new) - with open(filename, 'w') as f: - f.write(newdata) - - -def remove_line_with(filename, token): - """Remove lines that contain token from file. - - Args: - filename: string for filename. - token: string token to check if to remove a line from file or not. - """ - with open(filename, 'r') as f: - filedata = f.read() - - with open(filename, 'w') as f: - for line in filedata.strip().split('\n'): - if token not in line: - f.write(line + '\n') - - -def write_to_bazelrc(line): - with open(_TF_BAZELRC, 'a') as f: - f.write(line + '\n') - - -def write_action_env_to_bazelrc(var_name, var): - write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var))) - - -def run_shell(cmd, allow_non_zero=False): - if allow_non_zero: - try: - output = subprocess.check_output(cmd) - except subprocess.CalledProcessError as e: - output = e.output - else: - output = subprocess.check_output(cmd) - return output.decode('UTF-8').strip() - - -def cygpath(path): - """Convert path from posix to windows.""" - return os.path.abspath(path).replace('\\', '/') - - -def get_python_path(environ_cp, python_bin_path): - """Get the python site package paths.""" - python_paths = [] - if environ_cp.get('PYTHONPATH'): - python_paths = environ_cp.get('PYTHONPATH').split(':') - try: - library_paths = run_shell( - [python_bin_path, '-c', - 'import site; print("\\n".join(site.getsitepackages()))']).split('\n') - except subprocess.CalledProcessError: - library_paths = [run_shell( - [python_bin_path, '-c', - 'from distutils.sysconfig import get_python_lib;' - 'print(get_python_lib())'])] - - all_paths = set(python_paths + library_paths) - - paths = [] - for path in all_paths: - if os.path.isdir(path): - paths.append(path) - return paths - - -def get_python_major_version(python_bin_path): - """Get the python major version.""" - return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])']) - - -def setup_python(environ_cp): - """Setup python related env variables.""" - # Get PYTHON_BIN_PATH, default is the current running python. - default_python_bin_path = sys.executable - ask_python_bin_path = ('Please specify the location of python. [Default is ' - '%s]: ') % default_python_bin_path - while True: - python_bin_path = get_from_env_or_user_or_default( - environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path, - default_python_bin_path) - # Check if the path is valid - if os.path.isfile(python_bin_path) and os.access( - python_bin_path, os.X_OK): - break - elif not os.path.exists(python_bin_path): - print('Invalid python path: %s cannot be found.' % python_bin_path) - else: - print('%s is not executable. Is it the python binary?' 
% python_bin_path) - environ_cp['PYTHON_BIN_PATH'] = '' - - # Convert python path to Windows style before checking lib and version - if is_windows() or is_cygwin(): - python_bin_path = cygpath(python_bin_path) - - # Get PYTHON_LIB_PATH - python_lib_path = environ_cp.get('PYTHON_LIB_PATH') - if not python_lib_path: - python_lib_paths = get_python_path(environ_cp, python_bin_path) - if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1': - python_lib_path = python_lib_paths[0] - else: - print('Found possible Python library paths:\n %s' % - '\n '.join(python_lib_paths)) - default_python_lib_path = python_lib_paths[0] - python_lib_path = get_input( - 'Please input the desired Python library path to use. ' - 'Default is [%s]\n' % python_lib_paths[0]) - if not python_lib_path: - python_lib_path = default_python_lib_path - environ_cp['PYTHON_LIB_PATH'] = python_lib_path - - python_major_version = get_python_major_version(python_bin_path) - - # Convert python path to Windows style before writing into bazel.rc - if is_windows() or is_cygwin(): - python_lib_path = cygpath(python_lib_path) - - # Set-up env variables used by python_configure.bzl - write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path) - write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path) - write_to_bazelrc('build --force_python=py%s' % python_major_version) - write_to_bazelrc('build --host_force_python=py%s' % python_major_version) - write_to_bazelrc('build --python_path=\"%s"' % python_bin_path) - environ_cp['PYTHON_BIN_PATH'] = python_bin_path - - # Write tools/python_bin_path.sh - with open('tools/python_bin_path.sh', 'w') as f: - f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path) - - -def reset_tf_configure_bazelrc(): - """Reset file that contains customized config settings.""" - open(_TF_BAZELRC, 'w').close() - - home = os.path.expanduser('~') - if not os.path.exists('.bazelrc'): - if os.path.exists(os.path.join(home, '.bazelrc')): - with open('.bazelrc', 'a') as f: - f.write('import %s/.bazelrc\n' % home.replace('\\', '/')) - else: - open('.bazelrc', 'w').close() - - remove_line_with('.bazelrc', 'tf_configure') - with open('.bazelrc', 'a') as f: - f.write('import %workspace%/.tf_configure.bazelrc\n') - - -def run_gen_git_source(environ_cp): - """Run the gen_git_source to create links. - - The links are for bazel to track dependencies for git hash propagation. - - Args: - environ_cp: copy of the os.environ. - """ - cmd = '"%s" tensorflow/tools/git/gen_git_source.py --configure %s' % ( - environ_cp.get('PYTHON_BIN_PATH'), os.getcwd()) - os.system(cmd) - - -def cleanup_makefile(): - """Delete any leftover BUILD files from the Makefile build. - - These files could interfere with Bazel parsing. - """ - makefile_download_dir = 'tensorflow/contrib/makefile/downloads' - if os.path.isdir(makefile_download_dir): - for root, _, filenames in os.walk(makefile_download_dir): - for f in filenames: - if f.endswith('BUILD'): - os.remove(os.path.join(root, f)) - - -def get_var(environ_cp, - var_name, - query_item, - enabled_by_default, - question=None, - yes_reply=None, - no_reply=None): - """Get boolean input from user. - - If var_name is not set in env, ask user to enable query_item or not. If the - response is empty, use the default. - - Args: - environ_cp: copy of the os.environ. - var_name: string for name of environment variable, e.g. "TF_NEED_HDFS". - query_item: string for feature related to the variable, e.g. "Hadoop File - System". - enabled_by_default: boolean for default behavior. 
- question: optional string for how to ask for user input. - yes_reply: optionanl string for reply when feature is enabled. - no_reply: optional string for reply when feature is disabled. - - Returns: - boolean value of the variable. - """ - if not question: - question = 'Do you wish to build TensorFlow with %s support?' % query_item - if not yes_reply: - yes_reply = '%s support will be enabled for TensorFlow.' % query_item - if not no_reply: - no_reply = 'No %s' % yes_reply - - yes_reply += '\n' - no_reply += '\n' - - if enabled_by_default: - question += ' [Y/n]: ' - else: - question += ' [y/N]: ' - - var = environ_cp.get(var_name) - while var is None: - user_input_origin = get_input(question) - user_input = user_input_origin.strip().lower() - if user_input == 'y': - print(yes_reply) - var = True - elif user_input == 'n': - print(no_reply) - var = False - elif not user_input: - if enabled_by_default: - print(yes_reply) - var = True - else: - print(no_reply) - var = False - else: - print('Invalid selection: %s' % user_input_origin) - return var - - -def set_build_var(environ_cp, var_name, query_item, option_name, - enabled_by_default, bazel_config_name=None): - """Set if query_item will be enabled for the build. - - Ask user if query_item will be enabled. Default is used if no input is given. - Set subprocess environment variable and write to .bazelrc if enabled. - - Args: - environ_cp: copy of the os.environ. - var_name: string for name of environment variable, e.g. "TF_NEED_HDFS". - query_item: string for feature related to the variable, e.g. "Hadoop File - System". - option_name: string for option to define in .bazelrc. - enabled_by_default: boolean for default behavior. - bazel_config_name: Name for Bazel --config argument to enable build feature. - """ - - var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default))) - environ_cp[var_name] = var - if var == '1': - write_to_bazelrc('build --define %s=true' % option_name) - elif bazel_config_name is not None: - # TODO(mikecase): Migrate all users of configure.py to use --config Bazel - # options and not to set build configs through environment variables. - write_to_bazelrc('build:%s --define %s=true' - % (bazel_config_name, option_name)) - - -def set_action_env_var(environ_cp, - var_name, - query_item, - enabled_by_default, - question=None, - yes_reply=None, - no_reply=None): - """Set boolean action_env variable. - - Ask user if query_item will be enabled. Default is used if no input is given. - Set environment variable and write to .bazelrc. - - Args: - environ_cp: copy of the os.environ. - var_name: string for name of environment variable, e.g. "TF_NEED_HDFS". - query_item: string for feature related to the variable, e.g. "Hadoop File - System". - enabled_by_default: boolean for default behavior. - question: optional string for how to ask for user input. - yes_reply: optionanl string for reply when feature is enabled. - no_reply: optional string for reply when feature is disabled. - """ - var = int( - get_var(environ_cp, var_name, query_item, enabled_by_default, question, - yes_reply, no_reply)) - - write_action_env_to_bazelrc(var_name, var) - environ_cp[var_name] = str(var) - - -def convert_version_to_int(version): - """Convert a version number to a integer that can be used to compare. - - Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The - 'xxxxx' part, for instance 'homebrew' on OS/X, is ignored. 
- - Args: - version: a version to be converted - - Returns: - An integer if converted successfully, otherwise return None. - """ - version = version.split('-')[0] - version_segments = version.split('.') - for seg in version_segments: - if not seg.isdigit(): - return None - - version_str = ''.join(['%03d' % int(seg) for seg in version_segments]) - return int(version_str) - - -def check_bazel_version(min_version): - """Check installed bezel version is at least min_version. - - Args: - min_version: string for minimum bazel version. - - Returns: - The bazel version detected. - """ - if which('bazel') is None: - print('Cannot find bazel. Please install bazel.') - sys.exit(0) - curr_version = run_shell(['bazel', '--batch', 'version']) - - for line in curr_version.split('\n'): - if 'Build label: ' in line: - curr_version = line.split('Build label: ')[1] - break - - min_version_int = convert_version_to_int(min_version) - curr_version_int = convert_version_to_int(curr_version) - - # Check if current bazel version can be detected properly. - if not curr_version_int: - print('WARNING: current bazel installation is not a release version.') - print('Make sure you are running at least bazel %s' % min_version) - return curr_version - - print('You have bazel %s installed.' % curr_version) - - if curr_version_int < min_version_int: - print('Please upgrade your bazel installation to version %s or higher to ' - 'build TensorFlow!' % min_version) - sys.exit(0) - return curr_version - - -def set_cc_opt_flags(environ_cp): - """Set up architecture-dependent optimization flags. - - Also append CC optimization flags to bazel.rc.. - - Args: - environ_cp: copy of the os.environ. - """ - if is_ppc64le(): - # gcc on ppc64le does not support -march, use mcpu instead - default_cc_opt_flags = '-mcpu=native' - else: - default_cc_opt_flags = '-march=native' - question = ('Please specify optimization flags to use during compilation when' - ' bazel option "--config=opt" is specified [Default is %s]: ' - ) % default_cc_opt_flags - cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS', - question, default_cc_opt_flags) - for opt in cc_opt_flags.split(): - write_to_bazelrc('build:opt --copt=%s' % opt) - # It should be safe on the same build host. - write_to_bazelrc('build:opt --host_copt=-march=native') - write_to_bazelrc('build:opt --define with_default_optimizations=true') - # TODO(mikecase): Remove these default defines once we are able to get - # TF Lite targets building without them. - write_to_bazelrc('build --copt=-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK') - write_to_bazelrc('build --host_copt=-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK') - - -def set_tf_cuda_clang(environ_cp): - """set TF_CUDA_CLANG action_env. - - Args: - environ_cp: copy of the os.environ. - """ - question = 'Do you want to use clang as CUDA compiler?' - yes_reply = 'Clang will be used as CUDA compiler.' - no_reply = 'nvcc will be used as CUDA compiler.' - set_action_env_var( - environ_cp, - 'TF_CUDA_CLANG', - None, - False, - question=question, - yes_reply=yes_reply, - no_reply=no_reply) - - -def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var, - var_default): - """Get var_name either from env, or user or default. - - If var_name has been set as environment variable, use the preset value, else - ask for user input. If no input is provided, the default is used. - - Args: - environ_cp: copy of the os.environ. - var_name: string for name of environment variable, e.g. "TF_NEED_HDFS". 
- ask_for_var: string for how to ask for user input. - var_default: default value string. - - Returns: - string value for var_name - """ - var = environ_cp.get(var_name) - if not var: - var = get_input(ask_for_var) - print('\n') - if not var: - var = var_default - return var - - -def set_clang_cuda_compiler_path(environ_cp): - """Set CLANG_CUDA_COMPILER_PATH.""" - default_clang_path = which('clang') or '' - ask_clang_path = ('Please specify which clang should be used as device and ' - 'host compiler. [Default is %s]: ') % default_clang_path - - while True: - clang_cuda_compiler_path = get_from_env_or_user_or_default( - environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path, - default_clang_path) - if os.path.exists(clang_cuda_compiler_path): - break - - # Reset and retry - print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path) - environ_cp['CLANG_CUDA_COMPILER_PATH'] = '' - - # Set CLANG_CUDA_COMPILER_PATH - environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path - write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH', - clang_cuda_compiler_path) - - -def prompt_loop_or_load_from_env( - environ_cp, - var_name, - var_default, - ask_for_var, - check_success, - error_msg, - suppress_default_error=False, - n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS -): - """Loop over user prompts for an ENV param until receiving a valid response. - - For the env param var_name, read from the environment or verify user input - until receiving valid input. When done, set var_name in the environ_cp to its - new value. - - Args: - environ_cp: (Dict) copy of the os.environ. - var_name: (String) string for name of environment variable, e.g. "TF_MYVAR". - var_default: (String) default value string. - ask_for_var: (String) string for how to ask for user input. - check_success: (Function) function that takes one argument and returns a - boolean. Should return True if the value provided is considered valid. May - contain a complex error message if error_msg does not provide enough - information. In that case, set suppress_default_error to True. - error_msg: (String) String with one and only one '%s'. Formatted with each - invalid response upon check_success(input) failure. - suppress_default_error: (Bool) Suppress the above error message in favor of - one from the check_success function. - n_ask_attempts: (Integer) Number of times to query for valid input before - raising an error and quitting. - - Returns: - [String] The value of var_name after querying for input. - - Raises: - UserInputError: if a query has been attempted n_ask_attempts times without - success, assume that the user has made a scripting error, and will continue - to provide invalid input. Raise the error to avoid infinitely looping. - """ - default = environ_cp.get(var_name) or var_default - full_query = '%s [Default is %s]: ' % ( - ask_for_var, - default, - ) - - for _ in range(n_ask_attempts): - val = get_from_env_or_user_or_default(environ_cp, - var_name, - full_query, - default) - if check_success(val): - break - if not suppress_default_error: - print(error_msg % val) - environ_cp[var_name] = '' - else: - raise UserInputError('Invalid %s setting was provided %d times in a row. ' - 'Assuming to be a scripting mistake.' 
% - (var_name, n_ask_attempts)) - - environ_cp[var_name] = val - return val - - -def create_android_ndk_rule(environ_cp): - """Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule.""" - if is_windows() or is_cygwin(): - default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' % - environ_cp['APPDATA']) - elif is_macos(): - default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME'] - else: - default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME'] - - def valid_ndk_path(path): - return (os.path.exists(path) and - os.path.exists(os.path.join(path, 'source.properties'))) - - android_ndk_home_path = prompt_loop_or_load_from_env( - environ_cp, - var_name='ANDROID_NDK_HOME', - var_default=default_ndk_path, - ask_for_var='Please specify the home path of the Android NDK to use.', - check_success=valid_ndk_path, - error_msg=('The path %s or its child file "source.properties" ' - 'does not exist.') - ) - - write_android_ndk_workspace_rule(android_ndk_home_path) - - -def create_android_sdk_rule(environ_cp): - """Set Android variables and write Android SDK WORKSPACE rule.""" - if is_windows() or is_cygwin(): - default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA']) - elif is_macos(): - default_sdk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME'] - else: - default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME'] - - def valid_sdk_path(path): - return (os.path.exists(path) and - os.path.exists(os.path.join(path, 'platforms')) and - os.path.exists(os.path.join(path, 'build-tools'))) - - android_sdk_home_path = prompt_loop_or_load_from_env( - environ_cp, - var_name='ANDROID_SDK_HOME', - var_default=default_sdk_path, - ask_for_var='Please specify the home path of the Android SDK to use.', - check_success=valid_sdk_path, - error_msg=('Either %s does not exist, or it does not contain the ' - 'subdirectories "platforms" and "build-tools".')) - - platforms = os.path.join(android_sdk_home_path, 'platforms') - api_levels = sorted(os.listdir(platforms)) - api_levels = [x.replace('android-', '') for x in api_levels] - - def valid_api_level(api_level): - return os.path.exists(os.path.join(android_sdk_home_path, - 'platforms', - 'android-' + api_level)) - - android_api_level = prompt_loop_or_load_from_env( - environ_cp, - var_name='ANDROID_API_LEVEL', - var_default=api_levels[-1], - ask_for_var=('Please specify the Android SDK API level to use. ' - '[Available levels: %s]') % api_levels, - check_success=valid_api_level, - error_msg='Android-%s is not present in the SDK path.') - - build_tools = os.path.join(android_sdk_home_path, 'build-tools') - versions = sorted(os.listdir(build_tools)) - - def valid_build_tools(version): - return os.path.exists(os.path.join(android_sdk_home_path, - 'build-tools', - version)) - - android_build_tools_version = prompt_loop_or_load_from_env( - environ_cp, - var_name='ANDROID_BUILD_TOOLS_VERSION', - var_default=versions[-1], - ask_for_var=('Please specify an Android build tools version to use. 
' - '[Available versions: %s]') % versions, - check_success=valid_build_tools, - error_msg=('The selected SDK does not have build-tools version %s ' - 'available.')) - - write_android_sdk_workspace_rule(android_sdk_home_path, - android_build_tools_version, - android_api_level) - - -def write_android_sdk_workspace_rule(android_sdk_home_path, - android_build_tools_version, - android_api_level): - print('Writing android_sdk_workspace rule.\n') - with open(_TF_WORKSPACE, 'a') as f: - f.write(""" -android_sdk_repository( - name="androidsdk", - api_level=%s, - path="%s", - build_tools_version="%s")\n -""" % (android_api_level, android_sdk_home_path, android_build_tools_version)) - - -def write_android_ndk_workspace_rule(android_ndk_home_path): - print('Writing android_ndk_workspace rule.') - ndk_api_level = check_ndk_level(android_ndk_home_path) - if int(ndk_api_level) not in _SUPPORTED_ANDROID_NDK_VERSIONS: - print('WARNING: The API level of the NDK in %s is %s, which is not ' - 'supported by Bazel (officially supported versions: %s). Please use ' - 'another version. Compiling Android targets may result in confusing ' - 'errors.\n' % (android_ndk_home_path, ndk_api_level, - _SUPPORTED_ANDROID_NDK_VERSIONS)) - with open(_TF_WORKSPACE, 'a') as f: - f.write(""" -android_ndk_repository( - name="androidndk", - path="%s", - api_level=%s)\n -""" % (android_ndk_home_path, ndk_api_level)) - - -def check_ndk_level(android_ndk_home_path): - """Check the revision number of an Android NDK path.""" - properties_path = '%s/source.properties' % android_ndk_home_path - if is_windows() or is_cygwin(): - properties_path = cygpath(properties_path) - with open(properties_path, 'r') as f: - filedata = f.read() - - revision = re.search(r'Pkg.Revision = (\d+)', filedata) - if revision: - return revision.group(1) - return None - - -def workspace_has_any_android_rule(): - """Check the WORKSPACE for existing android_*_repository rules.""" - with open(_TF_WORKSPACE, 'r') as f: - workspace = f.read() - has_any_rule = re.search(r'^android_[ns]dk_repository', - workspace, - re.MULTILINE) - return has_any_rule - - -def set_gcc_host_compiler_path(environ_cp): - """Set GCC_HOST_COMPILER_PATH.""" - default_gcc_host_compiler_path = which('gcc') or '' - cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH') - - if os.path.islink(cuda_bin_symlink): - # os.readlink is only available in linux - default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink) - - gcc_host_compiler_path = prompt_loop_or_load_from_env( - environ_cp, - var_name='GCC_HOST_COMPILER_PATH', - var_default=default_gcc_host_compiler_path, - ask_for_var= - 'Please specify which gcc should be used by nvcc as the host compiler.', - check_success=os.path.exists, - error_msg='Invalid gcc path. %s cannot be found.', - ) - - write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path) - - -def set_tf_cuda_version(environ_cp): - """Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION.""" - ask_cuda_version = ( - 'Please specify the CUDA SDK version you want to use, ' - 'e.g. 7.0. [Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION - - for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS): - # Configure the Cuda SDK version to use. 
- tf_cuda_version = get_from_env_or_user_or_default( - environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION) - - # Find out where the CUDA toolkit is installed - default_cuda_path = _DEFAULT_CUDA_PATH - if is_windows() or is_cygwin(): - default_cuda_path = cygpath( - environ_cp.get('CUDA_PATH', _DEFAULT_CUDA_PATH_WIN)) - elif is_linux(): - # If the default doesn't exist, try an alternative default. - if (not os.path.exists(default_cuda_path) - ) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX): - default_cuda_path = _DEFAULT_CUDA_PATH_LINUX - ask_cuda_path = ('Please specify the location where CUDA %s toolkit is' - ' installed. Refer to README.md for more details. ' - '[Default is %s]: ') % (tf_cuda_version, default_cuda_path) - cuda_toolkit_path = get_from_env_or_user_or_default( - environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path) - - if is_windows(): - cuda_rt_lib_path = 'lib/x64/cudart.lib' - elif is_linux(): - cuda_rt_lib_path = 'lib64/libcudart.so.%s' % tf_cuda_version - elif is_macos(): - cuda_rt_lib_path = 'lib/libcudart.%s.dylib' % tf_cuda_version - - cuda_toolkit_path_full = os.path.join(cuda_toolkit_path, cuda_rt_lib_path) - if os.path.exists(cuda_toolkit_path_full): - break - - # Reset and retry - print('Invalid path to CUDA %s toolkit. %s cannot be found' % - (tf_cuda_version, cuda_toolkit_path_full)) - environ_cp['TF_CUDA_VERSION'] = '' - environ_cp['CUDA_TOOLKIT_PATH'] = '' - - else: - raise UserInputError('Invalid TF_CUDA_SETTING setting was provided %d ' - 'times in a row. Assuming to be a scripting mistake.' % - _DEFAULT_PROMPT_ASK_ATTEMPTS) - - # Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION - environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path - write_action_env_to_bazelrc('CUDA_TOOLKIT_PATH', cuda_toolkit_path) - environ_cp['TF_CUDA_VERSION'] = tf_cuda_version - write_action_env_to_bazelrc('TF_CUDA_VERSION', tf_cuda_version) - - -def set_tf_cudnn_version(environ_cp): - """Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION.""" - ask_cudnn_version = ( - 'Please specify the cuDNN version you want to use. ' - '[Leave empty to default to cuDNN %s.0]: ') % _DEFAULT_CUDNN_VERSION - - for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS): - tf_cudnn_version = get_from_env_or_user_or_default( - environ_cp, 'TF_CUDNN_VERSION', ask_cudnn_version, - _DEFAULT_CUDNN_VERSION) - - default_cudnn_path = environ_cp.get('CUDA_TOOLKIT_PATH') - ask_cudnn_path = (r'Please specify the location where cuDNN %s library is ' - 'installed. Refer to README.md for more details. [Default' - ' is %s]:') % (tf_cudnn_version, default_cudnn_path) - cudnn_install_path = get_from_env_or_user_or_default( - environ_cp, 'CUDNN_INSTALL_PATH', ask_cudnn_path, default_cudnn_path) - - # Result returned from "read" will be used unexpanded. That make "~" - # unusable. Going through one more level of expansion to handle that. 
- cudnn_install_path = os.path.realpath( - os.path.expanduser(cudnn_install_path)) - if is_windows() or is_cygwin(): - cudnn_install_path = cygpath(cudnn_install_path) - - if is_windows(): - cuda_dnn_lib_path = 'lib/x64/cudnn.lib' - cuda_dnn_lib_alt_path = 'lib/x64/cudnn.lib' - elif is_linux(): - cuda_dnn_lib_path = 'lib64/libcudnn.so.%s' % tf_cudnn_version - cuda_dnn_lib_alt_path = 'libcudnn.so.%s' % tf_cudnn_version - elif is_macos(): - cuda_dnn_lib_path = 'lib/libcudnn.%s.dylib' % tf_cudnn_version - cuda_dnn_lib_alt_path = 'libcudnn.%s.dylib' % tf_cudnn_version - - cuda_dnn_lib_path_full = os.path.join(cudnn_install_path, cuda_dnn_lib_path) - cuda_dnn_lib_alt_path_full = os.path.join(cudnn_install_path, - cuda_dnn_lib_alt_path) - if os.path.exists(cuda_dnn_lib_path_full) or os.path.exists( - cuda_dnn_lib_alt_path_full): - break - - # Try another alternative for Linux - if is_linux(): - ldconfig_bin = which('ldconfig') or '/sbin/ldconfig' - cudnn_path_from_ldconfig = run_shell([ldconfig_bin, '-p']) - cudnn_path_from_ldconfig = re.search('.*libcudnn.so .* => (.*)', - cudnn_path_from_ldconfig) - if cudnn_path_from_ldconfig: - cudnn_path_from_ldconfig = cudnn_path_from_ldconfig.group(1) - if os.path.exists('%s.%s' % (cudnn_path_from_ldconfig, - tf_cudnn_version)): - cudnn_install_path = os.path.dirname(cudnn_path_from_ldconfig) - break - - # Reset and Retry - print( - 'Invalid path to cuDNN %s toolkit. None of the following files can be ' - 'found:' % tf_cudnn_version) - print(cuda_dnn_lib_path_full) - print(cuda_dnn_lib_alt_path_full) - if is_linux(): - print('%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version)) - - environ_cp['TF_CUDNN_VERSION'] = '' - else: - raise UserInputError('Invalid TF_CUDNN setting was provided %d ' - 'times in a row. Assuming to be a scripting mistake.' % - _DEFAULT_PROMPT_ASK_ATTEMPTS) - - # Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION - environ_cp['CUDNN_INSTALL_PATH'] = cudnn_install_path - write_action_env_to_bazelrc('CUDNN_INSTALL_PATH', cudnn_install_path) - environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version - write_action_env_to_bazelrc('TF_CUDNN_VERSION', tf_cudnn_version) - - -def get_native_cuda_compute_capabilities(environ_cp): - """Get native cuda compute capabilities. - - Args: - environ_cp: copy of the os.environ. - Returns: - string of native cuda compute capabilities, separated by comma. 
- """ - device_query_bin = os.path.join( - environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery') - if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK): - try: - output = run_shell(device_query_bin).split('\n') - pattern = re.compile('[0-9]*\\.[0-9]*') - output = [pattern.search(x) for x in output if 'Capability' in x] - output = ','.join(x.group() for x in output if x is not None) - except subprocess.CalledProcessError: - output = '' - else: - output = '' - return output - - -def set_tf_cuda_compute_capabilities(environ_cp): - """Set TF_CUDA_COMPUTE_CAPABILITIES.""" - while True: - native_cuda_compute_capabilities = get_native_cuda_compute_capabilities( - environ_cp) - if not native_cuda_compute_capabilities: - default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES - else: - default_cuda_compute_capabilities = native_cuda_compute_capabilities - - ask_cuda_compute_capabilities = ( - 'Please specify a list of comma-separated ' - 'Cuda compute capabilities you want to ' - 'build with.\nYou can find the compute ' - 'capability of your device at: ' - 'https://developer.nvidia.com/cuda-gpus.\nPlease' - ' note that each additional compute ' - 'capability significantly increases your ' - 'build time and binary size. [Default is: %s]' % - default_cuda_compute_capabilities) - tf_cuda_compute_capabilities = get_from_env_or_user_or_default( - environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES', - ask_cuda_compute_capabilities, default_cuda_compute_capabilities) - # Check whether all capabilities from the input is valid - all_valid = True - for compute_capability in tf_cuda_compute_capabilities.split(','): - m = re.match('[0-9]+.[0-9]+', compute_capability) - if not m: - print('Invalid compute capability: ' % compute_capability) - all_valid = False - else: - ver = int(m.group(0).split('.')[0]) - if ver < 3: - print('Only compute capabilities 3.0 or higher are supported.') - all_valid = False - - if all_valid: - break - - # Reset and Retry - environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = '' - - # Set TF_CUDA_COMPUTE_CAPABILITIES - environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities - write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES', - tf_cuda_compute_capabilities) - - -def set_other_cuda_vars(environ_cp): - """Set other CUDA related variables.""" - if is_windows(): - # The following three variables are needed for MSVC toolchain configuration - # in Bazel - environ_cp['CUDA_PATH'] = environ_cp.get('CUDA_TOOLKIT_PATH') - environ_cp['CUDA_COMPUTE_CAPABILITIES'] = environ_cp.get( - 'TF_CUDA_COMPUTE_CAPABILITIES') - environ_cp['NO_WHOLE_ARCHIVE_OPTION'] = 1 - write_action_env_to_bazelrc('CUDA_PATH', environ_cp.get('CUDA_PATH')) - write_action_env_to_bazelrc('CUDA_COMPUTE_CAPABILITIE', - environ_cp.get('CUDA_COMPUTE_CAPABILITIE')) - write_action_env_to_bazelrc('NO_WHOLE_ARCHIVE_OPTION', - environ_cp.get('NO_WHOLE_ARCHIVE_OPTION')) - write_to_bazelrc('build --config=win-cuda') - write_to_bazelrc('test --config=win-cuda') - else: - # If CUDA is enabled, always use GPU during build and test. 
- if environ_cp.get('TF_CUDA_CLANG') == '1': - write_to_bazelrc('build --config=cuda_clang') - write_to_bazelrc('test --config=cuda_clang') - else: - write_to_bazelrc('build --config=cuda') - write_to_bazelrc('test --config=cuda') - - -def set_host_cxx_compiler(environ_cp): - """Set HOST_CXX_COMPILER.""" - default_cxx_host_compiler = which('g++') or '' - - host_cxx_compiler = prompt_loop_or_load_from_env( - environ_cp, - var_name='HOST_CXX_COMPILER', - var_default=default_cxx_host_compiler, - ask_for_var=('Please specify which C++ compiler should be used as the ' - 'host C++ compiler.'), - check_success=os.path.exists, - error_msg='Invalid C++ compiler path. %s cannot be found.', - ) - - write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler) - - -def set_host_c_compiler(environ_cp): - """Set HOST_C_COMPILER.""" - default_c_host_compiler = which('gcc') or '' - - host_c_compiler = prompt_loop_or_load_from_env( - environ_cp, - var_name='HOST_C_COMPILER', - var_default=default_c_host_compiler, - ask_for_var=('Please specify which C compiler should be used as the host' - 'C compiler.'), - check_success=os.path.exists, - error_msg='Invalid C compiler path. %s cannot be found.', - ) - - write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler) - - -def set_computecpp_toolkit_path(environ_cp): - """Set COMPUTECPP_TOOLKIT_PATH.""" - - def toolkit_exists(toolkit_path): - """Check if a computecpp toolkit path is valid.""" - if is_linux(): - sycl_rt_lib_path = 'lib/libComputeCpp.so' - else: - sycl_rt_lib_path = '' - - sycl_rt_lib_path_full = os.path.join(toolkit_path, - sycl_rt_lib_path) - exists = os.path.exists(sycl_rt_lib_path_full) - if not exists: - print('Invalid SYCL %s library path. %s cannot be found' % - (_TF_OPENCL_VERSION, sycl_rt_lib_path_full)) - return exists - - computecpp_toolkit_path = prompt_loop_or_load_from_env( - environ_cp, - var_name='COMPUTECPP_TOOLKIT_PATH', - var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH, - ask_for_var=( - 'Please specify the location where ComputeCpp for SYCL %s is ' - 'installed.' % _TF_OPENCL_VERSION), - check_success=toolkit_exists, - error_msg='Invalid SYCL compiler path. %s cannot be found.', - suppress_default_error=True) - - write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH', - computecpp_toolkit_path) - -def set_trisycl_include_dir(environ_cp): - """Set TRISYCL_INCLUDE_DIR""" - ask_trisycl_include_dir = ('Please specify the location of the triSYCL ' - 'include directory. (Use --config=sycl_trisycl ' - 'when building with Bazel) ' - '[Default is %s]: ' - ) % (_DEFAULT_TRISYCL_INCLUDE_DIR) - while True: - trisycl_include_dir = get_from_env_or_user_or_default( - environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir, - _DEFAULT_TRISYCL_INCLUDE_DIR) - if os.path.exists(trisycl_include_dir): - break - - print('Invalid triSYCL include directory, %s cannot be found' - % (trisycl_include_dir)) - - # Set TRISYCL_INCLUDE_DIR - environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir - write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', - trisycl_include_dir) - -def set_trisycl_include_dir(environ_cp): - """Set TRISYCL_INCLUDE_DIR.""" - ask_trisycl_include_dir = ('Please specify the location of the triSYCL ' - 'include directory. 
(Use --config=sycl_trisycl ' - 'when building with Bazel) ' - '[Default is %s]: ') % _DEFAULT_TRISYCL_INCLUDE_DIR - while True: - trisycl_include_dir = get_from_env_or_user_or_default( - environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir, - _DEFAULT_TRISYCL_INCLUDE_DIR) - if os.path.exists(trisycl_include_dir): - break - - print('Invalid triSYCL include directory, %s cannot be found' - % (trisycl_include_dir)) - - # Set TRISYCL_INCLUDE_DIR - environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir - write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', - trisycl_include_dir) - - -def set_trisycl_include_dir(environ_cp): - """Set TRISYCL_INCLUDE_DIR.""" - - trisycl_include_dir = prompt_loop_or_load_from_env( - environ_cp, - var_name='TRISYCL_INCLUDE_DIR', - var_default=_DEFAULT_TRISYCL_INCLUDE_DIR, - ask_for_var=('Please specify the location of the triSYCL include ' - 'directory. (Use --config=sycl_trisycl when building with ' - 'Bazel)'), - check_success=os.path.exists, - error_msg='Invalid trySYCL include directory. %s cannot be found.', - suppress_default_error=True) - - write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir) - - -def set_mpi_home(environ_cp): - """Set MPI_HOME.""" - - default_mpi_home = which('mpirun') or which('mpiexec') or '' - default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home)) - - def valid_mpi_path(mpi_home): - exists = (os.path.exists(os.path.join(mpi_home, 'include')) and - os.path.exists(os.path.join(mpi_home, 'lib'))) - if not exists: - print('Invalid path to the MPI Toolkit. %s or %s cannot be found' % - (os.path.join(mpi_home, 'include'), - os.path.exists(os.path.join(mpi_home, 'lib')))) - return exists - - _ = prompt_loop_or_load_from_env( - environ_cp, - var_name='MPI_HOME', - var_default=default_mpi_home, - ask_for_var='Please specify the MPI toolkit folder.', - check_success=valid_mpi_path, - error_msg='', - suppress_default_error=True) - - -def set_other_mpi_vars(environ_cp): - """Set other MPI related variables.""" - # Link the MPI header files - mpi_home = environ_cp.get('MPI_HOME') - symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h') - - # Determine if we use OpenMPI or MVAPICH, these require different header files - # to be included here to make bazel dependency checker happy - if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')): - symlink_force( - os.path.join(mpi_home, 'include/mpi_portable_platform.h'), - 'third_party/mpi/mpi_portable_platform.h') - # TODO(gunan): avoid editing files in configure - sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False', - 'MPI_LIB_IS_OPENMPI=True') - else: - # MVAPICH / MPICH - symlink_force( - os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h') - symlink_force( - os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h') - # TODO(gunan): avoid editing files in configure - sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True', - 'MPI_LIB_IS_OPENMPI=False') - - if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')): - symlink_force( - os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so') - else: - raise ValueError('Cannot find the MPI library file in %s/lib' % mpi_home) - - -def set_mkl(): - write_to_bazelrc('build:mkl --define using_mkl=true') - write_to_bazelrc('build:mkl -c opt') - print( - 'Add "--config=mkl" to your bazel command to build with MKL ' - 'support.\nPlease note that MKL on MacOS or windows is still not ' - 'supported.\nIf you would like to use a 
local MKL instead of ' - 'downloading, please set the environment variable \"TF_MKL_ROOT\" every ' - 'time before build.\n') - - -def set_monolithic(): - # Add --config=monolithic to your bazel command to use a mostly-static - # build and disable modular op registration support (this will revert to - # loading TensorFlow with RTLD_GLOBAL in Python). By default (without - # --config=monolithic), TensorFlow will build with a dependence on - # //tensorflow:libtensorflow_framework.so. - write_to_bazelrc('build:monolithic --define framework_shared_object=false') - # For projects which use TensorFlow as part of a Bazel build process, putting - # nothing in a bazelrc will default to a monolithic build. The following line - # opts in to modular op registration support by default: - write_to_bazelrc('build --define framework_shared_object=true') - - -def create_android_bazelrc_configs(): - # Flags for --config=android - write_to_bazelrc('build:android --crosstool_top=//external:android/crosstool') - write_to_bazelrc( - 'build:android --host_crosstool_top=@bazel_tools//tools/cpp:toolchain') - # Flags for --config=android_arm - write_to_bazelrc('build:android_arm --config=android') - write_to_bazelrc('build:android_arm --cpu=armeabi-v7a') - # Flags for --config=android_arm64 - write_to_bazelrc('build:android_arm64 --config=android') - write_to_bazelrc('build:android_arm64 --cpu=arm64-v8a') - - -def set_grpc_build_flags(): - write_to_bazelrc('build --define grpc_no_ares=true') - -def set_windows_build_flags(): - if is_windows(): - # The non-monolithic build is not supported yet - write_to_bazelrc('build --config monolithic') - # Suppress warning messages - write_to_bazelrc('build --copt=-w --host_copt=-w') - # Output more verbose information when something goes wrong - write_to_bazelrc('build --verbose_failures') - - -def main(): - # Make a copy of os.environ to be clear when functions and getting and setting - # environment variables. 
- environ_cp = dict(os.environ) - - check_bazel_version('0.5.4') - - reset_tf_configure_bazelrc() - cleanup_makefile() - setup_python(environ_cp) - run_gen_git_source(environ_cp) - - if is_windows(): - environ_cp['TF_NEED_S3'] = '0' - environ_cp['TF_NEED_GCP'] = '0' - environ_cp['TF_NEED_HDFS'] = '0' - environ_cp['TF_NEED_JEMALLOC'] = '0' - environ_cp['TF_NEED_OPENCL_SYCL'] = '0' - environ_cp['TF_NEED_COMPUTECPP'] = '0' - environ_cp['TF_NEED_OPENCL'] = '0' - environ_cp['TF_CUDA_CLANG'] = '0' - - if is_macos(): - environ_cp['TF_NEED_JEMALLOC'] = '0' - - set_build_var(environ_cp, 'TF_NEED_JEMALLOC', 'jemalloc as malloc', - 'with_jemalloc', True) - set_build_var(environ_cp, 'TF_NEED_GCP', 'Google Cloud Platform', - 'with_gcp_support', True, 'gcp') - set_build_var(environ_cp, 'TF_NEED_HDFS', 'Hadoop File System', - 'with_hdfs_support', True, 'hdfs') - set_build_var(environ_cp, 'TF_NEED_S3', 'Amazon S3 File System', - 'with_s3_support', True, 's3') - set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support', - False, 'xla') - set_build_var(environ_cp, 'TF_NEED_GDR', 'GDR', 'with_gdr_support', - False, 'gdr') - set_build_var(environ_cp, 'TF_NEED_VERBS', 'VERBS', 'with_verbs_support', - False, 'verbs') - - set_action_env_var(environ_cp, 'TF_NEED_OPENCL_SYCL', 'OpenCL SYCL', False) - if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1': - set_host_cxx_compiler(environ_cp) - set_host_c_compiler(environ_cp) - set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True) - if environ_cp.get('TF_NEED_COMPUTECPP') == '1': - set_computecpp_toolkit_path(environ_cp) - else: - set_trisycl_include_dir(environ_cp) - - set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False) - if (environ_cp.get('TF_NEED_CUDA') == '1' and - 'TF_CUDA_CONFIG_REPO' not in environ_cp): - set_tf_cuda_version(environ_cp) - set_tf_cudnn_version(environ_cp) - set_tf_cuda_compute_capabilities(environ_cp) - if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get('LD_LIBRARY_PATH') != '1': - write_action_env_to_bazelrc('LD_LIBRARY_PATH', environ_cp.get('LD_LIBRARY_PATH')) - - set_tf_cuda_clang(environ_cp) - if environ_cp.get('TF_CUDA_CLANG') == '1': - # Set up which clang we should use as the cuda / host compiler. - set_clang_cuda_compiler_path(environ_cp) - else: - # Set up which gcc nvcc should use as the host compiler - # No need to set this on Windows - if not is_windows(): - set_gcc_host_compiler_path(environ_cp) - set_other_cuda_vars(environ_cp) - - set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False) - if environ_cp.get('TF_NEED_MPI') == '1': - set_mpi_home(environ_cp) - set_other_mpi_vars(environ_cp) - - set_grpc_build_flags() - set_cc_opt_flags(environ_cp) - set_mkl() - set_monolithic() - set_windows_build_flags() - create_android_bazelrc_configs() - - if workspace_has_any_android_rule(): - print('The WORKSPACE file has at least one of ["android_sdk_repository", ' - '"android_ndk_repository"] already set. Will not ask to help ' - 'configure the WORKSPACE. 
Please delete the existing rules to ' - 'activate the helper.\n') - else: - if get_var( - environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', - False, - ('Would you like to interactively configure ./WORKSPACE for ' - 'Android builds?'), - 'Searching for NDK and SDK installations.', - 'Not configuring the WORKSPACE for Android builds.'): - create_android_ndk_rule(environ_cp) - create_android_sdk_rule(environ_cp) - - -if __name__ == '__main__': - main() diff --git a/spaces/starlit7/USPoliticsTTS/monotonic_align/core.py b/spaces/starlit7/USPoliticsTTS/monotonic_align/core.py deleted file mode 100644 index 1f940605fe4fd0738fa0006149fcba14ef88223a..0000000000000000000000000000000000000000 --- a/spaces/starlit7/USPoliticsTTS/monotonic_align/core.py +++ /dev/null @@ -1,36 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), - nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val = -1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0. - else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): - index = index - 1 diff --git a/spaces/stefo/minimal/app.py b/spaces/stefo/minimal/app.py deleted file mode 100644 index 0391912a6d0e43dad3f2adcb6649dcc7084c183f..0000000000000000000000000000000000000000 --- a/spaces/stefo/minimal/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr - -def greet(name): - return "You entered:" + name - -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Anime 1080p 60 Fps Xbox One Gamesl __FULL__.md b/spaces/stomexserde/gpt4-ui/Examples/Anime 1080p 60 Fps Xbox One Gamesl __FULL__.md deleted file mode 100644 index 92b7c8f34797c0531278bbd638e8a1e3d32d6b78..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Anime 1080p 60 Fps Xbox One Gamesl __FULL__.md +++ /dev/null @@ -1,28 +0,0 @@ -
              -

              How to Enjoy Anime 1080p 60 Fps Xbox One Gamesl

              -

              Anime fans who love gaming on their Xbox One consoles may be wondering how to get the best experience out of their favorite titles. Anime 1080p 60 Fps Xbox One Gamesl is a term that refers to anime-themed games that run at a high resolution and frame rate on the Xbox One platform. These games offer smooth and immersive gameplay, as well as stunning visuals that capture the essence of anime art and animation.

              -

              But how can you find and play these games? Here are some tips and recommendations to help you enjoy Anime 1080p 60 Fps Xbox One Gamesl:

              -

              Anime 1080p 60 Fps Xbox One Gamesl


              Download Zip 🌟 https://urlgoal.com/2uI80o



              -
                -
              • Check the game's specifications before buying. Not all games support 1080p resolution and 60 frames per second (fps) performance on the Xbox One. Some games may have lower resolutions or frame rates, or may require an Xbox One X or S console to achieve optimal performance. You can check the game's specifications on the official website, the Xbox Store, or online reviews.
              • -
              • Adjust your TV settings. To enjoy Anime 1080p 60 Fps Xbox One Gamesl, you need a TV that supports 1080p resolution and has a high refresh rate. You may also need to adjust your TV settings to enable game mode, reduce input lag, and optimize brightness, contrast, and color. Refer to your TV manual or online guides for more details.
              • -
              • Explore the game's options. Some games may have options that allow you to customize the graphics, audio, and gameplay settings. You can tweak these options to suit your preferences and improve your experience. For example, you can enable or disable motion blur, anti-aliasing, subtitles, voiceovers, etc.
              • -
              • Try different genres and styles. Anime 1080p 60 Fps Xbox One Gamesl is not limited to one genre or style of game. You can find anime-inspired games in various categories, such as action, adventure, role-playing, fighting, racing, simulation, etc. You can also find games that feature different styles of anime art and animation, such as realistic, stylized, cel-shaded, etc. Experiment with different genres and styles to discover new games that appeal to you.
              • -
              -

              Anime 1080p 60 Fps Xbox One Gamesl is a great way to enjoy anime and gaming on your Xbox One console. By following these tips and recommendations, you can enhance your experience and have fun with your favorite anime-themed games.

              - -

              Some of the most popular and acclaimed Anime 1080p 60 Fps Xbox One Gamesl are:

              -
                -
              1. Nier: Automata. This is an action role-playing game that follows the story of androids fighting a war against machines in a post-apocalyptic world. The game features fast-paced combat, multiple endings, and a captivating soundtrack. The game runs at 1080p resolution and 60 fps on the Xbox One X console.
              2. -
              3. Dragon Ball FighterZ. This is a fighting game that features characters from the Dragon Ball franchise. The game has a 2.5D graphics style that mimics the anime's art and animation. The game also has a story mode, online multiplayer, and various modes and options. The game runs at 1080p resolution and 60 fps on all Xbox One consoles.
              4. -
              5. Persona 5 Royal. This is an enhanced version of the original Persona 5 game, which is a role-playing game that revolves around a group of high school students who use their personas to fight evil in a parallel world. The game has a stylish and colorful graphics style, as well as a deep and engaging story and gameplay. The game runs at 1080p resolution and 60 fps on the Xbox One X console.
              6. -
              -

              If you are looking for more Anime 1080p 60 Fps Xbox One Gamesl, you can browse the Xbox Store or online forums and websites for recommendations. You can also check out the upcoming releases and pre-order the games that interest you. Some of the upcoming Anime 1080p 60 Fps Xbox One Gamesl are:

              -

              -
                -
              • Tales of Arise. This is an action role-playing game that is part of the Tales series. The game follows the story of two characters from different worlds who join forces to change their fate and create a new future. The game has a new graphics engine that enhances the visuals and animation of the game. The game is expected to run at 1080p resolution and 60 fps on the Xbox One X console.
              • -
              • Elden Ring. This is an action role-playing game that is developed by FromSoftware, the creators of Dark Souls and Sekiro: Shadows Die Twice. The game is set in a vast and open world that is influenced by Norse mythology and fantasy. The game also features a collaboration with George R.R. Martin, the author of A Song of Ice and Fire. The game is expected to run at 1080p resolution and 60 fps on the Xbox One X console.
              • -
              • Scarlet Nexus. This is an action role-playing game that takes place in a futuristic world where humans have developed psychic abilities. The game follows the story of two characters who join an elite force to fight against mysterious enemies called Others. The game has a vibrant and dynamic graphics style that blends anime and realism. The game is expected to run at 1080p resolution and 60 fps on all Xbox One consoles.
              • -

              7b8c122e87
              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Autodesk AutoCAD 2014 (32Bits 64Bits) [Spanish] Free Download ((BETTER)).md b/spaces/stomexserde/gpt4-ui/Examples/Autodesk AutoCAD 2014 (32Bits 64Bits) [Spanish] Free Download ((BETTER)).md deleted file mode 100644 index 1efa42c318246ec8865c2b1e69f2b17420317086..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Autodesk AutoCAD 2014 (32Bits 64Bits) [Spanish] Free Download ((BETTER)).md +++ /dev/null @@ -1,45 +0,0 @@ -
              -

              How to Download Autodesk AutoCAD 2014 (32Bits 64Bits) [Spanish] for Free

              -

              Autodesk AutoCAD 2014 is a powerful software for designing and documenting 2D and 3D projects. It is widely used by architects, engineers, and professionals in various fields. If you want to download Autodesk AutoCAD 2014 (32Bits 64Bits) [Spanish] for free, you are in the right place. In this article, we will show you how to get this software without paying anything.

              -

              Autodesk AutoCAD 2014 (32Bits 64Bits) [Spanish] free download


              DOWNLOADhttps://urlgoal.com/2uIc88



              -

              What is Autodesk AutoCAD 2014?

              -

              Autodesk AutoCAD 2014 is the latest version of the popular CAD software that offers new features and enhancements to improve your design workflow. Some of the benefits of Autodesk AutoCAD 2014 are:

              -
                -
              • It connects your design to the cloud, allowing you to collaborate with others and access your projects from any device.
              • -
              • It supports social media integration, enabling you to share your ideas and feedback with your peers and clients.
              • -
              • It has improved tools for creating and editing 3D models, such as point clouds, surfaces, solids, and meshes.
              • -
              • It has a new interface that is more intuitive and customizable.
              • -
              • It has faster performance and stability.
              • -
              -

              How to Download Autodesk AutoCAD 2014 (32Bits 64Bits) [Spanish] for Free?

              -

              To download Autodesk AutoCAD 2014 (32Bits 64Bits) [Spanish] for free, you need to follow these steps:

              -
                -
              1. Disable your internet connection and antivirus software.
              2. -
              3. Download the software from one of these links[^1^] [^2^] [^3^]. Choose the version that matches your system (32 bits or 64 bits).
              4. -
              5. Extract the files using a program like WinRAR or 7-Zip.
              6. -
              7. Run the setup file and follow the instructions. When asked for a product key or serial number, use one of these codes:
                -001F1 - AutoCAD 2014
                -057F1 - AutoCAD LT 2014
                -128F1 - Autodesk 3ds Max 2014
                -129F1 - AutoCAD Map 3D 2014
                -140F1 - AutoCAD OEM 2014
                -151F1 - RealDWG 2014
                -185F1 - AutoCAD Architecture 2014
                -200F1 - Autodesk Design Academy 2014
                -206F1 - AutoCAD Mechanical 2014
                -208F1 - Autodesk Inventor Professional 2014
                -213F1 - Autodesk Inventor OEM 2014
                -225F1 - AutoCAD Electrical 2014
                -235F1 - AutoCAD MEP 2014
                -237F1 - AutoCAD Civil 3D 2014
                -240F1 - Autodesk Revit Architecture 2014
                -241F1 - AutoCAD Revit Architecture Suite 2014
                -255F1 - Autodesk Revit Structure 2014
                -256F1 - AutoCAD Revit Structure Suite 2014
                -257F1 - AutoCAD Revit MEP Suite 2014
                -262F1 - Autodesk Showcase Professional 2014
                -295F1 - Autodesk Showcase Presenter 2014
                -297F1 - AutoCAD Revit MEP-BIM Suite for Education Providers Only (EDU) – Standalone License Only (SLM) – Not For Resale (NFR) – No Commercial Use Allowed (NCUA) – No Technical Support Provided (NTSP) – No Updates Provided (NUP) – No Subscription Services Provided (NSSP) – No Cloud Services Provided (NCSP) – No Network License Available (NNLA) – No Home Use Rights Available (NHURA) – No Previous Version Rights Available (NPVRA) – No Crossgrade Rights Available (NCRA) – No Transfer Rights Available (NTRA) – No License Borrowing Available (NLBA) – No License Transfer Available (NLTA) – No License Reassignment Available (

                -

                cec2833e83
                -
                -
                \ No newline at end of file diff --git a/spaces/subhajitmaji/MusicGen/audiocraft/modules/conditioners.py b/spaces/subhajitmaji/MusicGen/audiocraft/modules/conditioners.py deleted file mode 100644 index 82792316024b88d4c5c38b0a28f443627771d509..0000000000000000000000000000000000000000 --- a/spaces/subhajitmaji/MusicGen/audiocraft/modules/conditioners.py +++ /dev/null @@ -1,990 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from collections import defaultdict -from copy import deepcopy -from dataclasses import dataclass, field -from itertools import chain -import logging -import math -import random -import re -import typing as tp -import warnings - -from einops import rearrange -from num2words import num2words -import spacy -from transformers import T5EncoderModel, T5Tokenizer # type: ignore -import torchaudio -import torch -from torch import nn -from torch import Tensor -import torch.nn.functional as F -from torch.nn.utils.rnn import pad_sequence - -from .streaming import StreamingModule -from .transformer import create_sin_embedding -from ..data.audio_dataset import SegmentInfo -from ..utils.autocast import TorchAutocast -from ..utils.utils import hash_trick, length_to_mask, collate - - -logger = logging.getLogger(__name__) -TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) -ConditionType = tp.Tuple[Tensor, Tensor] # condition, mask - - -class WavCondition(tp.NamedTuple): - wav: Tensor - length: Tensor - path: tp.List[tp.Optional[str]] = [] - - -def nullify_condition(condition: ConditionType, dim: int = 1): - """This function transforms an input condition to a null condition. - The way it is done by converting it to a single zero vector similarly - to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. - - Args: - condition (ConditionType): a tuple of condition and mask (tp.Tuple[Tensor, Tensor]) - dim (int): the dimension that will be truncated (should be the time dimension) - WARNING!: dim should not be the batch dimension! - Returns: - ConditionType: a tuple of null condition and mask - """ - assert dim != 0, "dim cannot be the batch dimension!" - assert type(condition) == tuple and \ - type(condition[0]) == Tensor and \ - type(condition[1]) == Tensor, "'nullify_condition' got an unexpected input type!" - cond, mask = condition - B = cond.shape[0] - last_dim = cond.dim() - 1 - out = cond.transpose(dim, last_dim) - out = 0. * out[..., :1] - out = out.transpose(dim, last_dim) - mask = torch.zeros((B, 1), device=out.device).int() - assert cond.dim() == out.dim() - return out, mask - - -def nullify_wav(wav: Tensor) -> WavCondition: - """Create a nullified WavCondition from a wav tensor with appropriate shape. - - Args: - wav (Tensor): tensor of shape [B, T] - Returns: - WavCondition: wav condition with nullified wav. 
- """ - null_wav, _ = nullify_condition((wav, torch.zeros_like(wav)), dim=wav.dim() - 1) - return WavCondition( - wav=null_wav, - length=torch.tensor([0] * wav.shape[0], device=wav.device), - path=['null_wav'] * wav.shape[0] - ) - - -@dataclass -class ConditioningAttributes: - text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) - wav: tp.Dict[str, WavCondition] = field(default_factory=dict) - - def __getitem__(self, item): - return getattr(self, item) - - @property - def text_attributes(self): - return self.text.keys() - - @property - def wav_attributes(self): - return self.wav.keys() - - @property - def attributes(self): - return {"text": self.text_attributes, "wav": self.wav_attributes} - - def to_flat_dict(self): - return { - **{f"text.{k}": v for k, v in self.text.items()}, - **{f"wav.{k}": v for k, v in self.wav.items()}, - } - - @classmethod - def from_flat_dict(cls, x): - out = cls() - for k, v in x.items(): - kind, att = k.split(".") - out[kind][att] = v - return out - - -class SegmentWithAttributes(SegmentInfo): - """Base class for all dataclasses that are used for conditioning. - All child classes should implement `to_condition_attributes` that converts - the existing attributes to a dataclass of type ConditioningAttributes. - """ - def to_condition_attributes(self) -> ConditioningAttributes: - raise NotImplementedError() - - -class Tokenizer: - """Base class for all tokenizers - (in case we want to introduce more advances tokenizers in the future). - """ - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]: - raise NotImplementedError() - - -class WhiteSpaceTokenizer(Tokenizer): - """This tokenizer should be used for natural language descriptions. - For example: - ["he didn't, know he's going home.", 'shorter sentence'] => - [[78, 62, 31, 4, 78, 25, 19, 34], - [59, 77, 0, 0, 0, 0, 0, 0]] - """ - PUNCTUATIONS = "?:!.,;" - - def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", - lemma: bool = True, stopwords: bool = True) -> None: - self.n_bins = n_bins - self.pad_idx = pad_idx - self.lemma = lemma - self.stopwords = stopwords - try: - self.nlp = spacy.load(language) - except IOError: - spacy.cli.download(language) # type: ignore - self.nlp = spacy.load(language) - - @tp.no_type_check - def __call__( - self, - texts: tp.List[tp.Optional[str]], - return_text: bool = False - ) -> tp.Tuple[Tensor, Tensor]: - """Take a list of strings and convert them to a tensor of indices. - - Args: - texts (tp.List[str]): List of strings. - return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. - Returns: - tp.Tuple[Tensor, Tensor]: - - Indices of words in the LUT. 
- - And a mask indicating where the padding tokens are - """ - output, lengths = [], [] - texts = deepcopy(texts) - for i, text in enumerate(texts): - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(Tensor([self.pad_idx])) - lengths.append(0) - continue - - # convert numbers to words - text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore - # normalize text - text = self.nlp(text) # type: ignore - # remove stopwords - if self.stopwords: - text = [w for w in text if not w.is_stop] # type: ignore - # remove punctuations - text = [w for w in text if w.text not in self.PUNCTUATIONS] # type: ignore - # lemmatize if needed - text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore - - texts[i] = " ".join(text) - lengths.append(len(text)) - # convert to tensor - tokens = Tensor([hash_trick(w, self.n_bins) for w in text]) - output.append(tokens) - - mask = length_to_mask(torch.IntTensor(lengths)).int() - padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() - if return_text: - return padded_output, mask, texts # type: ignore - return padded_output, mask - - -class NoopTokenizer(Tokenizer): - """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. - The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split - strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will - split it to ["Jeff", "Buckley"] and return an index per word. - - For example: - ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] - ["Metal", "Rock", "Classical"] => [0, 223, 51] - """ - def __init__(self, n_bins: int, pad_idx: int = 0): - self.n_bins = n_bins - self.pad_idx = pad_idx - - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]: - output, lengths = [], [] - for text in texts: - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(self.pad_idx) - lengths.append(0) - else: - output.append(hash_trick(text, self.n_bins)) - lengths.append(1) - - tokens = torch.LongTensor(output).unsqueeze(1) - mask = length_to_mask(torch.IntTensor(lengths)).int() - return tokens, mask - - -class BaseConditioner(nn.Module): - """Base model for all conditioner modules. We allow the output dim to be different - than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; - 2) make all condition dims consistent. - - Args: - dim (int): Hidden dim of the model (text-encoder/LUT). - output_dim (int): Output dim of the conditioner. - """ - def __init__(self, dim, output_dim): - super().__init__() - self.dim = dim - self.output_dim = output_dim - self.output_proj = nn.Linear(dim, output_dim) - - def tokenize(self, *args, **kwargs) -> tp.Any: - """Should be any part of the processing that will lead to a synchronization - point, e.g. BPE tokenization with transfer to the GPU. - - The returned value will be saved and return later when calling forward(). - """ - raise NotImplementedError() - - def forward(self, inputs: tp.Any) -> ConditionType: - """Gets input that should be used as conditioning (e.g, genre, description or a waveform). - Outputs a ConditionType, after the input data was embedded as a dense vector. - - Returns: - ConditionType: - - A tensor of size [B, T, D] where B is the batch size, T is the length of the - output embedding and D is the dimension of the embedding. 
- - And a mask indicating where the padding tokens. - """ - raise NotImplementedError() - - -class TextConditioner(BaseConditioner): - ... - - -class LUTConditioner(TextConditioner): - """Lookup table TextConditioner. - - Args: - n_bins (int): Number of bins. - dim (int): Hidden dim of the model (text-encoder/LUT). - output_dim (int): Output dim of the conditioner. - tokenizer (str): Name of the tokenizer. - pad_idx (int, optional): Index for padding token. Defaults to 0. - """ - def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): - super().__init__(dim, output_dim) - self.embed = nn.Embedding(n_bins, dim) - self.tokenizer: Tokenizer - if tokenizer == "whitespace": - self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) - elif tokenizer == "noop": - self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) - else: - raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - device = self.embed.weight.device - tokens, mask = self.tokenizer(x) - tokens, mask = tokens.to(device), mask.to(device) - return tokens, mask - - def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: - tokens, mask = inputs - embeds = self.embed(tokens) - embeds = self.output_proj(embeds) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class T5Conditioner(TextConditioner): - """T5-based TextConditioner. - - Args: - name (str): Name of the T5 model. - output_dim (int): Output dim of the conditioner. - finetune (bool): Whether to fine-tune T5 at train time. - device (str): Device for T5 Conditioner. - autocast_dtype (tp.Optional[str], optional): Autocast dtype. - word_dropout (float, optional): Word dropout probability. - normalize_text (bool, optional): Whether to apply text normalization. - """ - MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", - "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", - "google/flan-t5-xl", "google/flan-t5-xxl"] - MODELS_DIMS = { - "t5-small": 512, - "t5-base": 768, - "t5-large": 1024, - "t5-3b": 1024, - "t5-11b": 1024, - "google/flan-t5-small": 512, - "google/flan-t5-base": 768, - "google/flan-t5-large": 1024, - "google/flan-t5-3b": 1024, - "google/flan-t5-11b": 1024, - } - - def __init__(self, name: str, output_dim: int, finetune: bool, device: str, - autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., - normalize_text: bool = False): - assert name in self.MODELS, f"unrecognized t5 model name (should in {self.MODELS})" - super().__init__(self.MODELS_DIMS[name], output_dim) - self.device = device - self.name = name - self.finetune = finetune - self.word_dropout = word_dropout - - if autocast_dtype is None or self.device == 'cpu': - self.autocast = TorchAutocast(enabled=False) - if self.device != 'cpu': - logger.warning("T5 has no autocast, this might lead to NaN") - else: - dtype = getattr(torch, autocast_dtype) - assert isinstance(dtype, torch.dtype) - logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") - self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) - # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
- # thanks https://gist.github.com/simon-weber/7853144 - previous_level = logging.root.manager.disable - logging.disable(logging.ERROR) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - try: - self.t5_tokenizer = T5Tokenizer.from_pretrained(name) - t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) - finally: - logging.disable(previous_level) - if finetune: - self.t5 = t5 - else: - # this makes sure that the t5 models is not part - # of the saved checkpoint - self.__dict__["t5"] = t5.to(device) - - self.normalize_text = normalize_text - if normalize_text: - self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: - # if current sample doesn't have a certain attribute, replace with empty string - entries: tp.List[str] = [xi if xi is not None else "" for xi in x] - if self.normalize_text: - _, _, entries = self.text_normalizer(entries, return_text=True) - if self.word_dropout > 0. and self.training: - new_entries = [] - for entry in entries: - words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] - new_entries.append(" ".join(words)) - entries = new_entries - - empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) - - inputs = self.t5_tokenizer(entries, return_tensors="pt", padding=True).to(self.device) - mask = inputs["attention_mask"] - mask[empty_idx, :] = 0 # zero-out index where the input is non-existant - return inputs - - def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: - mask = inputs["attention_mask"] - with torch.set_grad_enabled(self.finetune), self.autocast: - embeds = self.t5(**inputs).last_hidden_state - embeds = self.output_proj(embeds.to(self.output_proj.weight)) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class WaveformConditioner(BaseConditioner): - """Base class for all conditioners that take a waveform as input. - Classes that inherit must implement `_get_wav_embedding` that outputs - a continuous tensor, and `_downsampling_factor` that returns the down-sampling - factor of the embedding model. - - Args: - dim (int): The internal representation dimension. - output_dim (int): Output dimension. - device (tp.Union[torch.device, str]): Device. - """ - def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): - super().__init__(dim, output_dim) - self.device = device - - def tokenize(self, wav_length: WavCondition) -> WavCondition: - wav, length, path = wav_length - assert length is not None - return WavCondition(wav.to(self.device), length.to(self.device), path) - - def _get_wav_embedding(self, wav: Tensor) -> Tensor: - """Gets as input a wav and returns a dense vector of conditions.""" - raise NotImplementedError() - - def _downsampling_factor(self): - """Returns the downsampling factor of the embedding model.""" - raise NotImplementedError() - - def forward(self, inputs: WavCondition) -> ConditionType: - """ - Args: - input (WavCondition): Tuple of (waveform, lengths). - Returns: - ConditionType: Dense vector representing the conditioning along with its' mask. 
- """ - wav, lengths, path = inputs - with torch.no_grad(): - embeds = self._get_wav_embedding(wav) - embeds = embeds.to(self.output_proj.weight) - embeds = self.output_proj(embeds) - - if lengths is not None: - lengths = lengths / self._downsampling_factor() - mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore - else: - mask = torch.ones_like(embeds) - embeds = (embeds * mask.unsqueeze(2).to(self.device)) - - return embeds, mask - - -class ChromaStemConditioner(WaveformConditioner): - """Chroma conditioner that uses DEMUCS to first filter out drums and bass. The is followed by - the insight the drums and bass often dominate the chroma, leading to the chroma not containing the - information about melody. - - Args: - output_dim (int): Output dimension for the conditioner. - sample_rate (int): Sample rate for the chroma extractor. - n_chroma (int): Number of chroma for the chroma extractor. - radix2_exp (int): Radix2 exponent for the chroma extractor. - duration (float): Duration used during training. This is later used for correct padding - in case we are using chroma as prefix. - match_len_on_eval (bool, optional): If True then all chromas are padded to the training - duration. Defaults to False. - eval_wavs (str, optional): Path to a json egg with waveform, this waveforms are used as - conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). - Defaults to None. - n_eval_wavs (int, optional): Limits the number of waveforms used for conditioning. Defaults to 0. - device (tp.Union[torch.device, str], optional): Device for the conditioner. - **kwargs: Additional parameters for the chroma extractor. - """ - def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, - duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, - n_eval_wavs: int = 0, device: tp.Union[torch.device, str] = "cpu", **kwargs): - from demucs import pretrained - super().__init__(dim=n_chroma, output_dim=output_dim, device=device) - self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32) - self.sample_rate = sample_rate - self.match_len_on_eval = match_len_on_eval - self.duration = duration - self.__dict__["demucs"] = pretrained.get_model('htdemucs').to(device) - self.stem2idx = {'drums': 0, 'bass': 1, 'other': 2, 'vocal': 3} - self.stem_idx = torch.LongTensor([self.stem2idx['vocal'], self.stem2idx['other']]).to(device) - self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, - device=device, **kwargs) - self.chroma_len = self._get_chroma_len() - - def _downsampling_factor(self): - return self.chroma.winhop - - def _get_chroma_len(self): - """Get length of chroma during training""" - dummy_wav = torch.zeros((1, self.sample_rate * self.duration), device=self.device) - dummy_chr = self.chroma(dummy_wav) - return dummy_chr.shape[1] - - @torch.no_grad() - def _get_filtered_wav(self, wav): - from demucs.apply import apply_model - from demucs.audio import convert_audio - with self.autocast: - wav = convert_audio(wav, self.sample_rate, self.demucs.samplerate, self.demucs.audio_channels) - stems = apply_model(self.demucs, wav, device=self.device) - stems = stems[:, self.stem_idx] # extract stem - stems = stems.sum(1) # merge extracted stems - stems = stems.mean(1, keepdim=True) # mono - stems = convert_audio(stems, self.demucs.samplerate, self.sample_rate, 1) - return stems - - @torch.no_grad() - def _get_wav_embedding(self, 
wav): - # avoid 0-size tensors when we are working with null conds - if wav.shape[-1] == 1: - return self.chroma(wav) - stems = self._get_filtered_wav(wav) - chroma = self.chroma(stems) - - if self.match_len_on_eval: - b, t, c = chroma.shape - if t > self.chroma_len: - chroma = chroma[:, :self.chroma_len] - logger.debug(f'chroma was truncated! ({t} -> {chroma.shape[1]})') - elif t < self.chroma_len: - # chroma = F.pad(chroma, (0, 0, 0, self.chroma_len - t)) - n_repeat = int(math.ceil(self.chroma_len / t)) - chroma = chroma.repeat(1, n_repeat, 1) - chroma = chroma[:, :self.chroma_len] - logger.debug(f'chroma was zero-padded! ({t} -> {chroma.shape[1]})') - return chroma - - -class ChromaExtractor(nn.Module): - """Chroma extraction class, handles chroma extraction and quantization. - - Args: - sample_rate (int): Sample rate. - n_chroma (int): Number of chroma to consider. - radix2_exp (int): Radix2 exponent. - nfft (tp.Optional[int], optional): Number of FFT. - winlen (tp.Optional[int], optional): Window length. - winhop (tp.Optional[int], optional): Window hop size. - argmax (bool, optional): Whether to use argmax. Defaults to False. - norm (float, optional): Norm for chroma normalization. Defaults to inf. - device (tp.Union[torch.device, str], optional): Device to use. Defaults to cpu. - """ - def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, - nfft: tp.Optional[int] = None, winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, - argmax: bool = False, norm: float = torch.inf, device: tp.Union[torch.device, str] = "cpu"): - super().__init__() - from librosa import filters - self.device = device - self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32) - self.winlen = winlen or 2 ** radix2_exp - self.nfft = nfft or self.winlen - self.winhop = winhop or (self.winlen // 4) - self.sr = sample_rate - self.n_chroma = n_chroma - self.norm = norm - self.argmax = argmax - self.window = torch.hann_window(self.winlen).to(device) - self.fbanks = torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0, - n_chroma=self.n_chroma)).to(device) - self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen, - hop_length=self.winhop, power=2, center=True, - pad=0, normalized=True).to(device) - - def forward(self, wav): - with self.autocast: - T = wav.shape[-1] - # in case we are getting a wav that was dropped out (nullified) - # make sure wav length is no less that nfft - if T < self.nfft: - pad = self.nfft - T - r = 0 if pad % 2 == 0 else 1 - wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0) - assert wav.shape[-1] == self.nfft, f'expected len {self.nfft} but got {wav.shape[-1]}' - spec = self.spec(wav).squeeze(1) - raw_chroma = torch.einsum("cf,...ft->...ct", self.fbanks, spec) - norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6) - norm_chroma = rearrange(norm_chroma, "b d t -> b t d") - - if self.argmax: - idx = norm_chroma.argmax(-1, keepdims=True) - norm_chroma[:] = 0 - norm_chroma.scatter_(dim=-1, index=idx, value=1) - - return norm_chroma - - -def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str): - """Utility function for nullifying an attribute inside an ConditioningAttributes object. - If the condition is of type "wav", then nullify it using "nullify_condition". - If the condition is of any other type, set its' value to None. - Works in-place. 
- """ - if condition_type not in ["text", "wav"]: - raise ValueError( - "dropout_condition got an unexpected condition type!" - f" expected 'wav' or 'text' but got '{condition_type}'" - ) - - if condition not in getattr(sample, condition_type): - raise ValueError( - "dropout_condition received an unexpected condition!" - f" expected wav={sample.wav.keys()} and text={sample.text.keys()}" - f"but got '{condition}' of type '{condition_type}'!" - ) - - if condition_type == "wav": - wav, length, path = sample.wav[condition] - sample.wav[condition] = nullify_wav(wav) - else: - sample.text[condition] = None - - return sample - - -class DropoutModule(nn.Module): - """Base class for all dropout modules.""" - def __init__(self, seed: int = 1234): - super().__init__() - self.rng = torch.Generator() - self.rng.manual_seed(seed) - - -class AttributeDropout(DropoutModule): - """Applies dropout with a given probability per attribute. This is different from the behavior of - ClassifierFreeGuidanceDropout as this allows for attributes to be dropped out separately. For example, - "artist" can be dropped while "genre" remains. This is in contrast to ClassifierFreeGuidanceDropout - where if "artist" is dropped "genre" must also be dropped. - - Args: - p (tp.Dict[str, float]): A dict mapping between attributes and dropout probability. For example: - ... - "genre": 0.1, - "artist": 0.5, - "wav": 0.25, - ... - active_on_eval (bool, optional): Whether the dropout is active at eval. Default to False. - seed (int, optional): Random seed. - """ - def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234): - super().__init__(seed=seed) - self.active_on_eval = active_on_eval - # construct dict that return the values from p otherwise 0 - self.p = {} - for condition_type, probs in p.items(): - self.p[condition_type] = defaultdict(lambda: 0, probs) - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (tp.List[ConditioningAttributes]): List of conditions. - Returns: - tp.List[ConditioningAttributes]: List of conditions after certain attributes were set to None. - """ - if not self.training and not self.active_on_eval: - return samples - - samples = deepcopy(samples) - - for condition_type, ps in self.p.items(): # for condition types [text, wav] - for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre]) - if torch.rand(1, generator=self.rng).item() < p: - for sample in samples: - dropout_condition(sample, condition_type, condition) - - return samples - - def __repr__(self): - return f"AttributeDropout({dict(self.p)})" - - -class ClassifierFreeGuidanceDropout(DropoutModule): - """Applies Classifier Free Guidance dropout, meaning all attributes - are dropped with the same probability. - - Args: - p (float): Probability to apply condition dropout during training. - seed (int): Random seed. - """ - def __init__(self, p: float, seed: int = 1234): - super().__init__(seed=seed) - self.p = p - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (tp.List[ConditioningAttributes]): List of conditions. - Returns: - tp.List[ConditioningAttributes]: List of conditions after all attributes were set to None. 
- """ - if not self.training: - return samples - - # decide on which attributes to drop in a batched fashion - drop = torch.rand(1, generator=self.rng).item() < self.p - if not drop: - return samples - - # nullify conditions of all attributes - samples = deepcopy(samples) - - for condition_type in ["wav", "text"]: - for sample in samples: - for condition in sample.attributes[condition_type]: - dropout_condition(sample, condition_type, condition) - - return samples - - def __repr__(self): - return f"ClassifierFreeGuidanceDropout(p={self.p})" - - -class ConditioningProvider(nn.Module): - """Main class to provide conditions given all the supported conditioners. - - Args: - conditioners (dict): Dictionary of conditioners. - merge_text_conditions_p (float, optional): Probability to merge all text sources - into a single text condition. Defaults to 0. - drop_desc_p (float, optional): Probability to drop the original description - when merging all text sources into a single text condition. Defaults to 0. - device (tp.Union[torch.device, str], optional): Device for conditioners and output condition types. - """ - def __init__( - self, - conditioners: tp.Dict[str, BaseConditioner], - merge_text_conditions_p: float = 0, - drop_desc_p: float = 0, - device: tp.Union[torch.device, str] = "cpu", - ): - super().__init__() - self.device = device - self.merge_text_conditions_p = merge_text_conditions_p - self.drop_desc_p = drop_desc_p - self.conditioners = nn.ModuleDict(conditioners) - - @property - def text_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)] - - @property - def wav_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)] - - @property - def has_wav_condition(self): - return len(self.wav_conditions) > 0 - - def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]: - """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly. - This should be called before starting any real GPU work to avoid synchronization points. - This will return a dict matching conditioner names to their arbitrary tokenized representations. - - Args: - inputs (list[ConditioningAttribres]): List of ConditioningAttributes objects containing - text and wav conditions. - """ - assert all([type(x) == ConditioningAttributes for x in inputs]), \ - "got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]" \ - f" but types were {set([type(x) for x in inputs])}" - - output = {} - text = self._collate_text(inputs) - wavs = self._collate_wavs(inputs) - - assert set(text.keys() | wavs.keys()).issubset(set(self.conditioners.keys())), \ - f"got an unexpected attribute! Expected {self.conditioners.keys()}, got {text.keys(), wavs.keys()}" - - for attribute, batch in chain(text.items(), wavs.items()): - output[attribute] = self.conditioners[attribute].tokenize(batch) - return output - - def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]: - """Compute pairs of `(embedding, mask)` using the configured conditioners - and the tokenized representations. The output is for example: - - { - "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])), - "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])), - ... - } - - Args: - tokenized (dict): Dict of tokenized representations as returned by `tokenize()`. 
- """ - output = {} - for attribute, inputs in tokenized.items(): - condition, mask = self.conditioners[attribute](inputs) - output[attribute] = (condition, mask) - return output - - def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]: - """Given a list of ConditioningAttributes objects, compile a dictionary where the keys - are the attributes and the values are the aggregated input per attribute. - For example: - Input: - [ - ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...), - ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...), - ] - Output: - { - "genre": ["Rock", "Hip-hop"], - "description": ["A rock song with a guitar solo", "A hip-hop verse"] - } - """ - batch_per_attribute: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list) - - def _merge_conds(cond, merge_text_conditions_p=0, drop_desc_p=0): - def is_valid(k, v): - k_valid = k in ['key', 'bpm', 'genre', 'moods', 'instrument'] - v_valid = v is not None and isinstance(v, (int, float, str, list)) - return k_valid and v_valid - - def process_value(v): - if isinstance(v, (int, float, str)): - return v - if isinstance(v, list): - return ", ".join(v) - else: - RuntimeError(f"unknown type for text value! ({type(v), v})") - - desc = cond.text['description'] - meta_data = "" - if random.uniform(0, 1) < merge_text_conditions_p: - meta_pairs = [f'{k}: {process_value(v)}' for k, v in cond.text.items() if is_valid(k, v)] - random.shuffle(meta_pairs) - meta_data = ". ".join(meta_pairs) - desc = desc if not random.uniform(0, 1) < drop_desc_p else None - - if desc is None: - desc = meta_data if len(meta_data) > 1 else None - else: - desc = desc.rstrip('.') + ". " + meta_data - cond.text['description'] = desc.strip() if desc else None - - if self.training and self.merge_text_conditions_p: - for sample in samples: - _merge_conds(sample, self.merge_text_conditions_p, self.drop_desc_p) - - texts = [x.text for x in samples] - for text in texts: - for condition in self.text_conditions: - batch_per_attribute[condition].append(text[condition]) - - return batch_per_attribute - - def _collate_wavs(self, samples: tp.List[ConditioningAttributes]): - """Generate a dict where the keys are attributes by which we fetch similar wavs, - and the values are Tensors of wavs according to said attribtues. - - *Note*: by the time the samples reach this function, each sample should have some waveform - inside the "wav" attribute. It should be either: - 1. A real waveform - 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset) - 3. A null waveform due to it being dropped in a dropout module (nullified by dropout) - - Args: - samples (tp.List[ConditioningAttributes]): List of ConditioningAttributes samples. - Returns: - dict: A dicionary mapping an attribute name to wavs. 
- """ - wavs = defaultdict(list) - lens = defaultdict(list) - paths = defaultdict(list) - out = {} - - for sample in samples: - for attribute in self.wav_conditions: - wav, length, path = sample.wav[attribute] - wavs[attribute].append(wav.flatten()) - lens[attribute].append(length) - paths[attribute].append(path) - - # stack all wavs to a single tensor - for attribute in self.wav_conditions: - stacked_wav, _ = collate(wavs[attribute], dim=0) - out[attribute] = WavCondition(stacked_wav.unsqueeze(1), - torch.cat(lens['self_wav']), paths[attribute]) # type: ignore - - return out - - -class ConditionFuser(StreamingModule): - """Condition fuser handles the logic to combine the different conditions - to the actual model input. - - Args: - fuse2cond (tp.Dict[str, str]): A dictionary that says how to fuse - each condition. For example: - { - "prepend": ["description"], - "sum": ["genre", "bpm"], - "cross": ["description"], - } - cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention. - cross_attention_pos_emb_scale (int): Scale for positional embeddings in cross attention if used. - """ - FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"] - - def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False, - cross_attention_pos_emb_scale: float = 1.0): - super().__init__() - assert all( - [k in self.FUSING_METHODS for k in fuse2cond.keys()] - ), f"got invalid fuse method, allowed methods: {self.FUSING_MEHTODS}" - self.cross_attention_pos_emb = cross_attention_pos_emb - self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale - self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond - self.cond2fuse: tp.Dict[str, str] = {} - for fuse_method, conditions in fuse2cond.items(): - for condition in conditions: - self.cond2fuse[condition] = fuse_method - - def forward( - self, - input: Tensor, - conditions: tp.Dict[str, ConditionType] - ) -> tp.Tuple[Tensor, tp.Optional[Tensor]]: - """Fuse the conditions to the provided model input. - - Args: - input (Tensor): Transformer input. - conditions (tp.Dict[str, ConditionType]): Dict of conditions. - Returns: - tp.Tuple[Tensor, Tensor]: The first tensor is the transformer input - after the conditions have been fused. The second output tensor is the tensor - used for cross-attention or None if no cross attention inputs exist. 
- """ - B, T, _ = input.shape - - if 'offsets' in self._streaming_state: - first_step = False - offsets = self._streaming_state['offsets'] - else: - first_step = True - offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device) - - assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \ - f"given conditions contain unknown attributes for fuser, " \ - f"expected {self.cond2fuse.keys()}, got {conditions.keys()}" - cross_attention_output = None - for cond_type, (cond, cond_mask) in conditions.items(): - op = self.cond2fuse[cond_type] - if op == "sum": - input += cond - elif op == "input_interpolate": - cond = rearrange(cond, "b t d -> b d t") - cond = F.interpolate(cond, size=input.shape[1]) - input += rearrange(cond, "b d t -> b t d") - elif op == "prepend": - if first_step: - input = torch.cat([cond, input], dim=1) - elif op == "cross": - if cross_attention_output is not None: - cross_attention_output = torch.cat([cross_attention_output, cond], dim=1) - else: - cross_attention_output = cond - else: - raise ValueError(f"unknown op ({op})") - - if self.cross_attention_pos_emb and cross_attention_output is not None: - positions = torch.arange( - cross_attention_output.shape[1], - device=cross_attention_output.device - ).view(1, -1, 1) - pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1]) - cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb - - if self._is_streaming: - self._streaming_state['offsets'] = offsets + T - - return input, cross_attention_output diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Play Kasumi Rebirth V3 1 Full Version Online [2021].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Play Kasumi Rebirth V3 1 Full Version Online [2021].md deleted file mode 100644 index 0702c563fadd6dfe854dd2b168ce77c0ebbe317b..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Play Kasumi Rebirth V3 1 Full Version Online [2021].md +++ /dev/null @@ -1,6 +0,0 @@ -

                Play Kasumi Rebirth V3 1 Full Version Online


                Download File > https://cinurl.com/2uEXML



                - -December 26, 2021 - [2021] Play Kasumi Rebirth V3.1 full version online for free. Play Kasumi Rebirth V3.1 full version online. FREE DOWNLOAD: . zip Download file (link valid until 26/12/2011) Download Kasumi Rebirth V3.1 (EXE/RUS) (in Russian) (Cracked). .zip Download Kasumi Rebirth V3.1 (EXE/RUS) (in Russian) (Cracked). .zip .rar Download Kasumi Rebirth V3.1 (EXE/RUS) (in Russian) (Cracked). .zip .zip Download Kasumi Rebirth V3.1 (EXE/RUS) (in Russian) (Cracked). .zip .rar .zip Download Kasumi Rebirth V3.1 (EXE/RUS) (in Russian) (Cracked). .zip .zip 8a78ff9644
                -
                -
                -

                diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/hooks/__init__.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/hooks/__init__.py deleted file mode 100644 index 915af28cefab14a14c1188ed861161080fd138a3..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/hooks/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .checkpoint import CheckpointHook -from .closure import ClosureHook -from .ema import EMAHook -from .evaluation import DistEvalHook, EvalHook -from .hook import HOOKS, Hook -from .iter_timer import IterTimerHook -from .logger import (DvcliveLoggerHook, LoggerHook, MlflowLoggerHook, - NeptuneLoggerHook, PaviLoggerHook, TensorboardLoggerHook, - TextLoggerHook, WandbLoggerHook) -from .lr_updater import LrUpdaterHook -from .memory import EmptyCacheHook -from .momentum_updater import MomentumUpdaterHook -from .optimizer import (Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook, - GradientCumulativeOptimizerHook, OptimizerHook) -from .profiler import ProfilerHook -from .sampler_seed import DistSamplerSeedHook -from .sync_buffer import SyncBuffersHook - -__all__ = [ - 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', - 'OptimizerHook', 'Fp16OptimizerHook', 'IterTimerHook', - 'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 'MlflowLoggerHook', - 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook', - 'NeptuneLoggerHook', 'WandbLoggerHook', 'DvcliveLoggerHook', - 'MomentumUpdaterHook', 'SyncBuffersHook', 'EMAHook', 'EvalHook', - 'DistEvalHook', 'ProfilerHook', 'GradientCumulativeOptimizerHook', - 'GradientCumulativeFp16OptimizerHook' -] diff --git a/spaces/talari/MyGenAiChatBot/app.py b/spaces/talari/MyGenAiChatBot/app.py deleted file mode 100644 index 2dbf3ae89c2e3fdab7134107dd346f984dca8eb1..0000000000000000000000000000000000000000 --- a/spaces/talari/MyGenAiChatBot/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """Meet Riya, your youthful and witty personal assistant! At 21 years old, she's full of energy and always eager to help. Riya's goal is to assist you with any questions or problems you might have. Her enthusiasm shines through in every response, making interactions with her enjoyable and engaging. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. 
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Blumentals Rapid PHP 2020 V16.0.0.220 Crack UPDATED.md b/spaces/terfces0erbo/CollegeProjectV2/Blumentals Rapid PHP 2020 V16.0.0.220 Crack UPDATED.md deleted file mode 100644 index 822696656b10d7ef4185bb50bbc6fe6b9f671b30..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Blumentals Rapid PHP 2020 V16.0.0.220 Crack UPDATED.md +++ /dev/null @@ -1,6 +0,0 @@ -

                Blumentals Rapid PHP 2020 v16.0.0.220 Crack


                DOWNLOAD →→→ https://bytlly.com/2uGlMv



                - -  and PHP IDE is now even faster than it was before. 4fefd39f24
                -
                -
                -

                diff --git a/spaces/terfces0erbo/CollegeProjectV2/Brokeback Mountain 2005 Bluray 720p X264 Yify English Subtitles.md b/spaces/terfces0erbo/CollegeProjectV2/Brokeback Mountain 2005 Bluray 720p X264 Yify English Subtitles.md deleted file mode 100644 index dacd786e1e01fca044707516a12bdfef5640c4de..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Brokeback Mountain 2005 Bluray 720p X264 Yify English Subtitles.md +++ /dev/null @@ -1,6 +0,0 @@ -

                brokeback mountain 2005 bluray 720p x264 yify english subtitles


Download: https://bytlly.com/2uGkCW



                -
                -Bluray.720p.x264.YIFY . 0, English, Subtitle Brokeback Mountain 2005 720p BDRip x264.. Brokeback Mountain movie YIFY subtitles - details.. : Brokeback. 4d29de3e1b
                -
                -
                -

                diff --git a/spaces/terfces0erbo/CollegeProjectV2/Imagenomic Portraiture Crack Serial Codes [PATCHED].md b/spaces/terfces0erbo/CollegeProjectV2/Imagenomic Portraiture Crack Serial Codes [PATCHED].md deleted file mode 100644 index 53380b3c8a00e22256bbd9c095edc9321c1653ef..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Imagenomic Portraiture Crack Serial Codes [PATCHED].md +++ /dev/null @@ -1,64 +0,0 @@ -

                imagenomic portraiture crack serial codes


                Download ····· https://bytlly.com/2uGiBl



                - -With this tool, you can add a professional look to your photos even at the level of one element. Its manual adjustment allows you to use full control. - -Available for Windows, Mac, Linux, Android, and iOS. - -Its accessible features include: - -Auto-distortion, - -Magic Brush, - -Magic Brush Plus, - -Bevel & Emboss, - -Soft Light, - -Sepia, - -Soft Focus, - -HDR (High Dynamic Range), - -B/W conversion, - -Panorama, - -Photo manipulation, - -Competition. - -Encompasses 32,000+ Images, - -Presentation, - -Web-base, - -Online generator, - -Collage, - -Re-pixilation, - -Watermark, - -Metadata (TITLE, ALBUM and AUTHOR) - -You can use Digital Photo Editor to get rid of various flaws in your images, like uneven exposure, brightness, contrast, color, etc. This tool supports both the editing of RAW and non-RAW images. And, it also supports unlimited layer, unlimited undo, or a free trial. - -Key Features: - -Supports the latest Photoshop CC 2018, - -Convert, rotate, crop, adjust exposure, contrast, shadows, brightness, white balance, color balance, colorizing and tint, - -It is a part of the Vegas Pro 17 and Elements 14 which can be used for the post-production of images for your websites or blogs. It is a powerful tool which will enable you to make various adjustments like adjusting white balance, grayscale and color, and other effects in an image. It also gives you the opportunity to convert an image from RGB color to grayscale, adjust colors and brightness, and add a shadow. All these operations can be performed in many ways. - -It is a powerful tool which will enable you to edit the colors, shadows, brightness and contrast of images. It is a highly advanced tool with more than 100+ preset features. You can use it with Photoshop CS6, CS5, CS4, CS3, CC, CC 2014, CC 2013, CS2, CS1, and other versions. It allows you to quickly change the look of your images by using a dozen filter presets. It is compatible with Windows, Mac, and Linux. - -It is a powerful tool which will enable you to enhance the color, black & white, and 4fefd39f24
                -
                -
                -

                diff --git a/spaces/tez321/pipeline-visualizer/app.py b/spaces/tez321/pipeline-visualizer/app.py deleted file mode 100644 index e302ada6a32bb745ab20a2dc97e7436232a0904a..0000000000000000000000000000000000000000 --- a/spaces/tez321/pipeline-visualizer/app.py +++ /dev/null @@ -1,36 +0,0 @@ -import spacy_streamlit -from pathlib import Path -import srsly -import importlib -import random - -DEFAULT_MODEL = "en_core_web_trf" -DEFAULT_TEXT = " " - - -def get_all_models(): - with open("requirements.txt") as f: - content = f.readlines() - models = [] - for line in content: - if "huggingface.co" in line: - models.append(line.split("/")[4]) - return models - -MODELS = get_all_models() - -def get_default_text(nlp): - # Check if spaCy has built-in example texts for the language - try: - examples = importlib.import_module(f".lang.{nlp.lang}.examples", "spacy") - return examples.sentences[0] - except (ModuleNotFoundError, ImportError): - return "" - -spacy_streamlit.visualize( - MODELS, - default_model=DEFAULT_MODEL, - visualizers=["ner"], - show_visualizer_select=True, - get_default_text=get_default_text -) \ No newline at end of file diff --git a/spaces/thomasjeon/stabilityai-stable-diffusion-2-1/README.md b/spaces/thomasjeon/stabilityai-stable-diffusion-2-1/README.md deleted file mode 100644 index ed13431546272df83ab78e684bd0dbf5fbe48daa..0000000000000000000000000000000000000000 --- a/spaces/thomasjeon/stabilityai-stable-diffusion-2-1/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stabilityai Stable Diffusion 2 1 -emoji: 🌖 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Astro Vision Lifesign 12.5 with Key How to Generate Accurate Horoscopes and Predictions.md b/spaces/tialenAdioni/chat-gpt-api/logs/Astro Vision Lifesign 12.5 with Key How to Generate Accurate Horoscopes and Predictions.md deleted file mode 100644 index 13166bdb1a838bb6bd2d87e5c1270cbd75d29e0f..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Astro Vision Lifesign 12.5 with Key How to Generate Accurate Horoscopes and Predictions.md +++ /dev/null @@ -1,103 +0,0 @@ - -

                Astro Vision Lifesign 12.5 With Key: A Comprehensive Review

                -

If you are looking for software that can help you create and analyze horoscopes based on the Vedic astrology system, then you might want to check out Astro Vision Lifesign. This software is one of the most popular and trusted products in the field of astrology, with millions of users across the world. In this article, we will review the features, benefits, installation process, and usage of Astro Vision Lifesign 12.5 with key, which is the latest version of this software.

                -

                astro vision lifesign 12.5 with key


                DOWNLOAD ————— https://urlcod.com/2uK9LO



                -

                Features of Astro Vision Lifesign 12.5

                -

                Astro Vision Lifesign 12.5 is a comprehensive astrology software that offers a variety of features to suit your needs. Here are some of the main features of this software:

                -
                  -
                • Horoscope generation and analysis: You can generate horoscopes for yourself or anyone else by entering their birth details such as date, time, and place of birth. The software will calculate the planetary positions, ascendant, moon sign, nakshatra, etc., and display them in various charts such as Rasi, Navamsa, Bhava, etc. You can also analyze different aspects of the horoscope such as personality, character, health, wealth, career, education, marriage, children, etc., by using various tools such as Ashtakavarga, Shadbala, Bhava Bala, etc.
                • -
                • Panchanga predictions and remedies: You can get predictions based on the panchanga factors such as Tithi, Vara, Nakshatra, Yoga, and Karana for any day or period of time. The software will also suggest suitable remedies such as mantras, gemstones, yantras, etc., to overcome any obstacles or difficulties caused by the panchanga factors.
                • -
                • Dasha predictions and remedies: You can get predictions based on the dasha system such as Vimshottari Dasha, Ashtottari Dasha, Yogini Dasha, etc., for any period of time. The software will also suggest suitable remedies such as mantras, gemstones, yantras, etc., to enhance the positive effects or reduce the negative effects of the dasha periods.
                • -
                • Transit predictions and remedies: You can get predictions based on the transit of planets such as Sun, Moon, Mars, Mercury, Jupiter, Venus, Saturn, Rahu, and Ketu for any period of time. The software will also suggest suitable remedies such as mantras, gemstones, yantras, etc., to cope with the changes or challenges caused by the transit of planets.
                • -
                • Yogas and their effects: You can get information about the various yogas or combinations of planets that are formed in your horoscope and their effects on your life. The software will also suggest suitable remedies such as mantras, gemstones, etc., to overcome the negative effects or enhance the positive effects of the yogas.
                • -
                • Compatibility report and marriage matching: You can get a compatibility report for yourself and your partner by entering your birth details and checking the various factors such as mental compatibility, physical compatibility, longevity, health, progeny, etc. The software will also perform marriage matching or kundali milan by comparing the birth charts of the prospective bride and groom and giving a score based on the ashtakoota or dasakoota methods.
                • -
                -

                Benefits of Astro Vision Lifesign 12.5

                -

Astro Vision Lifesign 12.5 is not just software that gives you horoscopes and predictions, but also a tool that can help you improve your life and achieve your goals. Here are some of the benefits of using this software:

                -
                  -
                • Accurate and reliable calculations: The software is based on the principles of Vedic astrology, which is one of the oldest and most accurate systems of astrology in the world. The software uses advanced mathematical algorithms and precise astronomical data to calculate the planetary positions and other astrological factors. You can trust the software to give you accurate and reliable results.
                • -
                • User-friendly interface and customization options: The software has a simple and easy-to-use interface that allows you to enter your birth details and generate your horoscope in a matter of seconds. You can also customize various aspects of the software such as chart style, ayanamsa, house system, etc., according to your preference. You can also save your horoscope and other reports for future reference or print them out for convenience.
                • -
                • Multiple language support and regional charts: The software supports multiple languages such as English, Hindi, Tamil, Telugu, Malayalam, Kannada, Marathi, Bengali, Oriya and Gujarati. You can choose your preferred language and get your horoscope and other reports in that language. The software also supports various regional charts such as North Indian, South Indian, East Indian, Kerala, Sri Lankan, etc., so that you can view your horoscope in your familiar chart style.
                • -
                • Detailed reports and printouts: The software gives you detailed reports on various aspects of your horoscope such as planetary positions, bhava chart, shodashavarga table, ashtakavarga table, vimshottari dasha table, panchanga table, yogas table, etc. You can also get detailed predictions and remedies for various aspects of your life such as personality, health, wealth, career, education, marriage, children, etc. You can print out these reports or save them as PDF files for easy sharing.
                • -
                -

                How to Download and Install Astro Vision Lifesign 12.5 With Key

                -

                If you are interested in using Astro Vision Lifesign 12.5 with key, then you need to follow these steps to download and install it on your computer:

                -
                  -
                1. Go to the official website of Astro Vision Lifesign 12.5 with key and click on the "Download" button.
                2. -
                3. You will be redirected to a page where you need to enter your name, email address and phone number to get the download link.
                4. -
                5. Check your email inbox for the download link and click on it to start downloading the setup file.
                6. -
                7. Once the download is complete, run the setup file and follow the instructions to install the software on your computer.
                8. -
                9. After the installation is complete, launch the software and enter the key that you received in your email to activate it.
                10. -
                11. You are now ready to use Astro Vision Lifesign 12.5 with key.
                12. -
                -

                How to Use Astro Vision Lifesign 12.5 With Key

                -

                Using Astro Vision Lifesign 12.5 with key is very easy and simple. Here are some steps to help you use it effectively:

                -


                -
                  -
                1. To generate your horoscope or anyone else's horoscope, click on the "New Horoscope" button on the main screen.
                2. -
3. Enter the birth details such as name, date of birth, time of birth, place of birth, etc., and click on the "Generate Horoscope" button.
                4. -
                5. To view your predictions and remedies based on the panchanga factors, click on the "Panchanga Predictions" button on the main screen.
                6. -
                7. To view your predictions and remedies based on the dasha system, click on the "Dasha Predictions" button on the main screen.
                8. -
                9. To view your predictions and remedies based on the transit of planets, click on the "Transit Predictions" button on the main screen.
                10. -
                11. To view your yogas and their effects, click on the "Yogas" button on the main screen.
                12. -
                13. To get a compatibility report for yourself and your partner, click on the "Compatibility Report" button on the main screen and enter your partner's birth details.
                14. -
                15. To perform marriage matching or kundali milan, click on the "Marriage Matching" button on the main screen and enter the birth details of the prospective bride and groom.
                16. -
                -

                Conclusion

                -

                Astro Vision Lifesign 12.5 with key is a powerful and versatile astrology software that can help you create and analyze horoscopes, get predictions and remedies for various aspects of your life, and find your perfect partner. Whether you are an astrologer, a student of astrology, or a curious seeker of knowledge, this software can provide you with valuable insights and guidance. You can download and install this software easily and use it in your preferred language and chart style. You can also get detailed reports and printouts of your horoscope and other features. If you want to explore the world of Vedic astrology and discover its secrets, then Astro Vision Lifesign 12.5 with key is the software for you.

                -

                FAQs

                -

                Here are some common questions and answers about Astro Vision Lifesign 12.5 with key:

                -
                  -
                1. What is the difference between Astro Vision Lifesign 12.5 with key and Astro Vision Lifesign 12.5 without key?
                  The difference between Astro Vision Lifesign 12.5 with key and Astro Vision Lifesign 12.5 without key is that the former is a paid version that gives you access to all the features of the software, while the latter is a free version that gives you limited access to some of the features of the software.
                2. -
                3. How can I get Astro Vision Lifesign 12.5 with key?
                  You can get Astro Vision Lifesign 12.5 with key by visiting the official website of Astro Vision Lifesign 12.5 with key and clicking on the "Download" button. You will need to enter your name, email address and phone number to get the download link and the key in your email inbox.
                4. -
                5. Is Astro Vision Lifesign 12.5 with key compatible with Windows 10?
                  Yes, Astro Vision Lifesign 12.5 with key is compatible with Windows 10 as well as Windows 8.1, Windows 8, Windows 7, Windows Vista, Windows XP, Windows ME, Windows NT4 (SP6), Windows NT3 (SP3), Windows 2000 (SP4), Windows 98 (SE), Windows 95 (OSR2).
                6. -
                7. Can I use Astro Vision Lifesign 12.5 with key on my mobile phone or tablet?
                  No, Astro Vision Lifesign 12.5 with key is a desktop software that can only be used on your computer or laptop. However, you can use Astro Vision Lifesign ME Lite , which is a mobile app that offers some of the features of Astro Vision Lifesign 12.5 with key on your Android phone or tablet.
                8. -
                9. Can I share my horoscope or other reports generated by Astro Vision Lifesign 12.5 with key with others?
                  Yes, you can share your horoscope or other reports generated by Astro Vision Lifesign 12.5 with key with others by saving them as PDF files or printing them out. You can also email them or upload them to social media platforms.
                10. -
                - : https://astro-vision-lifesign-home.software.informer.com/12.5/ : https://play.google.com/store/apps/details?id=com.astrovision.horoscope

                0a6ba089eb
                -
                -
                \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/AutoCAD Architecture 2014 Xforce Keygen 64 Bits ((EXCLUSIVE)).md b/spaces/tialenAdioni/chat-gpt-api/logs/AutoCAD Architecture 2014 Xforce Keygen 64 Bits ((EXCLUSIVE)).md deleted file mode 100644 index f488cd6c58b948ac84d8b9a9113b30320c225072..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/AutoCAD Architecture 2014 Xforce Keygen 64 Bits ((EXCLUSIVE)).md +++ /dev/null @@ -1,29 +0,0 @@ -
                -

                How to Activate AutoCAD Architecture 2014 with Xforce Keygen 64 Bits

                -

                AutoCAD Architecture 2014 is a software that helps you design and document more efficiently with tools created for architects. It supports both 32-bit and 64-bit operating systems. To activate AutoCAD Architecture 2014, you need to use a key generator (keygen) that can generate a valid serial number and an activation code. One of the most popular keygens for AutoCAD products is Xforce Keygen 64 Bits, which can be downloaded from various sources on the internet. Here are the steps to activate AutoCAD Architecture 2014 with Xforce Keygen 64 Bits:

                -

                AutoCAD Architecture 2014 Xforce Keygen 64 Bits


Download Zip: https://urlcod.com/2uK1Mt



                -
                  -
                1. Finish the installation and restart Autodesk AutoCAD Architecture 2014.
                2. -
                3. Before clicking on Activate, make sure to disable your internet connection and antivirus software.
                4. -
                5. Click on Activate and if it tells you that your serial is wrong, simply click on close and click on activate again.
                6. -
                7. Select "I have an activation code from Autodesk".
                8. -
                9. Start Xforce Keygen 64 Bits and click on Patch. You should see "Successfully patched".
                10. -
                11. Copy the request code from the activation screen and paste it into the keygen. Then click on Generate.
                12. -
                13. Copy the activation code from the keygen and go back to the activation screen. Paste the code and click on Next.
                14. -
                15. You have successfully activated AutoCAD Architecture 2014.
                16. -
                -

                Note: This method is not endorsed by Autodesk and may violate their terms of service. Use it at your own risk.

                - -

                AutoCAD Architecture 2014 is a powerful software that allows you to create and modify architectural drawings with ease. It has many features and tools that can help you design faster and more accurately. Some of the features and tools are:

                -
                  -
                • Architectural Styles: You can apply predefined styles to your elements, such as walls, doors, windows, roofs, stairs, etc. You can also create and edit your own styles to suit your needs.
                • -
                • Walls, Doors, and Windows: You can draw walls with different types and materials, and insert doors and windows automatically. You can also edit their properties, such as height, width, alignment, offset, etc.
                • -
                • Roofs and Slabs: You can create roofs and slabs with various shapes and slopes. You can also add openings, skylights, dormers, etc.
                • -
                • Stairs and Railings: You can create stairs and railings with different types and configurations. You can also modify their parameters, such as tread, riser, landing, baluster, etc.
                • -
                • Sections and Elevations: You can generate sections and elevations from your model automatically. You can also add annotations, dimensions, hatches, etc.
                • -
                • Schedules and Tags: You can create schedules and tags for your elements, such as doors, windows, rooms, furniture, etc. You can also customize their appearance and content.
                • -
                • Rendering and Visualization: You can render your model with realistic materials and lighting effects. You can also create walkthroughs and animations to present your design.
                • -
                -

                AutoCAD Architecture 2014 is a great software for architects who want to improve their productivity and quality of work. It is compatible with other Autodesk products, such as Revit Architecture, AutoCAD Civil 3D, AutoCAD MEP, etc. It also supports industry standards, such as DWG, DXF, IFC, PDF, etc.

                7196e7f11a
                -
                -
                \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Folclor Literar Romanesc Mihai Pop Pdf Download.md b/spaces/tialenAdioni/chat-gpt-api/logs/Folclor Literar Romanesc Mihai Pop Pdf Download.md deleted file mode 100644 index 01640451adadd38039c52f7077352e54987ea9c7..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Folclor Literar Romanesc Mihai Pop Pdf Download.md +++ /dev/null @@ -1,19 +0,0 @@ - -Hello, this is Bing. I can help you with writing a title and an article with SEO optimization and HTML formatting for the keyword "Folclor Literar Romanesc Mihai Pop Pdf Download". Here is a possible title and article: - -

                How to Download Folclor Literar Romanesc by Mihai Pop in PDF Format

                -

                Folclor Literar Romanesc (Romanian Literary Folklore) is a classic book on the oral literature of Romania, written by the renowned folklorist Mihai Pop. The book covers various genres and themes of Romanian folklore, such as ritual and ceremonial poetry, epic songs, fairy tales, proverbs, riddles, and more. It also analyzes the cultural and historical context, the artistic features, and the social functions of folk literature.

                -

                Folclor Literar Romanesc Mihai Pop Pdf Download


                Download File –––––>>> https://urlcod.com/2uKa5H



                -

                If you are interested in learning more about the rich and diverse heritage of Romanian folklore, you might want to download Folclor Literar Romanesc by Mihai Pop in PDF format. This way, you can read it on your computer, tablet, or smartphone, without having to buy a physical copy. However, finding a reliable and legal source for downloading this book might not be easy. That's why we have prepared this guide to help you find and download Folclor Literar Romanesc by Mihai Pop in PDF format.

                -

                Step 1: Search for Folclor Literar Romanesc by Mihai Pop on Google Books

                -

                One of the best places to look for Folclor Literar Romanesc by Mihai Pop in PDF format is Google Books. Google Books is a service that allows you to search, preview, and read millions of books online. Some of these books are available for free download or purchase, while others are only accessible through libraries or publishers.

                -

                To search for Folclor Literar Romanesc by Mihai Pop on Google Books, you need to go to books.google.com and type "Folclor Literar Romanesc Mihai Pop" in the search box. You will see several results for this book, published in different years and editions. You can click on each result to see more details about the book, such as the author, the publisher, the ISBN, the number of pages, and a preview of some pages.

                -

                -

                Step 2: Check if Folclor Literar Romanesc by Mihai Pop is Available for Free Download or Purchase

                -

                Once you have found the edition of Folclor Literar Romanesc by Mihai Pop that you want to download, you need to check if it is available for free download or purchase on Google Books. To do this, you need to look for a button that says "EBOOK - FREE" or "BUY EBOOK" on the right side of the book's page. If you see this button, it means that you can download or buy the book in PDF format from Google Books.

                -

                For example, according to our search results[^1^] [^2^], the 1990 edition of Folclor Literar Romanesc by Mihai Pop is not available for free download or purchase on Google Books. However, the 1976 edition of Folclor Literar Romanesc by Mihai Pop is available for free download on Google Books[^2^]. To download this edition, you need to click on the "EBOOK - FREE" button and then choose "Download PDF" from the drop-down menu. You will be asked to sign in with your Google account and agree to the terms of service before you can download the file.

                -

                Step 3: Search for Other Sources for Downloading Folclor Literar Romanesc by Mihai Pop in PDF Format

                -

                If you cannot find a free or paid version of Folclor Literar Romanesc by Mihai Pop on Google Books, you might want to search for other sources for downloading this book in PDF format. However, you need to be careful when doing this, as some of these sources might not be legal or safe. You should always respect the copyright of the author and publisher and avoid downloading pirated or infected files.

                -

                One possible source for downloading Folclor Literar Romanesc by Mihai Pop in PDF format is Monoskop[^3^], a wiki for art, culture, and media. Monoskop has a

                \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/Anokha-Bandhan-Hindi-BETTER-Full-Moviel.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/Anokha-Bandhan-Hindi-BETTER-Full-Moviel.md deleted file mode 100644 index 76d6f6dce421473d4c6c51b844e1946b70f5d04b..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/Anokha-Bandhan-Hindi-BETTER-Full-Moviel.md +++ /dev/null @@ -1,68 +0,0 @@ -## Anokha Bandhan Hindi Full Moviel - - - - - - - - - -**Download ✵✵✵ [https://urlcod.com/2txiQa](https://urlcod.com/2txiQa)** - - - - - - - - - - - - - -# Anokha Bandhan Hindi Full Movie: A Classic Family Drama - - - -If you are looking for a classic family drama that explores the bond between a sister-in-law and her brother-in-law, then you should watch **Anokha Bandhan Hindi Full Movie**. This movie was released in 1982 and starred Shabana Azmi, Navin Nischol and Ashok Kumar in the lead roles. The movie was directed by Mehul Kumar and had a melodious soundtrack composed by Usha Khanna. - - - -The plot of **Anokha Bandhan Hindi Full Movie** revolves around Annapurna (Shabana Azmi), a housewife who promises her dying mother-in-law that she will take good care of her little brother-in-law, Ram (Master Bittu). Annapurna treats Ram as her own son and showers him with love and affection. However, her own mother (Aruna Irani) is jealous of Ram and wants Annapurna to hate him. She creates a rift between Annapurna and Ram by spreading lies and manipulating them. Will Annapurna and Ram be able to overcome the evil schemes of Annapurna's mother? Will they be able to maintain their anokha bandhan (unique bond)? Watch **Anokha Bandhan Hindi Full Movie** to find out. - - - -**Anokha Bandhan Hindi Full Movie** is a heartwarming tale of love, loyalty and sacrifice. The movie showcases the emotional bond between a sister-in-law and her brother-in-law, which is rare to find in today's times. The movie also has some memorable scenes and dialogues that will touch your heart. The performances of Shabana Azmi, Navin Nischol and Ashok Kumar are commendable and realistic. The movie also has some comic relief provided by Jagdeep, Asrani and Paintal. - - - -You can watch **Anokha Bandhan Hindi Full Movie** online on ZEE5 or VI movies and tv for free with ads. You can also download it from Archive.org for free. Don't miss this classic family drama that will make you laugh, cry and appreciate your family more. - - - -Some of the highlights of **Anokha Bandhan Hindi Full Movie** are the songs sung by Lata Mangeshkar, Kishore Kumar, Asha Bhosle and Mohammed Rafi. The songs are catchy and melodious and suit the mood of the movie. Some of the popular songs are "Tu Itni Door Kyun Hai Maa", "Chhota Sa Bachcha Hoon Main", "Teri Meri Zindagi Ka Ajeeb Bandhan Hai" and "Main Tera Bhai Bhi Hoon". The songs convey the emotions of the characters and add to the charm of the movie. - - - -**Anokha Bandhan Hindi Full Movie** is a movie that will make you appreciate the value of family and relationships. It will also make you realize that sometimes, the most unexpected people can become your closest ones. The movie is a must-watch for anyone who loves family dramas and emotional stories. Watch **Anokha Bandhan Hindi Full Movie** today and enjoy this timeless classic. - - - -**Anokha Bandhan Hindi Full Movie** is a movie that has a universal appeal and can be enjoyed by people of all ages and backgrounds. 
The movie has a simple yet engaging storyline that keeps the viewers hooked till the end. The movie also has a social message about the importance of respecting and caring for one's elders and siblings. The movie shows that family is not only defined by blood, but also by love and trust. - - - -The movie also has some elements of suspense and thrill that add to the excitement of the movie. The movie has some twists and turns that keep the audience guessing about the outcome of the story. The movie also has some emotional scenes that will make you feel for the characters and their plight. The movie is a roller coaster ride of emotions that will make you laugh, cry and cheer for the protagonists. - - - -**Anokha Bandhan Hindi Full Movie** is a movie that deserves to be watched and appreciated by everyone. The movie is a gem of Indian cinema that showcases the talent and versatility of the actors and the director. The movie is a perfect blend of drama, comedy, romance, music and action. The movie is a masterpiece that will stay in your memory for a long time. - - 1b8d091108 - - - - - diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Chak De India 1 Hd Movie Download In Hindi.md b/spaces/tioseFevbu/cartoon-converter/scripts/Chak De India 1 Hd Movie Download In Hindi.md deleted file mode 100644 index 4233f08ced87950265c1e97bac2a3deb716f2b95..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Chak De India 1 Hd Movie Download In Hindi.md +++ /dev/null @@ -1,21 +0,0 @@ - -Here is a possible title and article with HTML formatting for the keyword "Chak De India 1 Hd Movie Download In Hindi": - -

                How to Watch Chak De India (2007) Online in HD Quality

                -

                Chak De India (2007) is a Bollywood movie that tells the story of a former hockey player who coaches the Indian women's national hockey team to victory. The movie stars Shah Rukh Khan as Kabir Khan, the coach, and features a talented ensemble cast of female actors as the hockey players. The movie was a critical and commercial success, earning praise for its inspiring message, realistic portrayal of sports, and powerful performances. Chak De India (2007) also won several awards, including the National Film Award for Best Popular Film Providing Wholesome Entertainment.

                -

                Chak De India 1 Hd Movie Download In Hindi


                Download ---> https://urlcod.com/2uHvhu



                -

                If you are looking for a way to watch Chak De India (2007) online in HD quality, you have several options. Here are some of them:

                -
                • Prime Video: You can stream Chak De India (2007) on Prime Video with a subscription or rent it for a fee. Prime Video offers HD quality and subtitles in various languages. You can also download the movie to watch offline on compatible devices. To watch Chak De India (2007) on Prime Video, go to [^1^].
                • PogoLinks: You can download Chak De India (2007) in HD quality from PogoLinks, a website that provides links to various sources of Bollywood movies. PogoLinks offers multiple resolutions and formats, such as 480p, 720p, 1080p, MKV, and MP4. You can also choose from different audio options, such as Hindi or dual audio. To download Chak De India (2007) from PogoLinks, go to [^2^].
                • Archive.org: You can watch or download Chak De India (2007) from Archive.org, a website that hosts millions of free books, movies, music, and more. Archive.org offers streaming and downloading options in various formats and qualities, such as OGG, MP4, and WEBM. You can also find the soundtrack of the movie on Archive.org. To watch or download Chak De India (2007) from Archive.org, go to [^3^].
                • JioSaavn: You can listen to the songs of Chak De India (2007) on JioSaavn, a music streaming service that offers a large collection of Bollywood songs. JioSaavn offers high-quality audio and lyrics for the songs. You can also create playlists and share them with your friends. To listen to the songs of Chak De India (2007) on JioSaavn, go to [^4^].
                -

                Chak De India (2007) is a movie that will make you cheer, cry, and feel proud of your country. It is a movie that celebrates the spirit of sportsmanship, teamwork, and patriotism. If you have not watched it yet, you should definitely give it a try. And if you have watched it before, you can always watch it again and relive the magic.


                Chak De India (2007) is directed by Shimit Amin, who is known for his realistic and gritty style of filmmaking. He has also directed movies like Ab Tak Chhappan (2004) and Rocket Singh: Salesman of the Year (2009). The script of Chak De India (2007) is written by Jaideep Sahni, who has also written movies like Khosla Ka Ghosla (2006), Bunty Aur Babli (2005), and Shuddh Desi Romance (2013).

                -

                The movie features Shah Rukh Khan as Kabir Khan, the coach of the Indian women's hockey team. Shah Rukh Khan is one of the most popular and influential actors in Bollywood, who has appeared in over 80 movies and won numerous awards. He is also known as the "King of Romance" for his romantic roles in movies like Dilwale Dulhania Le Jayenge (1995), Kuch Kuch Hota Hai (1998), and Veer-Zaara (2004). In Chak De India (2007), he plays a different kind of role, that of a strict and stern coach who motivates his team to overcome their personal and professional challenges.

                -

                The movie also features a talented ensemble cast of female actors who play the hockey players. They include Vidya Malvade as Vidya Sharma, the captain of the team; Sagarika Ghatge as Preeti Sabharwal, the star striker; Shilpa Shukla as Bindia Naik, the senior player; Chitrashi Rawat as Komal Chautala, the feisty forward; Anaitha Nair as Aliya Bose, the goalkeeper; Shubhi Mehta as Gunjan Lakhani, the defender; Seema Azmi as Rani Dispotta, the midfielder; Nisha Nair as Soimoi Kerketa, the defender; Sandia Furtado as Nethra Reddy, the forward; Arya Menon as Gul Iqbal, the defender; Masochon Zimik as Molly Zimik, the forward; Kimi Laldawla as Mary Ralte, the midfielder; Tanya Abrol as Balbir Kaur, the defender; and Nichola Sequeira as Nichola Sequeira, the forward. The actors underwent rigorous training in hockey to prepare for their roles.

                -

                \ No newline at end of file diff --git a/spaces/tobiascz/demotime/pytorch_grad_cam/__init__.py b/spaces/tobiascz/demotime/pytorch_grad_cam/__init__.py deleted file mode 100644 index 65c3e35932ded9a97cd883245ce041487fc4a01f..0000000000000000000000000000000000000000 --- a/spaces/tobiascz/demotime/pytorch_grad_cam/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -from pytorch_grad_cam.grad_cam import GradCAM -from pytorch_grad_cam.ablation_layer import AblationLayer, AblationLayerVit, AblationLayerFasterRCNN -from pytorch_grad_cam.ablation_cam import AblationCAM -from pytorch_grad_cam.xgrad_cam import XGradCAM -from pytorch_grad_cam.grad_cam_plusplus import GradCAMPlusPlus -from pytorch_grad_cam.score_cam import ScoreCAM -from pytorch_grad_cam.layer_cam import LayerCAM -from pytorch_grad_cam.eigen_cam import EigenCAM -from pytorch_grad_cam.eigen_grad_cam import EigenGradCAM -from pytorch_grad_cam.fullgrad_cam import FullGrad -from pytorch_grad_cam.guided_backprop import GuidedBackpropReLUModel -from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients -import pytorch_grad_cam.utils.model_targets -import pytorch_grad_cam.utils.reshape_transforms \ No newline at end of file diff --git a/spaces/tomofi/MMOCR/docs/en/training.md b/spaces/tomofi/MMOCR/docs/en/training.md deleted file mode 100644 index 2ea035d567394f40fd76943d191df9c2e7280993..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/docs/en/training.md +++ /dev/null @@ -1,130 +0,0 @@ -# Training - -## Training on a Single GPU - -You can use `tools/train.py` to train a model on a single machine with a CPU and optionally a GPU. - -Here is the full usage of the script: - -```shell -python tools/train.py ${CONFIG_FILE} [ARGS] -``` - -:::{note} -By default, MMOCR prefers GPU to CPU. If you want to train a model on CPU, please empty `CUDA_VISIBLE_DEVICES` or set it to -1 to make GPU invisible to the program. Note that CPU training requires **MMCV >= 1.4.4**. - -```bash -CUDA_VISIBLE_DEVICES= python tools/train.py ${CONFIG_FILE} [ARGS] -``` - -::: - -| ARGS | Type | Description | -| ----------------- | --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `--work-dir` | str | The target folder to save logs and checkpoints. Defaults to `./work_dirs`. | -| `--load-from` | str | Path to the pre-trained model, which will be used to initialize the network parameters. | -| `--resume-from` | str | Resume training from a previously saved checkpoint, which will inherit the training epoch and optimizer parameters. | -| `--no-validate` | bool | Disable checkpoint evaluation during training. Defaults to `False`. | -| `--gpus` | int | **Deprecated, please use --gpu-id.** Numbers of gpus to use. Only applicable to non-distributed training. | -| `--gpu-ids` | int*N | **Deprecated, please use --gpu-id.** A list of GPU ids to use. Only applicable to non-distributed training. | -| `--gpu-id` | int | The GPU id to use. Only applicable to non-distributed training. | -| `--seed` | int | Random seed. | -| `--diff_seed` | bool | Whether or not set different seeds for different ranks. 
| -| `--deterministic` | bool | Whether to set deterministic options for CUDNN backend. | -| `--cfg-options` | str | Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be of the form of either key="[a,b]" or key=a,b. The argument also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". Note that the quotation marks are necessary and that no white space is allowed. | -| `--launcher` | 'none', 'pytorch', 'slurm', 'mpi' | Options for job launcher. | -| `--local_rank` | int | Used for distributed training. | -| `--mc-config` | str | Memory cache config for image loading speed-up during training. | - -## Training on Multiple GPUs - -MMOCR implements **distributed** training with `MMDistributedDataParallel`. (Please refer to [datasets.md](datasets.md) to prepare your datasets) - -```shell -[PORT={PORT}] ./tools/dist_train.sh ${CONFIG_FILE} ${WORK_DIR} ${GPU_NUM} [PY_ARGS] -``` - -| Arguments | Type | Description | -| --------- | ---- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `PORT` | int | The master port that will be used by the machine with rank 0. Defaults to 29500. **Note:** If you are launching multiple distrbuted training jobs on a single machine, you need to specify different ports for each job to avoid port conflicts. | -| `PY_ARGS` | str | Arguments to be parsed by `tools/train.py`. | - -## Training on Multiple Machines - -MMOCR relies on torch.distributed package for distributed training. Thus, as a basic usage, one can launch distributed training via PyTorch’s [launch utility](https://pytorch.org/docs/stable/distributed.html#launch-utility). - -## Training with Slurm - -If you run MMOCR on a cluster managed with [Slurm](https://slurm.schedmd.com/), you can use the script `slurm_train.sh`. - -```shell -[GPUS=${GPUS}] [GPUS_PER_NODE=${GPUS_PER_NODE}] [CPUS_PER_TASK=${CPUS_PER_TASK}] [SRUN_ARGS=${SRUN_ARGS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} [PY_ARGS] -``` - -| Arguments | Type | Description | -| --------------- | ---- | ----------------------------------------------------------------------------------------------------------- | -| `GPUS` | int | The number of GPUs to be used by this task. Defaults to 8. | -| `GPUS_PER_NODE` | int | The number of GPUs to be allocated per node. Defaults to 8. | -| `CPUS_PER_TASK` | int | The number of CPUs to be allocated per task. Defaults to 5. | -| `SRUN_ARGS` | str | Arguments to be parsed by srun. Available options can be found [here](https://slurm.schedmd.com/srun.html). | -| `PY_ARGS` | str | Arguments to be parsed by `tools/train.py`. | - -Here is an example of using 8 GPUs to train a text detection model on the dev partition. - -```shell -./tools/slurm_train.sh dev psenet-ic15 configs/textdet/psenet/psenet_r50_fpnf_sbn_1x_icdar2015.py /nfs/xxxx/psenet-ic15 -``` - -### Running Multiple Training Jobs on a Single Machine - -If you are launching multiple training jobs on a single machine with Slurm, you may need to modify the port in configs to avoid communication conflicts. 
- -For example, in `config1.py`, - -```python -dist_params = dict(backend='nccl', port=29500) -``` - -In `config2.py`, - -```python -dist_params = dict(backend='nccl', port=29501) -``` - -Then you can launch two jobs with `config1.py` ang `config2.py`. - -```shell -CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} -CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} -``` - -## Commonly Used Training Configs - -Here we list some configs that are frequently used during training for quick reference. - -```python -total_epochs = 1200 -data = dict( - # Note: User can configure general settings of train, val and test dataloader by specifying them here. However, their values can be overridden in dataloader's config. - samples_per_gpu=8, # Batch size per GPU - workers_per_gpu=4, # Number of workers to process data for each GPU - train_dataloader=dict(samples_per_gpu=10, drop_last=True), # Batch size = 10, workers_per_gpu = 4 - val_dataloader=dict(samples_per_gpu=6, workers_per_gpu=1), # Batch size = 6, workers_per_gpu = 1 - test_dataloader=dict(workers_per_gpu=16), # Batch size = 8, workers_per_gpu = 16 - ... -) -# Evaluation -evaluation = dict(interval=1, by_epoch=True) # Evaluate the model every epoch -# Saving and Logging -checkpoint_config = dict(interval=1) # Save a checkpoint every epoch -log_config = dict( - interval=5, # Print out the model's performance every 5 iterations - hooks=[ - dict(type='TextLoggerHook') - ]) -# Optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) # Supports all optimizers in PyTorch and shares the same parameters -optimizer_config = dict(grad_clip=None) # Parameters for the optimizer hook. 
See https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py for implementation details -# Learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-7, by_epoch=True) -``` diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/paa/paa_r50_fpn_1.5x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/paa/paa_r50_fpn_1.5x_coco.py deleted file mode 100644 index aabce4af987aa5504e1748e10b9955f760a013e1..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/paa/paa_r50_fpn_1.5x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './paa_r50_fpn_1x_coco.py' -lr_config = dict(step=[12, 16]) -runner = dict(type='EpochBasedRunner', max_epochs=18) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/shared_heads/__init__.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/shared_heads/__init__.py deleted file mode 100644 index bbe70145b8bf7c304370f725f5afa8db98666679..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/shared_heads/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .res_layer import ResLayer - -__all__ = ['ResLayer'] diff --git a/spaces/trysem/dfr/app.py b/spaces/trysem/dfr/app.py deleted file mode 100644 index 713a28f66ac2002ce892d9fd02f454132a687b85..0000000000000000000000000000000000000000 --- a/spaces/trysem/dfr/app.py +++ /dev/null @@ -1,169 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path -import random -import string -import time -from queue import Queue -from threading import Thread -import emoji - - -text_gen=gr.Interface.load("spaces/trysem/visua") -def get_prompts(prompt_text): - return text_gen("photo, " + prompt_text) -proc1=gr.Interface.load("models/dreamlike-art/dreamlike-photoreal-2.0") - -def restart_script_periodically(): - while True: - time.sleep(600) # 10 minutes - try: - os.execl(sys.executable, sys.executable, *sys.argv) - except: - pass - -restart_thread = Thread(target=restart_script_periodically, daemon=True) -restart_thread.start() - -queue = Queue() -queue_threshold = 800 - -def add_random_noise(prompt, noise_level=0.00): - if noise_level == 0: - noise_level = 0.00 - # Get the percentage of characters to add as noise - percentage_noise = noise_level * 5 - # Get the number of characters to add as noise - num_noise_chars = int(len(prompt) * (percentage_noise/100)) - # Get the indices of the characters to add noise to - noise_indices = random.sample(range(len(prompt)), num_noise_chars) - # Add noise to the selected characters - prompt_list = list(prompt) - # Add numbers, special characters, and all emojis to the list of characters used to add noise - noise_chars = string.ascii_letters + string.punctuation + ' ' + string.digits + emoji.emojize(":all:") - for index in noise_indices: - prompt_list[index] = random.choice(noise_chars) - return "".join(prompt_list) - - -def send_it1(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - output1 = proc1(prompt_with_noise) - return output1 - -def send_it2(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - output2 = proc1(prompt_with_noise) - return output2 - -def send_it3(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - output3 = proc1(prompt_with_noise) - return output3 - -def send_it4(inputs, noise_level, proc1=proc1): - 
prompt_with_noise = add_random_noise(inputs, noise_level) - output4 = proc1(prompt_with_noise) - return output4 - -def send_it5(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - output5 = proc1(prompt_with_noise) - return output5 - -def send_it6(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - output6 = proc1(prompt_with_noise) - return output6 - -with gr.Blocks(css="footer {visibility: hidden}") as demo: - gr.HTML( - """ -
                -
                -

                -

                -
                -

                - Noise Level: Controls how much randomness is added to the input before it is sent to the model. Higher noise level produces more diverse outputs, while lower noise level produces similar outputs, - -

                -

                - Unleash your creative side and generate mesmerizing images with just a few clicks! Enter a spark of inspiration in the "Basic Idea" text box and click the "Magic Prompt" button to elevate it to a polished masterpiece. Make any final tweaks in the "Full Prompt" box and hit the "Generate Images" button to watch your vision come to life. Experiment with the "Noise Level" for a diverse range of outputs, from similar to wildly unique. Let the fun begin!❤️ -

                -
                - """ - ) - with gr.Column(): - with gr.Row(): - input_text = gr.Textbox( - label="Short Prompt", - show_label=False, - max_lines=4, - placeholder="Enter a basic idea and click 'Magic Prompt'", - ).style( - container=True, - ) - with gr.Row(): - see_prompts = gr.Button("✨ Magic Prompt ✨").style(full_width=True) - - - with gr.Row(): - prompt = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=6, - placeholder="Full Prompt", - ).style( - container=True, - ) - with gr.Row(): - run = gr.Button("Generate Images").style(full_width=True) - - with gr.Row(): - - noise_level = gr.Slider(minimum=0.7, maximum=3, step=0.1, label="Noise Level") - with gr.Row(): - output1=gr.Image(label="Dreamlike-photoreal-2.0",show_label=False) - with gr.Row(): - output2=gr.Image(label="Dreamlike-photoreal-2.0",show_label=False) - with gr.Row(): - output3=gr.Image(label="Dreamlike-photoreal-2.0",show_label=False) - with gr.Row(): - output4=gr.Image(label="Dreamlike-photoreal-2.0",show_label=False) - with gr.Row(): - output5=gr.Image(label="Dreamlike-photoreal-2.0",show_label=False) - with gr.Row(): - output6=gr.Image(label="Dreamlike-photoreal-2.0",show_label=False) - - #with gr.Row(): - #output1=gr.Image() - - see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False) - run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1]) - run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2]) - run.click(send_it3, inputs=[prompt, noise_level], outputs=[output3]) - run.click(send_it4, inputs=[prompt, noise_level], outputs=[output4]) - run.click(send_it5, inputs=[prompt, noise_level], outputs=[output5]) - run.click(send_it6, inputs=[prompt, noise_level], outputs=[output6]) - - - - with gr.Row(): - gr.HTML( - """ - -
                -

                -
                - """ -) - - demo.launch(enable_queue=True, inline=True) - block.queue(concurrency_count=100) \ No newline at end of file diff --git a/spaces/tsi-org/zeroscope/share_btn.py b/spaces/tsi-org/zeroscope/share_btn.py deleted file mode 100644 index bc64b36c7335bc6fd3e96c8260e0a0d85a0704ce..0000000000000000000000000000000000000000 --- a/spaces/tsi-org/zeroscope/share_btn.py +++ /dev/null @@ -1,77 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - - async function getVideoBlobFile(videoEL){ - const res = await fetch(videoEL.src); - const blob = await res.blob(); - const videoId = Date.now() % 200; - const fileName = `vid-zeroscope-${{videoId}}.mp4`; - const videoBlob = new File([blob], fileName, { type: 'video/mp4' }); - console.log(videoBlob); - return videoBlob; - } - - const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app'); - const captionTxt = gradioEl.querySelector('#prompt-in textarea').value; - const outputVideo = gradioEl.querySelector('#video-output video'); - - - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!outputVideo){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - - - const videoOutFile = await getVideoBlobFile(outputVideo); - const dataOutputVid = await uploadFile(videoOutFile); - - const descriptionMd = ` -#### Prompt: -${captionTxt} - -#### Zeroscope video result: - - -`; - const params = new URLSearchParams({ - title: captionTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/fffiloni/zeroscope/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/clipseg/datasets/utils.py b/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/clipseg/datasets/utils.py deleted file mode 100644 index 35d0127ac66781969b80dfe3e4f887239459ca74..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/clipseg/datasets/utils.py +++ /dev/null @@ -1,68 +0,0 @@ - -import numpy as np -import torch - - -def blend_image_segmentation(img, seg, mode, image_size=224): - - - if mode in {'blur_highlight', 'blur3_highlight', 'blur3_highlight01', 'blur_highlight_random', 'crop'}: - if isinstance(img, np.ndarray): - img = torch.from_numpy(img) - - if isinstance(seg, np.ndarray): - seg = torch.from_numpy(seg) - - if mode == 'overlay': - out = img * seg - out = [out.astype('float32')] - elif mode == 'highlight': - out = img * seg[None, :, :] * 0.85 + 0.15 * img - out = [out.astype('float32')] - elif mode == 'highlight2': - img = img / 2 - out = 
(img+0.1) * seg[None, :, :] + 0.3 * img - out = [out.astype('float32')] - elif mode == 'blur_highlight': - from evaluation_utils import img_preprocess - out = [img_preprocess((None, [img], [seg]), blur=1, bg_fac=0.5).numpy()[0] - 0.01] - elif mode == 'blur3_highlight': - from evaluation_utils import img_preprocess - out = [img_preprocess((None, [img], [seg]), blur=3, bg_fac=0.5).numpy()[0] - 0.01] - elif mode == 'blur3_highlight01': - from evaluation_utils import img_preprocess - out = [img_preprocess((None, [img], [seg]), blur=3, bg_fac=0.1).numpy()[0] - 0.01] - elif mode == 'blur_highlight_random': - from evaluation_utils import img_preprocess - out = [img_preprocess((None, [img], [seg]), blur=0 + torch.randint(0, 3, (1,)).item(), bg_fac=0.1 + 0.8*torch.rand(1).item()).numpy()[0] - 0.01] - elif mode == 'crop': - from evaluation_utils import img_preprocess - out = [img_preprocess((None, [img], [seg]), blur=1, center_context=0.1, image_size=image_size)[0].numpy()] - elif mode == 'crop_blur_highlight': - from evaluation_utils import img_preprocess - out = [img_preprocess((None, [img], [seg]), blur=3, center_context=0.1, bg_fac=0.1, image_size=image_size)[0].numpy()] - elif mode == 'crop_blur_highlight352': - from evaluation_utils import img_preprocess - out = [img_preprocess((None, [img], [seg]), blur=3, center_context=0.1, bg_fac=0.1, image_size=352)[0].numpy()] - elif mode == 'shape': - out = [np.stack([seg[:, :]]*3).astype('float32')] - elif mode == 'concat': - out = [np.concatenate([img, seg[None, :, :]]).astype('float32')] - elif mode == 'image_only': - out = [img.astype('float32')] - elif mode == 'image_black': - out = [img.astype('float32')*0] - elif mode is None: - out = [img.astype('float32')] - elif mode == 'separate': - out = [img.astype('float32'), seg.astype('int64')] - elif mode == 'separate_img_black': - out = [img.astype('float32')*0, seg.astype('int64')] - elif mode == 'separate_seg_ones': - out = [img.astype('float32'), np.ones_like(seg).astype('int64')] - elif mode == 'separate_both_black': - out = [img.astype('float32')*0, seg.astype('int64')*0] - else: - raise ValueError(f'invalid mode: {mode}') - - return out \ No newline at end of file diff --git a/spaces/user238921933/stable-diffusion-webui/modules/progress.py b/spaces/user238921933/stable-diffusion-webui/modules/progress.py deleted file mode 100644 index be6c8480a75305b7631be90f5ba3fc48df3f45a3..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/modules/progress.py +++ /dev/null @@ -1,99 +0,0 @@ -import base64 -import io -import time - -import gradio as gr -from pydantic import BaseModel, Field - -from modules.shared import opts - -import modules.shared as shared - - -current_task = None -pending_tasks = {} -finished_tasks = [] - - -def start_task(id_task): - global current_task - - current_task = id_task - pending_tasks.pop(id_task, None) - - -def finish_task(id_task): - global current_task - - if current_task == id_task: - current_task = None - - finished_tasks.append(id_task) - if len(finished_tasks) > 16: - finished_tasks.pop(0) - - -def add_task_to_queue(id_job): - pending_tasks[id_job] = time.time() - - -class ProgressRequest(BaseModel): - id_task: str = Field(default=None, title="Task ID", description="id of the task to get progress for") - id_live_preview: int = Field(default=-1, title="Live preview image ID", description="id of last received last preview image") - - -class ProgressResponse(BaseModel): - active: bool = Field(title="Whether the task is being worked on 
right now") - queued: bool = Field(title="Whether the task is in queue") - completed: bool = Field(title="Whether the task has already finished") - progress: float = Field(default=None, title="Progress", description="The progress with a range of 0 to 1") - eta: float = Field(default=None, title="ETA in secs") - live_preview: str = Field(default=None, title="Live preview image", description="Current live preview; a data: uri") - id_live_preview: int = Field(default=None, title="Live preview image ID", description="Send this together with next request to prevent receiving same image") - textinfo: str = Field(default=None, title="Info text", description="Info text used by WebUI.") - - -def setup_progress_api(app): - return app.add_api_route("/internal/progress", progressapi, methods=["POST"], response_model=ProgressResponse) - - -def progressapi(req: ProgressRequest): - active = req.id_task == current_task - queued = req.id_task in pending_tasks - completed = req.id_task in finished_tasks - - if not active: - return ProgressResponse(active=active, queued=queued, completed=completed, id_live_preview=-1, textinfo="In queue..." if queued else "Waiting...") - - progress = 0 - - job_count, job_no = shared.state.job_count, shared.state.job_no - sampling_steps, sampling_step = shared.state.sampling_steps, shared.state.sampling_step - - if job_count > 0: - progress += job_no / job_count - if sampling_steps > 0 and job_count > 0: - progress += 1 / job_count * sampling_step / sampling_steps - - progress = min(progress, 1) - - elapsed_since_start = time.time() - shared.state.time_start - predicted_duration = elapsed_since_start / progress if progress > 0 else None - eta = predicted_duration - elapsed_since_start if predicted_duration is not None else None - - id_live_preview = req.id_live_preview - shared.state.set_current_image() - if opts.live_previews_enable and shared.state.id_live_preview != req.id_live_preview: - image = shared.state.current_image - if image is not None: - buffered = io.BytesIO() - image.save(buffered, format="png") - live_preview = 'data:image/png;base64,' + base64.b64encode(buffered.getvalue()).decode("ascii") - id_live_preview = shared.state.id_live_preview - else: - live_preview = None - else: - live_preview = None - - return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo) - diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/engine/trainer.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/engine/trainer.md deleted file mode 100644 index e9a06e45b817616d9d1298606bef3143f7682b4f..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/engine/trainer.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -description: Train faster with mixed precision. Learn how to use BaseTrainer with Advanced Mixed Precision to optimize YOLOv3 and YOLOv4 models. -keywords: Ultralytics YOLO, BaseTrainer, object detection models, training guide ---- - -## BaseTrainer ---- -### ::: ultralytics.yolo.engine.trainer.BaseTrainer -

                diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/data/dataloaders/stream_loaders.py b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/data/dataloaders/stream_loaders.py deleted file mode 100644 index f497cb1c1b1f2e32a5d7c1570f5fa83deed4c08d..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/data/dataloaders/stream_loaders.py +++ /dev/null @@ -1,401 +0,0 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license - -import glob -import math -import os -import time -from dataclasses import dataclass -from pathlib import Path -from threading import Thread -from urllib.parse import urlparse - -import cv2 -import numpy as np -import requests -import torch -from PIL import Image - -from ultralytics.yolo.data.utils import IMG_FORMATS, VID_FORMATS -from ultralytics.yolo.utils import LOGGER, ROOT, is_colab, is_kaggle, ops -from ultralytics.yolo.utils.checks import check_requirements - - -@dataclass -class SourceTypes: - webcam: bool = False - screenshot: bool = False - from_img: bool = False - tensor: bool = False - - -class LoadStreams: - # YOLOv8 streamloader, i.e. `yolo predict source='rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` - def __init__(self, sources='file.streams', imgsz=640, vid_stride=1): - """Initialize instance variables and check for consistent input stream shapes.""" - torch.backends.cudnn.benchmark = True # faster for fixed-size inference - self.mode = 'stream' - self.imgsz = imgsz - self.vid_stride = vid_stride # video frame-rate stride - sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] - n = len(sources) - self.sources = [ops.clean_str(x) for x in sources] # clean source names for later - self.imgs, self.fps, self.frames, self.threads, self.shape = [[]] * n, [0] * n, [0] * n, [None] * n, [None] * n - for i, s in enumerate(sources): # index, source - # Start thread to read frames from video stream - st = f'{i + 1}/{n}: {s}... ' - if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video - # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' - s = get_best_youtube_url(s) - s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam - if s == 0 and (is_colab() or is_kaggle()): - raise NotImplementedError("'source=0' webcam not supported in Colab and Kaggle notebooks. 
" - "Try running 'source=0' in a local environment.") - cap = cv2.VideoCapture(s) - if not cap.isOpened(): - raise ConnectionError(f'{st}Failed to open {s}') - w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan - self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback - self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback - - success, im = cap.read() # guarantee first frame - if not success or im is None: - raise ConnectionError(f'{st}Failed to read images from {s}') - self.imgs[i].append(im) - self.shape[i] = im.shape - self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) - LOGGER.info(f'{st}Success ✅ ({self.frames[i]} frames of shape {w}x{h} at {self.fps[i]:.2f} FPS)') - self.threads[i].start() - LOGGER.info('') # newline - - # Check for common shapes - self.bs = self.__len__() - - def update(self, i, cap, stream): - """Read stream `i` frames in daemon thread.""" - n, f = 0, self.frames[i] # frame number, frame array - while cap.isOpened() and n < f: - # Only read a new frame if the buffer is empty - if not self.imgs[i]: - n += 1 - cap.grab() # .read() = .grab() followed by .retrieve() - if n % self.vid_stride == 0: - success, im = cap.retrieve() - if success: - self.imgs[i].append(im) # add image to buffer - else: - LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') - self.imgs[i].append(np.zeros(self.shape[i])) - cap.open(stream) # re-open stream if signal was lost - else: - time.sleep(0.01) # wait until the buffer is empty - - def __iter__(self): - """Iterates through YOLO image feed and re-opens unresponsive streams.""" - self.count = -1 - return self - - def __next__(self): - """Returns source paths, transformed and original images for processing.""" - self.count += 1 - - # Wait until a frame is available in each buffer - while not all(self.imgs): - if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit - cv2.destroyAllWindows() - raise StopIteration - time.sleep(1 / min(self.fps)) - - # Get and remove the next frame from imgs buffer - return self.sources, [x.pop(0) for x in self.imgs], None, '' - - def __len__(self): - """Return the length of the sources object.""" - return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years - - -class LoadScreenshots: - # YOLOv8 screenshot dataloader, i.e. 
`yolo predict source=screen` - def __init__(self, source, imgsz=640): - """source = [screen_number left top width height] (pixels).""" - check_requirements('mss') - import mss # noqa - - source, *params = source.split() - self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 - if len(params) == 1: - self.screen = int(params[0]) - elif len(params) == 4: - left, top, width, height = (int(x) for x in params) - elif len(params) == 5: - self.screen, left, top, width, height = (int(x) for x in params) - self.imgsz = imgsz - self.mode = 'stream' - self.frame = 0 - self.sct = mss.mss() - self.bs = 1 - - # Parse monitor shape - monitor = self.sct.monitors[self.screen] - self.top = monitor['top'] if top is None else (monitor['top'] + top) - self.left = monitor['left'] if left is None else (monitor['left'] + left) - self.width = width or monitor['width'] - self.height = height or monitor['height'] - self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} - - def __iter__(self): - """Returns an iterator of the object.""" - return self - - def __next__(self): - """mss screen capture: get raw pixels from the screen as np array.""" - im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR - s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' - - self.frame += 1 - return str(self.screen), im0, None, s # screen, img, original img, im0s, s - - -class LoadImages: - # YOLOv8 image/video dataloader, i.e. `yolo predict source=image.jpg/vid.mp4` - def __init__(self, path, imgsz=640, vid_stride=1): - """Initialize the Dataloader and raise FileNotFoundError if file not found.""" - if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line - path = Path(path).read_text().rsplit() - files = [] - for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: - p = str(Path(p).absolute()) # do not use .resolve() https://github.com/ultralytics/ultralytics/issues/2912 - if '*' in p: - files.extend(sorted(glob.glob(p, recursive=True))) # glob - elif os.path.isdir(p): - files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir - elif os.path.isfile(p): - files.append(p) # files - else: - raise FileNotFoundError(f'{p} does not exist') - - images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] - videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] - ni, nv = len(images), len(videos) - - self.imgsz = imgsz - self.files = images + videos - self.nf = ni + nv # number of files - self.video_flag = [False] * ni + [True] * nv - self.mode = 'image' - self.vid_stride = vid_stride # video frame-rate stride - self.bs = 1 - if any(videos): - self.orientation = None # rotation degrees - self._new_video(videos[0]) # new video - else: - self.cap = None - if self.nf == 0: - raise FileNotFoundError(f'No images or videos found in {p}. 
' - f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}') - - def __iter__(self): - """Returns an iterator object for VideoStream or ImageFolder.""" - self.count = 0 - return self - - def __next__(self): - """Return next image, path and metadata from dataset.""" - if self.count == self.nf: - raise StopIteration - path = self.files[self.count] - - if self.video_flag[self.count]: - # Read video - self.mode = 'video' - for _ in range(self.vid_stride): - self.cap.grab() - success, im0 = self.cap.retrieve() - while not success: - self.count += 1 - self.cap.release() - if self.count == self.nf: # last video - raise StopIteration - path = self.files[self.count] - self._new_video(path) - success, im0 = self.cap.read() - - self.frame += 1 - # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False - s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' - - else: - # Read image - self.count += 1 - im0 = cv2.imread(path) # BGR - if im0 is None: - raise FileNotFoundError(f'Image Not Found {path}') - s = f'image {self.count}/{self.nf} {path}: ' - - return [path], [im0], self.cap, s - - def _new_video(self, path): - """Create a new video capture object.""" - self.frame = 0 - self.cap = cv2.VideoCapture(path) - self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) - if hasattr(cv2, 'CAP_PROP_ORIENTATION_META'): # cv2<4.6.0 compatibility - self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees - # Disable auto-orientation due to known issues in https://github.com/ultralytics/yolov5/issues/8493 - # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) - - def _cv2_rotate(self, im): - """Rotate a cv2 video manually.""" - if self.orientation == 0: - return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) - elif self.orientation == 180: - return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) - elif self.orientation == 90: - return cv2.rotate(im, cv2.ROTATE_180) - return im - - def __len__(self): - """Returns the number of files in the object.""" - return self.nf # number of files - - -class LoadPilAndNumpy: - - def __init__(self, im0, imgsz=640): - """Initialize PIL and Numpy Dataloader.""" - if not isinstance(im0, list): - im0 = [im0] - self.paths = [getattr(im, 'filename', f'image{i}.jpg') for i, im in enumerate(im0)] - self.im0 = [self._single_check(im) for im in im0] - self.imgsz = imgsz - self.mode = 'image' - # Generate fake paths - self.bs = len(self.im0) - - @staticmethod - def _single_check(im): - """Validate and format an image to numpy array.""" - assert isinstance(im, (Image.Image, np.ndarray)), f'Expected PIL/np.ndarray image type, but got {type(im)}' - if isinstance(im, Image.Image): - if im.mode != 'RGB': - im = im.convert('RGB') - im = np.asarray(im)[:, :, ::-1] - im = np.ascontiguousarray(im) # contiguous - return im - - def __len__(self): - """Returns the length of the 'im0' attribute.""" - return len(self.im0) - - def __next__(self): - """Returns batch paths, images, processed images, None, ''.""" - if self.count == 1: # loop only once as it's batch inference - raise StopIteration - self.count += 1 - return self.paths, self.im0, None, '' - - def __iter__(self): - """Enables iteration for class LoadPilAndNumpy.""" - self.count = 0 - return self - - -class LoadTensor: - - def __init__(self, im0) -> None: - self.im0 = self._single_check(im0) - self.bs = self.im0.shape[0] - self.mode = 'image' - self.paths = [getattr(im, 'filename', f'image{i}.jpg') for i, im in enumerate(im0)] - - @staticmethod - def 
_single_check(im, stride=32): - """Validate and format an image to torch.Tensor.""" - s = f'WARNING ⚠️ torch.Tensor inputs should be BCHW i.e. shape(1, 3, 640, 640) ' \ - f'divisible by stride {stride}. Input shape{tuple(im.shape)} is incompatible.' - if len(im.shape) != 4: - if len(im.shape) == 3: - LOGGER.warning(s) - im = im.unsqueeze(0) - else: - raise ValueError(s) - if im.shape[2] % stride or im.shape[3] % stride: - raise ValueError(s) - if im.max() > 1.0: - LOGGER.warning(f'WARNING ⚠️ torch.Tensor inputs should be normalized 0.0-1.0 but max value is {im.max()}. ' - f'Dividing input by 255.') - im = im.float() / 255.0 - - return im - - def __iter__(self): - """Returns an iterator object.""" - self.count = 0 - return self - - def __next__(self): - """Return next item in the iterator.""" - if self.count == 1: - raise StopIteration - self.count += 1 - return self.paths, self.im0, None, '' - - def __len__(self): - """Returns the batch size.""" - return self.bs - - -def autocast_list(source): - """ - Merges a list of source of different types into a list of numpy arrays or PIL images - """ - files = [] - for im in source: - if isinstance(im, (str, Path)): # filename or uri - files.append(Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im)) - elif isinstance(im, (Image.Image, np.ndarray)): # PIL or np Image - files.append(im) - else: - raise TypeError(f'type {type(im).__name__} is not a supported Ultralytics prediction source type. \n' - f'See https://docs.ultralytics.com/modes/predict for supported source types.') - - return files - - -LOADERS = [LoadStreams, LoadPilAndNumpy, LoadImages, LoadScreenshots] - - -def get_best_youtube_url(url, use_pafy=True): - """ - Retrieves the URL of the best quality MP4 video stream from a given YouTube video. - - This function uses the pafy or yt_dlp library to extract the video info from YouTube. It then finds the highest - quality MP4 format that has video codec but no audio codec, and returns the URL of this video stream. - - Args: - url (str): The URL of the YouTube video. - use_pafy (bool): Use the pafy package, default=True, otherwise use yt_dlp package. - - Returns: - (str): The URL of the best quality MP4 video stream, or None if no suitable stream is found. 
- """ - if use_pafy: - check_requirements(('pafy', 'youtube_dl==2020.12.2')) - import pafy # noqa - return pafy.new(url).getbest(preftype='mp4').url - else: - check_requirements('yt-dlp') - import yt_dlp - with yt_dlp.YoutubeDL({'quiet': True}) as ydl: - info_dict = ydl.extract_info(url, download=False) # extract info - for f in info_dict.get('formats', None): - if f['vcodec'] != 'none' and f['acodec'] == 'none' and f['ext'] == 'mp4': - return f.get('url', None) - - -if __name__ == '__main__': - img = cv2.imread(str(ROOT / 'assets/bus.jpg')) - dataset = LoadPilAndNumpy(im0=img) - for d in dataset: - print(d[0]) diff --git a/spaces/vasudevgupta/BigGAN/app.py b/spaces/vasudevgupta/BigGAN/app.py deleted file mode 100644 index d889132695acb0520ff9cbca19ceae6b61f2e2ea..0000000000000000000000000000000000000000 --- a/spaces/vasudevgupta/BigGAN/app.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -import subprocess - -GIT_TOKEN = os.environ.get("GIT_TOKEN") -GIT_USER = os.environ.get("GIT_USER") - -lib_url = f"git+https://{GIT_USER}:{GIT_TOKEN}@github.com/vasudevgupta7/huggingface-task@main" -subprocess.run(f"pip3 install -q {lib_url}".split()) - -import torch -import numpy as np -import gradio as gr -from transformers import AutoModel, AutoTokenizer -from pytorch_pretrained_biggan import BigGAN -from huggingface_task.models import AutoEncoder -from huggingface_task.run_model import generate_image_from_text - -biggan_id = 'biggan-deep-128' -text_encoder_id = "distilbert-base-uncased" -autoencoder_id = "vasudevgupta/biggan-mapping-model" - -text_tokenizer = AutoTokenizer.from_pretrained(text_encoder_id) -text_model = AutoModel.from_pretrained(text_encoder_id) - -autoencoder = AutoEncoder.from_pretrained(autoencoder_id) -biggan = BigGAN.from_pretrained(biggan_id) - -device = "cuda" if torch.cuda.is_available() else "cpu" -biggan.to(device).eval() -text_model.to(device).eval() -autoencoder.to(device).eval() - -def generate_image(text_query): - array = generate_image_from_text(text_query, text_tokenizer, text_model, autoencoder, biggan, device=device) - array = ((array + 1.0) / 2.0) * 256 - array.clip(0, 255, out=array) - array = np.asarray(np.uint8(array), dtype=np.uint8) - return array - -gr.Interface(fn=generate_image, inputs="text", outputs="image").launch() diff --git a/spaces/verkaDerkaDerk/face-image-to-face-obj/utils.py b/spaces/verkaDerkaDerk/face-image-to-face-obj/utils.py deleted file mode 100644 index 1eb4a4ad1eb59347f5ebc5478c58542986ba77e7..0000000000000000000000000000000000000000 --- a/spaces/verkaDerkaDerk/face-image-to-face-obj/utils.py +++ /dev/null @@ -1,128 +0,0 @@ -# from https://huggingface.co/spaces/shariqfarooq/ZoeDepth/raw/main/utils.py - -# MIT License - -# Copyright (c) 2022 Intelligent Systems Lab Org - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. 
- -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -# File author: Shariq Farooq Bhat - -import matplotlib -import matplotlib.cm -import numpy as np -import torch - -def colorize(value, vmin=None, vmax=None, cmap='magma_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None): - """Converts a depth map to a color image. - - Args: - value (torch.Tensor, numpy.ndarry): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed - vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, value.min() is used. Defaults to None. - vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, value.max() is used. Defaults to None. - cmap (str, optional): matplotlib colormap to use. Defaults to 'magma_r'. - invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99. - invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None. - background_color (tuple[int], optional): 4-tuple RGB color to give to invalid pixels. Defaults to (128, 128, 128, 255). - gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False. - value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None. - - Returns: - numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4) - """ - if isinstance(value, torch.Tensor): - value = value.detach().cpu().numpy() - - value = value.squeeze() - if invalid_mask is None: - invalid_mask = value == invalid_val - mask = np.logical_not(invalid_mask) - - # normalize - vmin = np.percentile(value[mask],2) if vmin is None else vmin - vmax = np.percentile(value[mask],85) if vmax is None else vmax - if vmin != vmax: - value = (value - vmin) / (vmax - vmin) # vmin..vmax - else: - # Avoid 0-division - value = value * 0. - - # squeeze last dim if it exists - # grey out the invalid values - - value[invalid_mask] = np.nan - cmapper = matplotlib.cm.get_cmap(cmap) - if value_transform: - value = value_transform(value) - # value = value / value.max() - value = cmapper(value, bytes=True) # (nxmx4) - - # img = value[:, :, :] - img = value[...] - img[invalid_mask] = background_color - - # return img.transpose((2, 0, 1)) - if gamma_corrected: - # gamma correction - img = img / 255 - img = np.power(img, 2.2) - img = img * 255 - img = img.astype(np.uint8) - return img - - -import os - -# bard... -def find_most_recently_created_directory(temp_dir): - """Finds the most recently created directory in a directory. - - Args: - temp_dir: The directory to search. - - Returns: - The path to the most recently created directory. 
- """ - - directories = os.listdir(temp_dir) - most_recently_created_directory = None - for directory in directories: - path = os.path.join(temp_dir, directory) - st = os.stat(path) - if most_recently_created_directory is None or st.mtime > most_recently_created_directory.mtime: - most_recently_created_directory = path - - if most_recently_created_directory is None: - most_recently_created_directory = temp_dir - - return most_recently_created_directory - - -#chatgpt -def get_most_recent_subdirectory(path): - if not os.path.isdir(path): - return path - - subdirectories = [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))] - if not subdirectories: - return path - - most_recent_subdirectory = max(subdirectories, key=lambda d: os.path.getctime(os.path.join(path, d))) - return os.path.join(path, most_recent_subdirectory) - diff --git a/spaces/video-p2p-library/Video-P2P-Demo/uploader.py b/spaces/video-p2p-library/Video-P2P-Demo/uploader.py deleted file mode 100644 index d9e06ec02127db34016d3d7b550e88f820a737fe..0000000000000000000000000000000000000000 --- a/spaces/video-p2p-library/Video-P2P-Demo/uploader.py +++ /dev/null @@ -1,44 +0,0 @@ -from __future__ import annotations - -from huggingface_hub import HfApi - - -class Uploader: - def __init__(self, hf_token: str | None): - self.hf_token = hf_token - - def upload(self, - folder_path: str, - repo_name: str, - organization: str = '', - repo_type: str = 'model', - private: bool = True, - delete_existing_repo: bool = False, - input_token: str | None = None) -> str: - - api = HfApi(token=self.hf_token if self.hf_token else input_token) - - if not folder_path: - raise ValueError - if not repo_name: - raise ValueError - if not organization: - organization = api.whoami()['name'] - - repo_id = f'{organization}/{repo_name}' - if delete_existing_repo: - try: - api.delete_repo(repo_id, repo_type=repo_type) - except Exception: - pass - try: - api.create_repo(repo_id, repo_type=repo_type, private=private) - api.upload_folder(repo_id=repo_id, - folder_path=folder_path, - path_in_repo='.', - repo_type=repo_type) - url = f'https://huggingface.co/{repo_id}' - message = f'Your model was successfully uploaded to {url}.' - except Exception as e: - message = str(e) - return message diff --git a/spaces/vinceL/YonKomaMangaGenerator/sample_storyboards/kishotenketsu.md b/spaces/vinceL/YonKomaMangaGenerator/sample_storyboards/kishotenketsu.md deleted file mode 100644 index 8754d84a0cca70078c91448554cc577bb4857223..0000000000000000000000000000000000000000 --- a/spaces/vinceL/YonKomaMangaGenerator/sample_storyboards/kishotenketsu.md +++ /dev/null @@ -1,39 +0,0 @@ -{ -"storyboard": { - "title": "Mystery at the JR Station", - "step_by_step_thinking_for_designing_your_storyboard": "I start off by introducing the busy scene at a JR train station with our salaryman protagonist appearing mildly disinterested, amplified by the Sazae-san's simplistic art style. The plot then develops as he stumbles upon a mysterious object, thus drawing interest and curiosity. The twist comes when the object, initially appearing as an expensive gem, is revealed to be a child's toy – totally unexpected and humorously anticlimactic. The narrative concludes by tying back the initial mundane scene with the realization it was a mere toy all along. 
The 80s joke manga style is maintained through friendly humor, simple story line and dramatic yet light-hearted reveal.", - - "step_by_step_thinking_for_effectively_applying_ki_sho_ten_ketsu": "The 'ki' represents the JR train environment, setting the backdrop of a mundane workday for our salaryman. The 'sho' introduces a new element, the mysterious object, into the monotonous scene, building anticipation. The 'ten' dramatically shifts the assumed trajectory of the plot by revealing the supposed gem to be a child's bauble, proving to be the comic punch of this 80s joke manga. The 'ketsu' reconciles the ordinary life of the salaryman with the momentary excitement, ensuring a satisfactory chuckle from the audience as the complexity resolves into well-paced humor.", - - "panels": [ - { - "id": 1, - "type": "ki", - "image_generation_prompt": "Salaryman, busy JR station, walking, in Machiko Hasegawa's Sazae-san style, 80s manga drawing", - "description": "The first panel introduces a typical Tokyo JR train station. The scene is bustling with people and the atmosphere is chaotic. Amid all the hustle, a middle-aged salaryman, our nondescript protagonist, walks stoically, seemingly detached from his environment. In his hand, he tightly grips a suitcase, symbolizing his repetitive, monotonous work life.", - "dialogue": {"0": "Salaryman monologue: 'Another mundane day...'"} - }, - { - "id": 2, - "type": "sho", - "image_generation_prompt": "Salaryman, mysterious bright object on station floor, looking intrigued, in Machiko Hasegawa's Sazae-san style, 80s manga drawing", - "description": "Suddenly, something on the station floor catches the salaryman's eye. A small, bright object that appears to be a precious gem, glistening under the station's pale artificial light, instantly breaks his monotony. An intrigued expression passes over his otherwise weary face, suggesting that the story is about to take an interesting turn.", - "dialogue": {"0": "Salaryman exclaiming: 'What's this?' "} - }, - { - "id": 3, - "type": "ten", - "image_generation_prompt": "Salaryman, holding toy gem, surprised, in Machiko Hasegawa's Sazae-san style, 80s manga drawing", - "description": "In the third panel, the salaryman bends down to pick up the object, a surprised expression on his face. As he holds it in his hand, the massive 'gemstone' is revealed to be nothing but a toy bauble! The anticlimactic reveal brings a lighthearted twist to the story, providing a well-timed spark of humor.", - "dialogue": {"0": "Salaryman: 'It's... it's a toy gem!' "} - }, - { - "id": 4, - "type": "ketsu", - "image_generation_prompt": "Salaryman, putting toy gem in pocket, chuckling, in Machiko Hasegawa's Sazae-san style, 80s manga drawing", - "description": "The final panel shows the salaryman, chuckling to himself, deciding to pocket the toy gem. The twist brought an unexpected spark to his routine day, leaving him mildly amused. 
Thus, the brief adventure ends up adding a dash of lighthearted fun to his everyday commute.", - "dialogue": {"0": "Salaryman laughing: 'What an adventure, haha!'"} - } - ] -} -} \ No newline at end of file diff --git a/spaces/vonbarnekowa/stable-diffusion/scripts/tests/test_watermark.py b/spaces/vonbarnekowa/stable-diffusion/scripts/tests/test_watermark.py deleted file mode 100644 index f93f8a6e70763c0e284157bc8225827520b2f5ef..0000000000000000000000000000000000000000 --- a/spaces/vonbarnekowa/stable-diffusion/scripts/tests/test_watermark.py +++ /dev/null @@ -1,18 +0,0 @@ -import cv2 -import fire -from imwatermark import WatermarkDecoder - - -def testit(img_path): - bgr = cv2.imread(img_path) - decoder = WatermarkDecoder('bytes', 136) - watermark = decoder.decode(bgr, 'dwtDct') - try: - dec = watermark.decode('utf-8') - except: - dec = "null" - print(dec) - - -if __name__ == "__main__": - fire.Fire(testit) \ No newline at end of file diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/lraspp_m-v3-d8.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/lraspp_m-v3-d8.py deleted file mode 100644 index 93258242a90695cc94a7c6bd41562d6a75988771..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/lraspp_m-v3-d8.py +++ /dev/null @@ -1,25 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) -model = dict( - type='EncoderDecoder', - backbone=dict( - type='MobileNetV3', - arch='large', - out_indices=(1, 3, 16), - norm_cfg=norm_cfg), - decode_head=dict( - type='LRASPPHead', - in_channels=(16, 24, 960), - in_index=(0, 1, 2), - channels=128, - input_transform='multiple_select', - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU'), - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/w1zrd/MusicGen/tests/data/test_audio_dataset.py b/spaces/w1zrd/MusicGen/tests/data/test_audio_dataset.py deleted file mode 100644 index b69c9c397830738b73d6c229009f84b867cda801..0000000000000000000000000000000000000000 --- a/spaces/w1zrd/MusicGen/tests/data/test_audio_dataset.py +++ /dev/null @@ -1,352 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from functools import partial -from itertools import product -import json -import math -import os -import random -import typing as tp - -import pytest -import torch -from torch.utils.data import DataLoader - -from audiocraft.data.audio_dataset import ( - AudioDataset, - AudioMeta, - _get_audio_meta, - load_audio_meta, - save_audio_meta -) -from audiocraft.data.zip import PathInZip - -from ..common_utils import TempDirMixin, get_white_noise, save_wav - - -class TestAudioMeta(TempDirMixin): - - def test_get_audio_meta(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. 
- for sample_rate, ch in product(sample_rates, channels): - n_frames = int(duration * sample_rate) - wav = get_white_noise(ch, n_frames) - path = self.get_temp_path('sample.wav') - save_wav(path, wav, sample_rate) - m = _get_audio_meta(path, minimal=True) - assert m.path == path, 'path does not match' - assert m.sample_rate == sample_rate, 'sample rate does not match' - assert m.duration == duration, 'duration does not match' - assert m.amplitude is None - assert m.info_path is None - - def test_save_audio_meta(self): - audio_meta = [ - AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')), - AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json')) - ] - empty_audio_meta = [] - for idx, meta in enumerate([audio_meta, empty_audio_meta]): - path = self.get_temp_path(f'data_{idx}_save.jsonl') - save_audio_meta(path, meta) - with open(path, 'r') as f: - lines = f.readlines() - read_meta = [AudioMeta.from_dict(json.loads(line)) for line in lines] - assert len(read_meta) == len(meta) - for m, read_m in zip(meta, read_meta): - assert m == read_m - - def test_load_audio_meta(self): - try: - import dora - except ImportError: - dora = None # type: ignore - - audio_meta = [ - AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')), - AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json')) - ] - empty_meta = [] - for idx, meta in enumerate([audio_meta, empty_meta]): - path = self.get_temp_path(f'data_{idx}_load.jsonl') - with open(path, 'w') as f: - for m in meta: - json_str = json.dumps(m.to_dict()) + '\n' - f.write(json_str) - read_meta = load_audio_meta(path) - assert len(read_meta) == len(meta) - for m, read_m in zip(meta, read_meta): - if dora: - m.path = dora.git_save.to_absolute_path(m.path) - assert m == read_m, f'original={m}, read={read_m}' - - -class TestAudioDataset(TempDirMixin): - - def _create_audio_files(self, - root_name: str, - num_examples: int, - durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.), - sample_rate: int = 16_000, - channels: int = 1): - root_dir = self.get_temp_dir(root_name) - for i in range(num_examples): - if isinstance(durations, float): - duration = durations - elif isinstance(durations, tuple) and len(durations) == 1: - duration = durations[0] - elif isinstance(durations, tuple) and len(durations) == 2: - duration = random.uniform(durations[0], durations[1]) - else: - assert False - n_frames = int(duration * sample_rate) - wav = get_white_noise(channels, n_frames) - path = os.path.join(root_dir, f'example_{i}.wav') - save_wav(path, wav, sample_rate) - return root_dir - - def _create_audio_dataset(self, - root_name: str, - total_num_examples: int, - durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.), - sample_rate: int = 16_000, - channels: int = 1, - segment_duration: tp.Optional[float] = None, - num_examples: int = 10, - shuffle: bool = True, - return_info: bool = False): - root_dir = self._create_audio_files(root_name, total_num_examples, durations, sample_rate, channels) - dataset = AudioDataset.from_path(root_dir, - minimal_meta=True, - segment_duration=segment_duration, - num_samples=num_examples, - sample_rate=sample_rate, - channels=channels, - shuffle=shuffle, - return_info=return_info) - return dataset - - def test_dataset_full(self): - total_examples = 10 - min_duration, max_duration = 1., 4. 
- sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), - sample_rate=sample_rate, channels=channels, segment_duration=None) - assert len(dataset) == total_examples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] <= int(max_duration * sample_rate) - assert sample.shape[1] >= int(min_duration * sample_rate) - - def test_dataset_segment(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples) - assert len(dataset) == num_samples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == int(segment_duration * sample_rate) - - def test_dataset_equal_audio_and_segment_durations(self): - total_examples = 1 - num_samples = 2 - audio_duration = 1. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=audio_duration, sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples) - assert len(dataset) == num_samples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == int(segment_duration * sample_rate) - # the random seek_time adds variability on audio read - sample_1 = dataset[0] - sample_2 = dataset[1] - assert not torch.allclose(sample_1, sample_2) - - def test_dataset_samples(self): - total_examples = 1 - num_samples = 2 - audio_duration = 1. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - - create_dataset = partial( - self._create_audio_dataset, - 'dset', total_examples, durations=audio_duration, sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, - ) - - dataset = create_dataset(shuffle=True) - # when shuffle = True, we have different inputs for the same index across epoch - sample_1 = dataset[0] - sample_2 = dataset[0] - assert not torch.allclose(sample_1, sample_2) - - dataset_noshuffle = create_dataset(shuffle=False) - # when shuffle = False, we have same inputs for the same index across epoch - sample_1 = dataset_noshuffle[0] - sample_2 = dataset_noshuffle[0] - assert torch.allclose(sample_1, sample_2) - - def test_dataset_return_info(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. 
- sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True) - assert len(dataset) == num_samples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample, segment_info = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == int(segment_duration * sample_rate) - assert segment_info.sample_rate == sample_rate - assert segment_info.total_frames == int(segment_duration * sample_rate) - assert segment_info.n_frames <= int(segment_duration * sample_rate) - assert segment_info.seek_time >= 0 - - def test_dataset_return_info_no_segment_duration(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = None - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True) - assert len(dataset) == total_examples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample, segment_info = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == segment_info.total_frames - assert segment_info.sample_rate == sample_rate - assert segment_info.n_frames <= segment_info.total_frames - - def test_dataset_collate_fn(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=False) - batch_size = 4 - dataloader = DataLoader( - dataset, - batch_size=batch_size, - num_workers=0 - ) - for idx, batch in enumerate(dataloader): - assert batch.shape[0] == batch_size - - @pytest.mark.parametrize("segment_duration", [1.0, None]) - def test_dataset_with_meta_collate_fn(self, segment_duration): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True) - batch_size = 4 - dataloader = DataLoader( - dataset, - batch_size=batch_size, - collate_fn=dataset.collater, - num_workers=0 - ) - for idx, batch in enumerate(dataloader): - wav, infos = batch - assert wav.shape[0] == batch_size - assert len(infos) == batch_size - - @pytest.mark.parametrize("segment_duration,sample_on_weight,sample_on_duration,a_hist,b_hist,c_hist", [ - [1, True, True, 0.5, 0.5, 0.0], - [1, False, True, 0.25, 0.5, 0.25], - [1, True, False, 0.666, 0.333, 0.0], - [1, False, False, 0.333, 0.333, 0.333], - [None, False, False, 0.333, 0.333, 0.333]]) - def test_sample_with_weight(self, segment_duration, sample_on_weight, sample_on_duration, a_hist, b_hist, c_hist): - random.seed(1234) - rng = torch.Generator() - rng.manual_seed(1234) - - def _get_histogram(dataset, repetitions=20_000): - counts = {file_meta.path: 0. 
for file_meta in meta} - for _ in range(repetitions): - file_meta = dataset.sample_file(rng) - counts[file_meta.path] += 1 - return {name: count / repetitions for name, count in counts.items()} - - meta = [ - AudioMeta(path='a', duration=5, sample_rate=1, weight=2), - AudioMeta(path='b', duration=10, sample_rate=1, weight=None), - AudioMeta(path='c', duration=5, sample_rate=1, weight=0), - ] - dataset = AudioDataset( - meta, segment_duration=segment_duration, sample_on_weight=sample_on_weight, - sample_on_duration=sample_on_duration) - hist = _get_histogram(dataset) - assert math.isclose(hist['a'], a_hist, abs_tol=0.01) - assert math.isclose(hist['b'], b_hist, abs_tol=0.01) - assert math.isclose(hist['c'], c_hist, abs_tol=0.01) - - def test_meta_duration_filter_all(self): - meta = [ - AudioMeta(path='a', duration=5, sample_rate=1, weight=2), - AudioMeta(path='b', duration=10, sample_rate=1, weight=None), - AudioMeta(path='c', duration=5, sample_rate=1, weight=0), - ] - try: - AudioDataset(meta, segment_duration=11, min_segment_ratio=1) - assert False - except AssertionError: - assert True - - def test_meta_duration_filter_long(self): - meta = [ - AudioMeta(path='a', duration=5, sample_rate=1, weight=2), - AudioMeta(path='b', duration=10, sample_rate=1, weight=None), - AudioMeta(path='c', duration=5, sample_rate=1, weight=0), - ] - dataset = AudioDataset(meta, segment_duration=None, min_segment_ratio=1, max_audio_duration=7) - assert len(dataset) == 2 diff --git a/spaces/wiraindrak/summary-of-summarizer/README.md b/spaces/wiraindrak/summary-of-summarizer/README.md deleted file mode 100644 index d677db6f2297a7e404e6913e764f212ca4210a1a..0000000000000000000000000000000000000000 --- a/spaces/wiraindrak/summary-of-summarizer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Summary Of Summarizer -emoji: 😻 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xfys/yolov5_tracking/val_utils/docs/How_To/Add_a_new_metric.md b/spaces/xfys/yolov5_tracking/val_utils/docs/How_To/Add_a_new_metric.md deleted file mode 100644 index 7a3d12c59657f3197808f65e7714cbb472786a5c..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/val_utils/docs/How_To/Add_a_new_metric.md +++ /dev/null @@ -1,11 +0,0 @@ -# How to add a new or custom family of evaluation metrics to TrackEval - - - Create your metrics code in ```trackeval/metrics/.py```. - - It's probably easiest to start by copying an existing metrics code and editing it, e.g. ```trackeval/metrics/identity.py``` is probably the simplest. - - Your metric should be class, and it should inherit from the ```trackeval.metrics._base_metric._BaseMetric``` class. - - Define an ```__init__``` function that defines the different ```fields``` (values) that your metric will calculate. See ```trackeval/metrics/_base_metric.py``` for a list of currently used field types. Feel free to add new types. - - Define your code to actually calculate your metric for a single sequence and single class in a function called ```eval_sequence```, which takes a data dictionary as input, and returns a results dictionary as output. - - Define functions for how to combine your metric field values over a) sequences ```combine_sequences```, b) over classes ```combine_classes_class_averaged```, and c) over classes weighted by the number of detections ```combine_classes_det_averaged```. 
- We find that using a function such as the ```_compute_final_fields``` function that we use in the current metrics is convenient, because it is typically needed both for the metric calculation and for the different metric combinations; however, this is not required.
- - Register your new metric by adding it to ```trackeval/metrics/__init__.py```
- - Your new metric can be used by passing the metrics class to a list of metrics which is passed to the evaluator (see files in ```scripts/*```).
diff --git a/spaces/xiaolongbaox/gpt2.0/run_Linux.sh b/spaces/xiaolongbaox/gpt2.0/run_Linux.sh
deleted file mode 100644
index 62af07283093d8e580763d7acfe493c3d88e7b08..0000000000000000000000000000000000000000
--- a/spaces/xiaolongbaox/gpt2.0/run_Linux.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Get the directory the script lives in
-script_dir=$(dirname "$0")
-
-# Change the working directory to the script's directory
-cd "$script_dir"
-
-# Check whether the Git repository has updates
-git remote update
-pwd
-
-if ! git status -uno | grep 'up to date' > /dev/null; then
-    # If there are updates, stop the currently running server
-    pkill -f ChuanhuChatbot.py
-
-    # Pull the latest changes
-    git pull
-
-    # Install dependencies
-    pip3 install -r requirements.txt
-
-    # Restart the server
-    nohup python3 ChuanhuChatbot.py &
-fi
diff --git a/spaces/yangfeixue/newbing/Dockerfile b/spaces/yangfeixue/newbing/Dockerfile
deleted file mode 100644
index b309549e4603af7b57a36d9cf8c47bb7cef31af3..0000000000000000000000000000000000000000
--- a/spaces/yangfeixue/newbing/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# Build Stage
-# Use golang:alpine as the base image for the build stage
-FROM golang:alpine AS builder
-
-# Add git so the project can be cloned from GitHub
-RUN apk --no-cache add git
-
-# Clone the go-proxy-bingai project from GitHub into /workspace/app
-RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
-
-# Set the working directory to the cloned project directory
-WORKDIR /workspace/app
-
-# Build the Go project. -ldflags="-s -w" reduces the size of the compiled binary
-RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
-
-# Runtime Stage
-# Use the lightweight alpine image as the runtime base image
-FROM alpine
-
-# Set the working directory
-WORKDIR /workspace/app
-
-# Copy the compiled binary from the build stage into the runtime image
-COPY --from=builder /workspace/app/go-proxy-bingai .
- -# 设置环境变量,此处为随机字符 -ENV Go_Proxy_BingAI_USER_TOKEN_1="18FltUu3q1pp62iMrwyyabrAZMAk7CGIlZFrdju4hFQmHGsAHXCxxLBbMNg7eqG9anAwaL9iPbbmmpOm5EGqvFgmU6nuqkQuW0-LV-5CPmWbGzBu0w4OFsYYDbF7sQqpA6puu1oslsWL4rHg7-m2hTg2GwJxOo2JT1spfe9N1rHjiVFH4UXa6CK1Aqgjw6AnsNogFJKyt2RpMnsYCKssK-g" - -# 暴露8080端口 -EXPOSE 8080 - -# 容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/yderre-aubay/midi-player-demo/src/components/Slider.tsx b/spaces/yderre-aubay/midi-player-demo/src/components/Slider.tsx deleted file mode 100644 index 84e685c9cacf42ea6eaba10dbf1afa07bb002051..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/components/Slider.tsx +++ /dev/null @@ -1,96 +0,0 @@ -import styled from "@emotion/styled" -import { - Range, - Root, - SliderProps as Props, - Thumb, - Track, -} from "@radix-ui/react-slider" -import { FC } from "react" - -export type SliderProps = Omit< - Props, - "value" | "onValueChange" | "onChange" | "defaultValue" -> & { - value: number - defaultValue?: number - onChange: (value: number) => void - marks?: number[] -} - -const StyledRoot = styled(Root)` - position: relative; - display: flex; - align-items: center; - user-select: none; - touch-action: none; - width: 10rem; - height: 2rem; -` - -const StyledTrack = styled(Track)` - background-color: ${({ theme }) => theme.tertiaryTextColor}; - position: relative; - flex-grow: 1; - border-radius: 9999px; - height: 0.1rem; -` - -const StyledRange = styled(Range)` - position: absolute; - background-color: ${({ theme }) => theme.textColor}; - border-radius: 9999px; - height: 100%; -` - -const StyledThumb = styled(Thumb)` - display: block; - width: 0.75rem; - height: 0.75rem; - background-color: ${({ theme }) => theme.textColor}; - box-shadow: 0 0.1rem 1rem ${({ theme }) => theme.shadowColor}; - border-radius: 999px; - - &:hover { - background-color: ${({ theme }) => theme.secondaryTextColor}; - } - - &:focus { - outline: none; - } -` - -const Mark = styled.div` - width: 0.1rem; - height: 100%; - position: absolute; - background-color: ${({ theme }) => theme.textColor}; -` - -export const Slider: FC = ({ - value, - onChange, - defaultValue, - marks, - ...props -}) => ( - onChange(value[0])} - {...props} - > - - - {marks?.map((value, index) => ( - - ))} - - - -) diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/PianoRollToolbar/EventListButton.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/PianoRollToolbar/EventListButton.tsx deleted file mode 100644 index a15e94122ce7eaa60c7e30b80eb15d88912b6ec6..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/PianoRollToolbar/EventListButton.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import FormatListBulleted from "mdi-react/FormatListBulletedIcon" -import { FC, useCallback } from "react" -import { Localized } from "../../../components/Localized" -import { Tooltip } from "../../../components/Tooltip" -import { useStores } from "../../hooks/useStores" -import { ToolbarButton } from "../Toolbar/ToolbarButton" - -export const EventListButton: FC = () => { - const { pianoRollStore } = useStores() - - return ( - event-list}> - { - pianoRollStore.showEventList = !pianoRollStore.showEventList - }, [])} - > - - - - ) -} diff --git a/spaces/yigithan4568/bingo/README.md b/spaces/yigithan4568/bingo/README.md deleted file mode 100644 index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000 --- a/spaces/yigithan4568/bingo/README.md 
+++ /dev/null @@ -1,28 +0,0 @@ ---- -title: bingo -emoji: 😊 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -
-
-# Bingo
-
-Bingo, a New Bing that lets you breathe easy.
-
-A close reproduction of the main features of the New Bing web UI; usable from mainland China, compatible with the vast majority of Microsoft Bing AI features, and deployable on your own server.
-
-![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars)
-![Github issues](https://img.shields.io/github/issues/weaigc/bingo)
-[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license)
-
-For bug reports and feedback, please visit https://github.com/weaigc/bingo/issues
-
                - - diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bridgetower/processing_bridgetower.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bridgetower/processing_bridgetower.py deleted file mode 100644 index c268d7c26f43d988a3359ec6f4d62ce8dcff1bd0..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bridgetower/processing_bridgetower.py +++ /dev/null @@ -1,118 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Processor class for BridgeTower. -""" - -from typing import List, Optional, Union - -from ...processing_utils import ProcessorMixin -from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy -from ...utils import TensorType - - -class BridgeTowerProcessor(ProcessorMixin): - r""" - Constructs a BridgeTower processor which wraps a Roberta tokenizer and BridgeTower image processor into a single - processor. - - [`BridgeTowerProcessor`] offers all the functionalities of [`BridgeTowerImageProcessor`] and - [`RobertaTokenizerFast`]. See the docstring of [`~BridgeTowerProcessor.__call__`] and - [`~BridgeTowerProcessor.decode`] for more information. - - Args: - image_processor (`BridgeTowerImageProcessor`): - An instance of [`BridgeTowerImageProcessor`]. The image processor is a required input. - tokenizer (`RobertaTokenizerFast`): - An instance of ['RobertaTokenizerFast`]. The tokenizer is a required input. - """ - attributes = ["image_processor", "tokenizer"] - image_processor_class = "BridgeTowerImageProcessor" - tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast") - - def __init__(self, image_processor, tokenizer): - super().__init__(image_processor, tokenizer) - - def __call__( - self, - images, - text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, - add_special_tokens: bool = True, - padding: Union[bool, str, PaddingStrategy] = False, - truncation: Union[bool, str, TruncationStrategy] = None, - max_length: Optional[int] = None, - stride: int = 0, - pad_to_multiple_of: Optional[int] = None, - return_token_type_ids: Optional[bool] = None, - return_attention_mask: Optional[bool] = None, - return_overflowing_tokens: bool = False, - return_special_tokens_mask: bool = False, - return_offsets_mapping: bool = False, - return_length: bool = False, - verbose: bool = True, - return_tensors: Optional[Union[str, TensorType]] = None, - **kwargs, - ) -> BatchEncoding: - """ - This method uses [`BridgeTowerImageProcessor.__call__`] method to prepare image(s) for the model, and - [`RobertaTokenizerFast.__call__`] to prepare text for the model. - - Please refer to the docstring of the above two methods for more information. 
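-
-        Example (an illustrative usage sketch; the checkpoint name below is an example and may differ):
-
-        ```python
-        >>> from PIL import Image
-        >>> import requests
-        >>> from transformers import BridgeTowerProcessor
-
-        >>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
-        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
-        >>> image = Image.open(requests.get(url, stream=True).raw)
-        >>> # the returned BatchEncoding merges tokenizer outputs (input_ids, ...) with image processor outputs (pixel_values, ...)
-        >>> encoding = processor(images=image, text="two cats lying on a couch", return_tensors="pt")
-        ```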
- """ - encoding = self.tokenizer( - text=text, - add_special_tokens=add_special_tokens, - padding=padding, - truncation=truncation, - max_length=max_length, - stride=stride, - pad_to_multiple_of=pad_to_multiple_of, - return_token_type_ids=return_token_type_ids, - return_attention_mask=return_attention_mask, - return_overflowing_tokens=return_overflowing_tokens, - return_special_tokens_mask=return_special_tokens_mask, - return_offsets_mapping=return_offsets_mapping, - return_length=return_length, - verbose=verbose, - return_tensors=return_tensors, - **kwargs, - ) - # add pixel_values + pixel_mask - encoding_image_processor = self.image_processor( - images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs - ) - encoding.update(encoding_image_processor) - - return encoding - - def batch_decode(self, *args, **kwargs): - """ - This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please - refer to the docstring of this method for more information. - """ - return self.tokenizer.batch_decode(*args, **kwargs) - - def decode(self, *args, **kwargs): - """ - This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer - to the docstring of this method for more information. - """ - return self.tokenizer.decode(*args, **kwargs) - - @property - def model_input_names(self): - tokenizer_input_names = self.tokenizer.model_input_names - image_processor_input_names = self.image_processor.model_input_names - return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dinov2/convert_dinov2_to_hf.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dinov2/convert_dinov2_to_hf.py deleted file mode 100644 index 352454c9f3406237d4e6c398c798e05a0e2ab904..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dinov2/convert_dinov2_to_hf.py +++ /dev/null @@ -1,244 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Convert DINOv2 checkpoints from the original repository. 
- -URL: https://github.com/facebookresearch/dinov2/tree/main -""" - - -import argparse -from pathlib import Path - -import requests -import torch -from PIL import Image -from torchvision import transforms - -from transformers import BitImageProcessor, Dinov2Config, Dinov2Model -from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling -from transformers.utils import logging - - -logging.set_verbosity_info() -logger = logging.get_logger(__name__) - - -def get_dinov2_config(model_name): - config = Dinov2Config(image_size=518, patch_size=14) - - # size of the architecture - if "vits" in model_name: - config.hidden_size = 384 - config.num_attention_heads = 6 - elif "vitb" in model_name: - pass - elif "vitl" in model_name: - config.hidden_size = 1024 - config.num_hidden_layers = 24 - config.num_attention_heads = 16 - elif "vitg" in model_name: - config.use_swiglu_ffn = True - config.hidden_size = 1536 - config.num_hidden_layers = 40 - config.num_attention_heads = 24 - else: - raise ValueError("Model not supported") - - return config - - -def create_rename_keys(config): - rename_keys = [] - # fmt: off - - # patch embedding layer - rename_keys.append(("cls_token", "embeddings.cls_token")) - rename_keys.append(("mask_token", "embeddings.mask_token")) - rename_keys.append(("pos_embed", "embeddings.position_embeddings")) - rename_keys.append(("patch_embed.proj.weight", "embeddings.patch_embeddings.projection.weight")) - rename_keys.append(("patch_embed.proj.bias", "embeddings.patch_embeddings.projection.bias")) - - for i in range(config.num_hidden_layers): - # layernorms - rename_keys.append((f"blocks.{i}.norm1.weight", f"encoder.layer.{i}.norm1.weight")) - rename_keys.append((f"blocks.{i}.norm1.bias", f"encoder.layer.{i}.norm1.bias")) - rename_keys.append((f"blocks.{i}.norm2.weight", f"encoder.layer.{i}.norm2.weight")) - rename_keys.append((f"blocks.{i}.norm2.bias", f"encoder.layer.{i}.norm2.bias")) - # MLP - if config.use_swiglu_ffn: - rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"encoder.layer.{i}.mlp.w12.weight")) - rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"encoder.layer.{i}.mlp.w12.bias")) - rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"encoder.layer.{i}.mlp.w3.weight")) - rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"encoder.layer.{i}.mlp.w3.bias")) - else: - rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"encoder.layer.{i}.mlp.fc1.weight")) - rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"encoder.layer.{i}.mlp.fc1.bias")) - rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"encoder.layer.{i}.mlp.fc2.weight")) - rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"encoder.layer.{i}.mlp.fc2.bias")) - # layerscale - rename_keys.append((f"blocks.{i}.ls1.gamma", f"encoder.layer.{i}.layer_scale1.lambda1")) - rename_keys.append((f"blocks.{i}.ls2.gamma", f"encoder.layer.{i}.layer_scale2.lambda1")) - # attention projection layer - rename_keys.append((f"blocks.{i}.attn.proj.weight", f"encoder.layer.{i}.attention.output.dense.weight")) - rename_keys.append((f"blocks.{i}.attn.proj.bias", f"encoder.layer.{i}.attention.output.dense.bias")) - - # final layernorm - rename_keys.append(("norm.weight", "layernorm.weight")) - rename_keys.append(("norm.bias", "layernorm.bias")) - - # fmt: on - return rename_keys - - -def rename_key(dct, old, new): - val = dct.pop(old) - dct[new] = val - - -# we split up the matrix of each encoder layer into queries, keys and values -def read_in_q_k_v(state_dict, config): - for i in 
range(config.num_hidden_layers): - # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) - in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") - in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") - # next, add query, keys and values (in that order) to the state dict - state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :] - state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] - state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ - config.hidden_size : config.hidden_size * 2, : - ] - state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ - config.hidden_size : config.hidden_size * 2 - ] - state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :] - state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] - - -# We will verify our results on an image of cute cats -def prepare_img(): - url = "http://images.cocodataset.org/val2017/000000039769.jpg" - image = Image.open(requests.get(url, stream=True).raw) - return image - - -@torch.no_grad() -def convert_dinov2_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): - """ - Copy/paste/tweak model's weights to our DINOv2 structure. - """ - - # define default Dinov2 configuration - config = get_dinov2_config(model_name) - - # load original model from torch hub - original_model = torch.hub.load("facebookresearch/dinov2", model_name) - original_model.eval() - - # load state_dict of original model, remove and rename some keys - state_dict = original_model.state_dict() - rename_keys = create_rename_keys(config) - for src, dest in rename_keys: - rename_key(state_dict, src, dest) - read_in_q_k_v(state_dict, config) - - for key, val in state_dict.copy().items(): - val = state_dict.pop(key) - if "w12" in key: - key = key.replace("w12", "weights_in") - if "w3" in key: - key = key.replace("w3", "weights_out") - state_dict[key] = val - - # load HuggingFace model - model = Dinov2Model(config, add_pooling_layer=False).eval() - model.load_state_dict(state_dict) - - # load image - url = "http://images.cocodataset.org/val2017/000000039769.jpg" - image = Image.open(requests.get(url, stream=True).raw).convert("RGB") - - # preprocess image - transformations = transforms.Compose( - [ - transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC), - transforms.CenterCrop(224), - transforms.ToTensor(), - transforms.Normalize( - mean=IMAGENET_DEFAULT_MEAN, # these are RGB mean+std values - std=IMAGENET_DEFAULT_STD, # across a large photo dataset. 
- ), - ] - ) - - original_pixel_values = transformations(image).unsqueeze(0) # insert batch dimension - - processor = BitImageProcessor( - size={"shortest_edge": 256}, - resample=PILImageResampling.BICUBIC, - image_mean=IMAGENET_DEFAULT_MEAN, - image_std=IMAGENET_DEFAULT_STD, - ) - pixel_values = processor(image, return_tensors="pt").pixel_values - - assert torch.allclose(original_pixel_values, pixel_values) - - with torch.no_grad(): - outputs = model(pixel_values) - original_outputs = original_model(pixel_values) - - # assert values - assert outputs.last_hidden_state[:, 0].shape == original_outputs.shape - assert torch.allclose(outputs.last_hidden_state[:, 0], original_outputs, atol=1e-3) - print("Looks ok!") - - if pytorch_dump_folder_path is not None: - Path(pytorch_dump_folder_path).mkdir(exist_ok=True) - print(f"Saving model {model_name} to {pytorch_dump_folder_path}") - model.save_pretrained(pytorch_dump_folder_path) - print(f"Saving image processor to {pytorch_dump_folder_path}") - processor.save_pretrained(pytorch_dump_folder_path) - - if push_to_hub: - model_name_to_hf_name = { - "dinov2_vits14": "dinov2-small", - "dinov2_vitb14": "dinov2-base", - "dinov2_vitl14": "dinov2-large", - "dinov2_vitg14": "dinov2-giant", - } - - name = model_name_to_hf_name[model_name] - model.push_to_hub(f"facebook/{name}") - processor.push_to_hub(f"facebook/{name}") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - # Required parameters - parser.add_argument( - "--model_name", - default="dinov2_vitb14", - type=str, - choices=["dinov2_vits14", "dinov2_vitb14", "dinov2_vitl14", "dinov2_vitg14"], - help="Name of the model you'd like to convert.", - ) - parser.add_argument( - "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." - ) - parser.add_argument( - "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." - ) - - args = parser.parse_args() - convert_dinov2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) diff --git a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vencoder/whisper/audio.py b/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vencoder/whisper/audio.py deleted file mode 100644 index 3bdb70ba9357e95ff05853dcc06437c3401ef3be..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vencoder/whisper/audio.py +++ /dev/null @@ -1,125 +0,0 @@ -import os -from functools import lru_cache -from typing import Union - -import ffmpeg -import numpy as np -import torch -import torch.nn.functional as F - -from .utils import exact_div - -from librosa.filters import mel as librosa_mel_fn - -# hard-coded audio hyperparameters -SAMPLE_RATE = 16000 -N_FFT = 400 -N_MELS = 80 -HOP_LENGTH = 160 -CHUNK_LENGTH = 30 -N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk -N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input - - -def load_audio(file: str, sr: int = SAMPLE_RATE): - """ - Open an audio file and read as mono waveform, resampling as necessary - - Parameters - ---------- - file: str - The audio file to open - - sr: int - The sample rate to resample the audio if necessary - - Returns - ------- - A NumPy array containing the audio waveform, in float32 dtype. - """ - try: - # This launches a subprocess to decode audio while down-mixing and resampling as necessary. - # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. 
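-        # The call below asks ffmpeg for raw signed 16-bit little-endian PCM (format "s16le", codec pcm_s16le),
-        # down-mixed to mono (ac=1) and resampled to `sr`; the raw bytes are rescaled to float32 in [-1, 1]
-        # by the `np.frombuffer(...) / 32768.0` expression in the return statement further down.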
- out, _ = ( - ffmpeg.input(file, threads=0) - .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr) - .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True) - ) - except ffmpeg.Error as e: - raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e - - return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0 - - -def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1): - """ - Pad or trim the audio array to N_SAMPLES, as expected by the encoder. - """ - if torch.is_tensor(array): - if array.shape[axis] > length: - array = array.index_select(dim=axis, index=torch.arange(length, device=array.device)) - - if array.shape[axis] < length: - pad_widths = [(0, 0)] * array.ndim - pad_widths[axis] = (0, length - array.shape[axis]) - array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes]) - else: - if array.shape[axis] > length: - array = array.take(indices=range(length), axis=axis) - - if array.shape[axis] < length: - pad_widths = [(0, 0)] * array.ndim - pad_widths[axis] = (0, length - array.shape[axis]) - array = np.pad(array, pad_widths) - - return array - - -@lru_cache(maxsize=None) -def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor: - """ - load the mel filterbank matrix for projecting STFT into a Mel spectrogram. - Allows decoupling librosa dependency; saved using: - - np.savez_compressed( - "mel_filters.npz", - mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80), - ) - """ - assert n_mels == 80, f"Unsupported n_mels: {n_mels}" - return torch.from_numpy(librosa_mel_fn(sr=SAMPLE_RATE,n_fft=N_FFT,n_mels=n_mels)).to(device) - - -def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS): - """ - Compute the log-Mel spectrogram of - - Parameters - ---------- - audio: Union[str, np.ndarray, torch.Tensor], shape = (*) - The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz - - n_mels: int - The number of Mel-frequency filters, only 80 is supported - - Returns - ------- - torch.Tensor, shape = (80, n_frames) - A Tensor that contains the Mel spectrogram - """ - if not torch.is_tensor(audio): - if isinstance(audio, str): - audio = load_audio(audio) - audio = torch.from_numpy(audio) - - window = torch.hann_window(N_FFT).to(audio.device) - stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True) - magnitudes = stft[..., :-1].abs() ** 2 - - filters = mel_filters(audio.device, n_mels) - mel_spec = filters @ magnitudes - - log_spec = torch.clamp(mel_spec, min=1e-10).log10() - log_spec = torch.maximum(log_spec, log_spec.max() - 8.0) - log_spec = (log_spec + 4.0) / 4.0 - return log_spec diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/test_export_caffe2.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/test_export_caffe2.py deleted file mode 100644 index 9a5e155fda907003f60e6bb3d40fd58599c50d59..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/test_export_caffe2.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -*- coding: utf-8 -*- - -import copy -import os -import tempfile -import unittest -import torch - -from detectron2 import model_zoo -from detectron2.export import Caffe2Model, Caffe2Tracer -from detectron2.utils.logger import setup_logger -from detectron2.utils.testing import get_sample_coco_image - - -# TODO: this test requires manifold access, see: T88318502 -# Running it on CircleCI causes crash, not sure why. -@unittest.skipIf(os.environ.get("CIRCLECI"), "Caffe2 tests crash on CircleCI.") -class TestCaffe2Export(unittest.TestCase): - def setUp(self): - setup_logger() - - def _test_model(self, config_path, device="cpu"): - cfg = model_zoo.get_config(config_path) - cfg.MODEL.DEVICE = device - model = model_zoo.get(config_path, trained=True, device=device) - - inputs = [{"image": get_sample_coco_image()}] - tracer = Caffe2Tracer(cfg, model, copy.deepcopy(inputs)) - - with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as d: - if not os.environ.get("CI"): - # This requires onnx, which is not yet available on public CI - c2_model = tracer.export_caffe2() - c2_model.save_protobuf(d) - c2_model.save_graph(os.path.join(d, "test.svg"), inputs=copy.deepcopy(inputs)) - - c2_model = Caffe2Model.load_protobuf(d) - c2_model(inputs)[0]["instances"] - - ts_model = tracer.export_torchscript() - ts_model.save(os.path.join(d, "model.ts")) - - def testMaskRCNN(self): - self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml") - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def testMaskRCNNGPU(self): - self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", device="cuda") - - def testRetinaNet(self): - self._test_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml") diff --git a/spaces/yunfei0710/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.h b/spaces/yunfei0710/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.h deleted file mode 100644 index a1c0cac61839a6f66a42c341f50d5e36faad9a93..0000000000000000000000000000000000000000 --- a/spaces/yunfei0710/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.h +++ /dev/null @@ -1,316 +0,0 @@ -// jpgd.h - C++ class for JPEG decompression. -// Public domain, Rich Geldreich -#ifndef JPEG_DECODER_H -#define JPEG_DECODER_H - -#include -#include -#include - -namespace jpgd -{ - typedef unsigned char uint8; - typedef signed short int16; - typedef unsigned short uint16; - typedef unsigned int uint; - typedef signed int int32; - - // Loads a JPEG image from a memory buffer or a file. - // req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA). - // On return, width/height will be set to the image's dimensions, and actual_comps will be set to the either 1 (grayscale) or 3 (RGB). - // Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly. - // Requesting a 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp. 
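- // Illustrative call (an editorial sketch; "photo.jpg" and the variable names are placeholders, not taken from this header):
- //   int width = 0, height = 0, actual_comps = 0;
- //   unsigned char *pPixels = jpgd::decompress_jpeg_image_from_file("photo.jpg", &width, &height, &actual_comps, 4);
- //   if (pPixels) { /* use the width x height RGBA pixels, then release the buffer (free() in stock jpgd) */ }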
-// BEGIN EPIC MOD -//unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps); - unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format); -// END EPIC MOD - unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps); - - // Success/failure error codes. - enum jpgd_status - { - JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1, - JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE, - JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS, - JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH, - JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER, - JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS, - JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE, - JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR, - JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM - }; - - // Input stream interface. - // Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available. - // The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set. - // It the input stream contains data after the JPEG stream's EOI (end of image) marker it will probably be pulled into the internal buffer. - // Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding. - class jpeg_decoder_stream - { - public: - jpeg_decoder_stream() { } - virtual ~jpeg_decoder_stream() { } - - // The read() method is called when the internal input buffer is empty. - // Parameters: - // pBuf - input buffer - // max_bytes_to_read - maximum bytes that can be written to pBuf - // pEOF_flag - set this to true if at end of stream (no more bytes remaining) - // Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0). - // Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full. - virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0; - }; - - // stdio FILE stream class. - class jpeg_decoder_file_stream : public jpeg_decoder_stream - { - jpeg_decoder_file_stream(const jpeg_decoder_file_stream &); - jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &); - - FILE *m_pFile; - bool m_eof_flag, m_error_flag; - - public: - jpeg_decoder_file_stream(); - virtual ~jpeg_decoder_file_stream(); - - bool open(const char *Pfilename); - void close(); - - virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag); - }; - - // Memory stream class. 
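- // Note: this stream reads straight from a caller-owned buffer (m_pSrc_data is only a pointer, no copy is made),
- // so the source data must remain valid for as long as the stream is in use.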
- class jpeg_decoder_mem_stream : public jpeg_decoder_stream - { - const uint8 *m_pSrc_data; - uint m_ofs, m_size; - - public: - jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { } - jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { } - - virtual ~jpeg_decoder_mem_stream() { } - - bool open(const uint8 *pSrc_data, uint size); - void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; } - - virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag); - }; - - // Loads JPEG file from a jpeg_decoder_stream. - unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps); - - enum - { - JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4, - JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384 - }; - - typedef int16 jpgd_quant_t; - typedef int16 jpgd_block_t; - - class jpeg_decoder - { - public: - // Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc. - // methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline. - jpeg_decoder(jpeg_decoder_stream *pStream); - - ~jpeg_decoder(); - - // Call this method after constructing the object to begin decompression. - // If JPGD_SUCCESS is returned you may then call decode() on each scanline. - int begin_decoding(); - - // Returns the next scan line. - // For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1). - // Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4). - // Returns JPGD_SUCCESS if a scan line has been returned. - // Returns JPGD_DONE if all scan lines have been returned. - // Returns JPGD_FAILED if an error occurred. Call get_error_code() for a more info. - int decode(const void** pScan_line, uint* pScan_line_len); - - inline jpgd_status get_error_code() const { return m_error_code; } - - inline int get_width() const { return m_image_x_size; } - inline int get_height() const { return m_image_y_size; } - - inline int get_num_components() const { return m_comps_in_frame; } - - inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; } - inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); } - - // Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file). 
- inline int get_total_bytes_read() const { return m_total_bytes_read; } - - private: - jpeg_decoder(const jpeg_decoder &); - jpeg_decoder &operator =(const jpeg_decoder &); - - typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int); - - struct huff_tables - { - bool ac_table; - uint look_up[256]; - uint look_up2[256]; - uint8 code_size[256]; - uint tree[512]; - }; - - struct coeff_buf - { - uint8 *pData; - int block_num_x, block_num_y; - int block_len_x, block_len_y; - int block_size; - }; - - struct mem_block - { - mem_block *m_pNext; - size_t m_used_count; - size_t m_size; - char m_data[1]; - }; - - jmp_buf m_jmp_state; - mem_block *m_pMem_blocks; - int m_image_x_size; - int m_image_y_size; - jpeg_decoder_stream *m_pStream; - int m_progressive_flag; - uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES]; - uint8* m_huff_num[JPGD_MAX_HUFF_TABLES]; // pointer to number of Huffman codes per bit size - uint8* m_huff_val[JPGD_MAX_HUFF_TABLES]; // pointer to Huffman codes per bit size - jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables - int m_scan_type; // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported) - int m_comps_in_frame; // # of components in frame - int m_comp_h_samp[JPGD_MAX_COMPONENTS]; // component's horizontal sampling factor - int m_comp_v_samp[JPGD_MAX_COMPONENTS]; // component's vertical sampling factor - int m_comp_quant[JPGD_MAX_COMPONENTS]; // component's quantization table selector - int m_comp_ident[JPGD_MAX_COMPONENTS]; // component's ID - int m_comp_h_blocks[JPGD_MAX_COMPONENTS]; - int m_comp_v_blocks[JPGD_MAX_COMPONENTS]; - int m_comps_in_scan; // # of components in scan - int m_comp_list[JPGD_MAX_COMPS_IN_SCAN]; // components in this scan - int m_comp_dc_tab[JPGD_MAX_COMPONENTS]; // component's DC Huffman coding table selector - int m_comp_ac_tab[JPGD_MAX_COMPONENTS]; // component's AC Huffman coding table selector - int m_spectral_start; // spectral selection start - int m_spectral_end; // spectral selection end - int m_successive_low; // successive approximation low - int m_successive_high; // successive approximation high - int m_max_mcu_x_size; // MCU's max. X size in pixels - int m_max_mcu_y_size; // MCU's max. 
Y size in pixels - int m_blocks_per_mcu; - int m_max_blocks_per_row; - int m_mcus_per_row, m_mcus_per_col; - int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU]; - int m_total_lines_left; // total # lines left in image - int m_mcu_lines_left; // total # lines left in this MCU - int m_real_dest_bytes_per_scan_line; - int m_dest_bytes_per_scan_line; // rounded up - int m_dest_bytes_per_pixel; // 4 (RGB) or 1 (Y) - huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES]; - coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS]; - coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS]; - int m_eob_run; - int m_block_y_mcu[JPGD_MAX_COMPONENTS]; - uint8* m_pIn_buf_ofs; - int m_in_buf_left; - int m_tem_flag; - bool m_eof_flag; - uint8 m_in_buf_pad_start[128]; - uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128]; - uint8 m_in_buf_pad_end[128]; - int m_bits_left; - uint m_bit_buf; - int m_restart_interval; - int m_restarts_left; - int m_next_restart_num; - int m_max_mcus_per_row; - int m_max_blocks_per_mcu; - int m_expanded_blocks_per_mcu; - int m_expanded_blocks_per_row; - int m_expanded_blocks_per_component; - bool m_freq_domain_chroma_upsample; - int m_max_mcus_per_col; - uint m_last_dc_val[JPGD_MAX_COMPONENTS]; - jpgd_block_t* m_pMCU_coefficients; - int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU]; - uint8* m_pSample_buf; - int m_crr[256]; - int m_cbb[256]; - int m_crg[256]; - int m_cbg[256]; - uint8* m_pScan_line_0; - uint8* m_pScan_line_1; - jpgd_status m_error_code; - bool m_ready_flag; - int m_total_bytes_read; - - void free_all_blocks(); - // BEGIN EPIC MOD - UE_NORETURN void stop_decoding(jpgd_status status); - // END EPIC MOD - void *alloc(size_t n, bool zero = false); - void word_clear(void *p, uint16 c, uint n); - void prep_in_buffer(); - void read_dht_marker(); - void read_dqt_marker(); - void read_sof_marker(); - void skip_variable_marker(); - void read_dri_marker(); - void read_sos_marker(); - int next_marker(); - int process_markers(); - void locate_soi_marker(); - void locate_sof_marker(); - int locate_sos_marker(); - void init(jpeg_decoder_stream * pStream); - void create_look_ups(); - void fix_in_buffer(); - void transform_mcu(int mcu_row); - void transform_mcu_expand(int mcu_row); - coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y); - inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y); - void load_next_row(); - void decode_next_row(); - void make_huff_table(int index, huff_tables *pH); - void check_quant_tables(); - void check_huff_tables(); - void calc_mcu_block_order(); - int init_scan(); - void init_frame(); - void process_restart(); - void decode_scan(pDecode_block_func decode_block_func); - void init_progressive(); - void init_sequential(); - void decode_start(); - void decode_init(jpeg_decoder_stream * pStream); - void H2V2Convert(); - void H2V1Convert(); - void H1V2Convert(); - void H1V1Convert(); - void gray_convert(); - void expanded_convert(); - void find_eoi(); - inline uint get_char(); - inline uint get_char(bool *pPadding_flag); - inline void stuff_char(uint8 q); - inline uint8 get_octet(); - inline uint get_bits(int num_bits); - inline uint get_bits_no_markers(int numbits); - inline int huff_decode(huff_tables *pH); - inline int huff_decode(huff_tables *pH, int& extrabits); - static inline uint8 clamp(int i); - static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void 
decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y); - }; - -} // namespace jpgd - -#endif // JPEG_DECODER_H diff --git a/spaces/zideliu/styledrop/open_clip/version.py b/spaces/zideliu/styledrop/open_clip/version.py deleted file mode 100644 index 754dd42c2a768c881ebf544251fb6374a32f9b6a..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/open_clip/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '2.15.0'